From 3c7be0b48c5bf00d7c9a2730d414006e17753093 Mon Sep 17 00:00:00 2001
From: Lv Ying <lvying6@huawei.com>
Date: Fri, 10 Dec 2021 02:05:10 +0800
Subject: [PATCH] use generated vmlinux.h instead of bpftool gen vmlinux.h in
 compile env

Signed-off-by: Lv Ying <lvying6@huawei.com>
---
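Notes: the vmlinux.h added below is a generated BTF dump of the target kernel,
checked in so the build host no longer needs bpftool/pahole or a debuginfo
vmlinux. On a host whose kernel was built with CONFIG_DEBUG_INFO_BTF, an
equivalent header can usually be regenerated with the same kind of bpftool
invocation the removed Makefile rule performed, e.g.:

    bpftool btf dump file /sys/kernel/btf/vmlinux format c > vmlinux.h

The cd "$(dirname "$0")" added to start_readahead_tune presumably keeps the
script's relative references to the files installed next to it working when it
is invoked from another directory.
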
 Makefile                  |    22 +-
 atune_bpf_collection.spec |    45 -
 start_readahead_tune      |     3 +
 vmlinux.h                 | 97280 ++++++++++++++++++++++++++++++++++++
 4 files changed, 97286 insertions(+), 64 deletions(-)
 delete mode 100644 atune_bpf_collection.spec
 create mode 100644 vmlinux.h

diff --git a/Makefile b/Makefile
index f5ba4bb..1e070a6 100644
--- a/Makefile
+++ b/Makefile
@@ -1,21 +1,12 @@
 # SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
 OUTPUT := .output
 CLANG ?= clang -v
-BPFTOOL ?= bpftool
-PAHOLE ?= pahole
-READELF ?= readelf
-VMLINUX ?= /usr/lib/debug/lib/modules/`uname -r`/vmlinux
-VMLINUX_HEADER ?= $(OUTPUT)/vmlinux.h
+VMLINUX ?= vmlinux.h
 
-BTF_PAHOLE_PROBE := $(shell $(READELF) -S $(VMLINUX) | grep .BTF 2>&1)
-INCLUDES := -I$(OUTPUT)
+INCLUDES := -I$(OUTPUT) -I$(dir $(VMLINUX))
 CFLAGS := -g -Wall
 ARCH := $(shell uname -m | sed 's/x86_64/x86/')
 
-ifeq ($(BTF_PAHOLE_PROBE),)
-  DWARF2BTF = y
-endif
-
 APPS = readahead_tune
 
 CLANG_BPF_SYS_INCLUDES = $(shell $(CLANG) -v -E - </dev/null 2>&1 \
@@ -48,15 +39,8 @@ $(OUTPUT):
 	$(call msg,MKDIR,$@)
 	$(Q)mkdir -p $@
 
-$(VMLINUX_HEADER):
-	$(call msg,GEN-VMLINUX_H,$@)
-ifeq ($(DWARF2BTF),y)
-	$(Q)$(PAHOLE) -J $(VMLINUX)
-endif
-	$(Q)$(BPFTOOL) btf dump file $(VMLINUX) format c > $@
-
 # Build BPF code
-$(OUTPUT)/%.bpf.o: %.bpf.c $(wildcard %.h) | $(OUTPUT) $(VMLINUX_HEADER)
+$(OUTPUT)/%.bpf.o: %.bpf.c $(wildcard %.h) | $(OUTPUT) $(VMLINUX)
 	$(call msg,BPF,$@)
 	$(Q)$(CLANG) -D__KERNEL__ -D__ASM_SYSREG_H -D__TARGET_ARCH_$(ARCH) \
 		$(DEBUG_FLAGS) \
diff --git a/atune_bpf_collection.spec b/atune_bpf_collection.spec
deleted file mode 100644
index 0ce6dce..0000000
--- a/atune_bpf_collection.spec
+++ /dev/null
@@ -1,45 +0,0 @@
-Name: A-Tune-BPF-Collection
-Version: 0.1
-Release: 1
-License: Mulan PSL v2
-Summary: BPF program collection to adjust fine-grained kernel mode to get better performance
-URL: https://gitee.com/openeuler/A-Tune-BPF-Collection
-Source0: https://gitee.com/openeuler/A-Tune-BPF-Collection/repository/archive/v%{version}.tar.gz
-
-BuildRequires: clang, llvm, libbpf-devel, bpftool, dwarves
-Requires: libbpf
-Provides: readahead_tune
-
-%define debug_package %{nil}
-
-%description
-A-Tune BPF Collection contains a set of BPF program which can interact with kernel in real time.
-It has the following capabilities:
-readahead_tune: trace file reading characteristics, then ajust file read mode to get maximum I/O efficency
-
-%prep
-%autosetup -n %{name}-%{version} -p1
-
-%build
-make %{?_smp_mflags}
-
-%install
-install -D -p -m 0755 readahead_tune %{buildroot}/%{_sbindir}/readahead_tune
-install -D -p -m 0644 readahead_tune.bpf.o %{buildroot}/%{_sbindir}/readahead_tune.bpf.o
-install -D -p -m 0755 start_readahead_tune %{buildroot}/%{_sbindir}/start_readahead_tune
-install -D -p -m 0755 stop_readahead_tune %{buildroot}/%{_sbindir}/stop_readahead_tune
-install -D -p -m 0644 readahead_tune.conf %{buildroot}%{_sysconfdir}/sysconfig/readahead_tune.conf
-
-%files
-%{_sbindir}/readahead_tune
-%{_sbindir}/readahead_tune.bpf.o
-%{_sbindir}/start_readahead_tune
-%{_sbindir}/stop_readahead_tune
-%config(noreplace) %{_sysconfdir}/sysconfig/readahead_tune.conf
-
-%changelog
-* Tue Nov 9 2021 lvying<lvying6@huawei.com> - 0.1-1
-- Type:feature
-- ID:NA
-- SUG:NA
-- DESC: Init A-Tune-BPF-Collection repo and add readahead_tune service
diff --git a/start_readahead_tune b/start_readahead_tune
index afae00c..5d5cd08 100755
--- a/start_readahead_tune
+++ b/start_readahead_tune
@@ -1,5 +1,8 @@
 #!/bin/bash
 
+# change working directory to current binary position
+cd "$(dirname "$0")"
+
 function usage()
 {
     echo "Usage: $0 [ -h | --help] [ -c | --config CONFIG_FILE ]"
diff --git a/vmlinux.h b/vmlinux.h
new file mode 100644
index 0000000..bcb9d76
--- /dev/null
+++ b/vmlinux.h
@@ -0,0 +1,97280 @@
+typedef signed char __s8;
+
+typedef unsigned char __u8;
+
+typedef short int __s16;
+
+typedef short unsigned int __u16;
+
+typedef int __s32;
+
+typedef unsigned int __u32;
+
+typedef long long int __s64;
+
+typedef long long unsigned int __u64;
+
+typedef __s8 s8;
+
+typedef __u8 u8;
+
+typedef __s16 s16;
+
+typedef __u16 u16;
+
+typedef __s32 s32;
+
+typedef __u32 u32;
+
+typedef __s64 s64;
+
+typedef __u64 u64;
+
+enum {
+	false = 0,
+	true = 1,
+};
+
+typedef long int __kernel_long_t;
+
+typedef long unsigned int __kernel_ulong_t;
+
+typedef int __kernel_pid_t;
+
+typedef unsigned int __kernel_uid32_t;
+
+typedef unsigned int __kernel_gid32_t;
+
+typedef __kernel_ulong_t __kernel_size_t;
+
+typedef __kernel_long_t __kernel_ssize_t;
+
+typedef long long int __kernel_loff_t;
+
+typedef __kernel_long_t __kernel_time_t;
+
+typedef __kernel_long_t __kernel_clock_t;
+
+typedef int __kernel_timer_t;
+
+typedef int __kernel_clockid_t;
+
+typedef unsigned int __poll_t;
+
+typedef u32 __kernel_dev_t;
+
+typedef __kernel_dev_t dev_t;
+
+typedef short unsigned int umode_t;
+
+typedef __kernel_pid_t pid_t;
+
+typedef __kernel_clockid_t clockid_t;
+
+typedef _Bool bool;
+
+typedef __kernel_uid32_t uid_t;
+
+typedef __kernel_gid32_t gid_t;
+
+typedef __kernel_loff_t loff_t;
+
+typedef __kernel_size_t size_t;
+
+typedef __kernel_ssize_t ssize_t;
+
+typedef u8 uint8_t;
+
+typedef u16 uint16_t;
+
+typedef u32 uint32_t;
+
+typedef long unsigned int sector_t;
+
+typedef long unsigned int blkcnt_t;
+
+typedef unsigned int gfp_t;
+
+typedef unsigned int fmode_t;
+
+typedef u64 phys_addr_t;
+
+typedef phys_addr_t resource_size_t;
+
+typedef struct {
+	int counter;
+} atomic_t;
+
+typedef struct {
+	long int counter;
+} atomic64_t;
+
+struct list_head {
+	struct list_head *next;
+	struct list_head *prev;
+};
+
+struct hlist_node;
+
+struct hlist_head {
+	struct hlist_node *first;
+};
+
+struct hlist_node {
+	struct hlist_node *next;
+	struct hlist_node **pprev;
+};
+
|
|
+struct callback_head {
|
|
+ struct callback_head *next;
|
|
+ void (*func)(struct callback_head *);
|
|
+};
|
|
+
|
|
+typedef int initcall_entry_t;
|
|
+
|
|
+typedef u64 jump_label_t;
|
|
+
|
|
+struct jump_entry {
|
|
+ jump_label_t code;
|
|
+ jump_label_t target;
|
|
+ jump_label_t key;
|
|
+};
|
|
+
|
|
+struct static_key_mod;
|
|
+
|
|
+struct static_key {
|
|
+ atomic_t enabled;
|
|
+ union {
|
|
+ long unsigned int type;
|
|
+ struct jump_entry *entries;
|
|
+ struct static_key_mod *next;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct static_key_true {
|
|
+ struct static_key key;
|
|
+};
|
|
+
|
|
+struct static_key_false {
|
|
+ struct static_key key;
|
|
+};
|
|
+
|
|
+typedef void *fl_owner_t;
|
|
+
|
|
+struct module;
|
|
+
|
|
+struct file;
|
|
+
|
|
+struct kiocb;
|
|
+
|
|
+struct iov_iter;
|
|
+
|
|
+struct dir_context;
|
|
+
|
|
+struct poll_table_struct;
|
|
+
|
|
+struct vm_area_struct;
|
|
+
|
|
+struct inode;
|
|
+
|
|
+struct file_lock;
|
|
+
|
|
+struct page;
|
|
+
|
|
+struct pipe_inode_info;
|
|
+
|
|
+struct seq_file;
|
|
+
|
|
+struct file_operations {
|
|
+ struct module *owner;
|
|
+ loff_t (*llseek)(struct file *, loff_t, int);
|
|
+ ssize_t (*read)(struct file *, char *, size_t, loff_t *);
|
|
+ ssize_t (*write)(struct file *, const char *, size_t, loff_t *);
|
|
+ ssize_t (*read_iter)(struct kiocb *, struct iov_iter *);
|
|
+ ssize_t (*write_iter)(struct kiocb *, struct iov_iter *);
|
|
+ int (*iterate)(struct file *, struct dir_context *);
|
|
+ int (*iterate_shared)(struct file *, struct dir_context *);
|
|
+ __poll_t (*poll)(struct file *, struct poll_table_struct *);
|
|
+ long int (*unlocked_ioctl)(struct file *, unsigned int, long unsigned int);
|
|
+ long int (*compat_ioctl)(struct file *, unsigned int, long unsigned int);
|
|
+ int (*mmap)(struct file *, struct vm_area_struct *);
|
|
+ long unsigned int mmap_supported_flags;
|
|
+ int (*open)(struct inode *, struct file *);
|
|
+ int (*flush)(struct file *, fl_owner_t);
|
|
+ int (*release)(struct inode *, struct file *);
|
|
+ int (*fsync)(struct file *, loff_t, loff_t, int);
|
|
+ int (*fasync)(int, struct file *, int);
|
|
+ int (*lock)(struct file *, int, struct file_lock *);
|
|
+ ssize_t (*sendpage)(struct file *, struct page *, int, size_t, loff_t *, int);
|
|
+ long unsigned int (*get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int);
|
|
+ int (*check_flags)(int);
|
|
+ int (*flock)(struct file *, int, struct file_lock *);
|
|
+ ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
|
|
+ ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
|
|
+ int (*setlease)(struct file *, long int, struct file_lock **, void **);
|
|
+ long int (*fallocate)(struct file *, int, loff_t, loff_t);
|
|
+ void (*show_fdinfo)(struct seq_file *, struct file *);
|
|
+ ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int);
|
|
+ int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
|
|
+ int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
|
|
+ int (*fadvise)(struct file *, loff_t, loff_t, int);
|
|
+ int (*iopoll)(struct kiocb *, bool);
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct qspinlock {
|
|
+ union {
|
|
+ atomic_t val;
|
|
+ struct {
|
|
+ u8 locked;
|
|
+ u8 pending;
|
|
+ };
|
|
+ struct {
|
|
+ u16 locked_pending;
|
|
+ u16 tail;
|
|
+ };
|
|
+ };
|
|
+};
|
|
+
|
|
+typedef struct qspinlock arch_spinlock_t;
|
|
+
|
|
+struct raw_spinlock {
|
|
+ arch_spinlock_t raw_lock;
|
|
+};
|
|
+
|
|
+struct spinlock {
|
|
+ union {
|
|
+ struct raw_spinlock rlock;
|
|
+ };
|
|
+};
|
|
+
|
|
+typedef struct spinlock spinlock_t;
|
|
+
|
|
+struct notifier_block;
|
|
+
|
|
+struct atomic_notifier_head {
|
|
+ spinlock_t lock;
|
|
+ struct notifier_block *head;
|
|
+};
|
|
+
|
|
+enum system_states {
|
|
+ SYSTEM_BOOTING = 0,
|
|
+ SYSTEM_SCHEDULING = 1,
|
|
+ SYSTEM_RUNNING = 2,
|
|
+ SYSTEM_HALT = 3,
|
|
+ SYSTEM_POWER_OFF = 4,
|
|
+ SYSTEM_RESTART = 5,
|
|
+ SYSTEM_SUSPEND = 6,
|
|
+};
|
|
+
|
|
+struct taint_flag {
|
|
+ char c_true;
|
|
+ char c_false;
|
|
+ bool module;
|
|
+};
|
|
+
|
|
+typedef __s64 time64_t;
|
|
+
|
|
+struct timespec {
|
|
+ __kernel_time_t tv_sec;
|
|
+ long int tv_nsec;
|
|
+};
|
|
+
|
|
+struct timezone {
|
|
+ int tz_minuteswest;
|
|
+ int tz_dsttime;
|
|
+};
|
|
+
|
|
+struct timespec64 {
|
|
+ time64_t tv_sec;
|
|
+ long int tv_nsec;
|
|
+};
|
|
+
|
|
+enum timespec_type {
|
|
+ TT_NONE = 0,
|
|
+ TT_NATIVE = 1,
|
|
+ TT_COMPAT = 2,
|
|
+};
|
|
+
|
|
+struct compat_timespec;
|
|
+
|
|
+struct pollfd;
|
|
+
|
|
+struct restart_block {
|
|
+ long int (*fn)(struct restart_block *);
|
|
+ union {
|
|
+ struct {
|
|
+ u32 *uaddr;
|
|
+ u32 val;
|
|
+ u32 flags;
|
|
+ u32 bitset;
|
|
+ u64 time;
|
|
+ u32 *uaddr2;
|
|
+ } futex;
|
|
+ struct {
|
|
+ clockid_t clockid;
|
|
+ enum timespec_type type;
|
|
+ union {
|
|
+ struct timespec *rmtp;
|
|
+ struct compat_timespec *compat_rmtp;
|
|
+ };
|
|
+ u64 expires;
|
|
+ } nanosleep;
|
|
+ struct {
|
|
+ struct pollfd *ufds;
|
|
+ int nfds;
|
|
+ int has_timeout;
|
|
+ long unsigned int tv_sec;
|
|
+ long unsigned int tv_nsec;
|
|
+ } poll;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct screen_info {
|
|
+ __u8 orig_x;
|
|
+ __u8 orig_y;
|
|
+ __u16 ext_mem_k;
|
|
+ __u16 orig_video_page;
|
|
+ __u8 orig_video_mode;
|
|
+ __u8 orig_video_cols;
|
|
+ __u8 flags;
|
|
+ __u8 unused2;
|
|
+ __u16 orig_video_ega_bx;
|
|
+ __u16 unused3;
|
|
+ __u8 orig_video_lines;
|
|
+ __u8 orig_video_isVGA;
|
|
+ __u16 orig_video_points;
|
|
+ __u16 lfb_width;
|
|
+ __u16 lfb_height;
|
|
+ __u16 lfb_depth;
|
|
+ __u32 lfb_base;
|
|
+ __u32 lfb_size;
|
|
+ __u16 cl_magic;
|
|
+ __u16 cl_offset;
|
|
+ __u16 lfb_linelength;
|
|
+ __u8 red_size;
|
|
+ __u8 red_pos;
|
|
+ __u8 green_size;
|
|
+ __u8 green_pos;
|
|
+ __u8 blue_size;
|
|
+ __u8 blue_pos;
|
|
+ __u8 rsvd_size;
|
|
+ __u8 rsvd_pos;
|
|
+ __u16 vesapm_seg;
|
|
+ __u16 vesapm_off;
|
|
+ __u16 pages;
|
|
+ __u16 vesa_attributes;
|
|
+ __u32 capabilities;
|
|
+ __u32 ext_lfb_base;
|
|
+ __u8 _reserved[2];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct apm_bios_info {
|
|
+ __u16 version;
|
|
+ __u16 cseg;
|
|
+ __u32 offset;
|
|
+ __u16 cseg_16;
|
|
+ __u16 dseg;
|
|
+ __u16 flags;
|
|
+ __u16 cseg_len;
|
|
+ __u16 cseg_16_len;
|
|
+ __u16 dseg_len;
|
|
+};
|
|
+
|
|
+struct apm_info {
|
|
+ struct apm_bios_info bios;
|
|
+ short unsigned int connection_version;
|
|
+ int get_power_status_broken;
|
|
+ int get_power_status_swabinminutes;
|
|
+ int allow_ints;
|
|
+ int forbid_idle;
|
|
+ int realmode_power_off;
|
|
+ int disabled;
|
|
+};
|
|
+
|
|
+struct edd_device_params {
|
|
+ __u16 length;
|
|
+ __u16 info_flags;
|
|
+ __u32 num_default_cylinders;
|
|
+ __u32 num_default_heads;
|
|
+ __u32 sectors_per_track;
|
|
+ __u64 number_of_sectors;
|
|
+ __u16 bytes_per_sector;
|
|
+ __u32 dpte_ptr;
|
|
+ __u16 key;
|
|
+ __u8 device_path_info_length;
|
|
+ __u8 reserved2;
|
|
+ __u16 reserved3;
|
|
+ __u8 host_bus_type[4];
|
|
+ __u8 interface_type[8];
|
|
+ union {
|
|
+ struct {
|
|
+ __u16 base_address;
|
|
+ __u16 reserved1;
|
|
+ __u32 reserved2;
|
|
+ } isa;
|
|
+ struct {
|
|
+ __u8 bus;
|
|
+ __u8 slot;
|
|
+ __u8 function;
|
|
+ __u8 channel;
|
|
+ __u32 reserved;
|
|
+ } pci;
|
|
+ struct {
|
|
+ __u64 reserved;
|
|
+ } ibnd;
|
|
+ struct {
|
|
+ __u64 reserved;
|
|
+ } xprs;
|
|
+ struct {
|
|
+ __u64 reserved;
|
|
+ } htpt;
|
|
+ struct {
|
|
+ __u64 reserved;
|
|
+ } unknown;
|
|
+ } interface_path;
|
|
+ union {
|
|
+ struct {
|
|
+ __u8 device;
|
|
+ __u8 reserved1;
|
|
+ __u16 reserved2;
|
|
+ __u32 reserved3;
|
|
+ __u64 reserved4;
|
|
+ } ata;
|
|
+ struct {
|
|
+ __u8 device;
|
|
+ __u8 lun;
|
|
+ __u8 reserved1;
|
|
+ __u8 reserved2;
|
|
+ __u32 reserved3;
|
|
+ __u64 reserved4;
|
|
+ } atapi;
|
|
+ struct {
|
|
+ __u16 id;
|
|
+ __u64 lun;
|
|
+ __u16 reserved1;
|
|
+ __u32 reserved2;
|
|
+ } __attribute__((packed)) scsi;
|
|
+ struct {
|
|
+ __u64 serial_number;
|
|
+ __u64 reserved;
|
|
+ } usb;
|
|
+ struct {
|
|
+ __u64 eui;
|
|
+ __u64 reserved;
|
|
+ } i1394;
|
|
+ struct {
|
|
+ __u64 wwid;
|
|
+ __u64 lun;
|
|
+ } fibre;
|
|
+ struct {
|
|
+ __u64 identity_tag;
|
|
+ __u64 reserved;
|
|
+ } i2o;
|
|
+ struct {
|
|
+ __u32 array_number;
|
|
+ __u32 reserved1;
|
|
+ __u64 reserved2;
|
|
+ } raid;
|
|
+ struct {
|
|
+ __u8 device;
|
|
+ __u8 reserved1;
|
|
+ __u16 reserved2;
|
|
+ __u32 reserved3;
|
|
+ __u64 reserved4;
|
|
+ } sata;
|
|
+ struct {
|
|
+ __u64 reserved1;
|
|
+ __u64 reserved2;
|
|
+ } unknown;
|
|
+ } device_path;
|
|
+ __u8 reserved4;
|
|
+ __u8 checksum;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct edd_info {
|
|
+ __u8 device;
|
|
+ __u8 version;
|
|
+ __u16 interface_support;
|
|
+ __u16 legacy_max_cylinder;
|
|
+ __u8 legacy_max_head;
|
|
+ __u8 legacy_sectors_per_track;
|
|
+ struct edd_device_params params;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct edd {
|
|
+ unsigned int mbr_signature[16];
|
|
+ struct edd_info edd_info[6];
|
|
+ unsigned char mbr_signature_nr;
|
|
+ unsigned char edd_info_nr;
|
|
+};
|
|
+
|
|
+struct ist_info {
|
|
+ __u32 signature;
|
|
+ __u32 command;
|
|
+ __u32 event;
|
|
+ __u32 perf_level;
|
|
+};
|
|
+
|
|
+struct edid_info {
|
|
+ unsigned char dummy[128];
|
|
+};
|
|
+
|
|
+struct setup_header {
|
|
+ __u8 setup_sects;
|
|
+ __u16 root_flags;
|
|
+ __u32 syssize;
|
|
+ __u16 ram_size;
|
|
+ __u16 vid_mode;
|
|
+ __u16 root_dev;
|
|
+ __u16 boot_flag;
|
|
+ __u16 jump;
|
|
+ __u32 header;
|
|
+ __u16 version;
|
|
+ __u32 realmode_swtch;
|
|
+ __u16 start_sys_seg;
|
|
+ __u16 kernel_version;
|
|
+ __u8 type_of_loader;
|
|
+ __u8 loadflags;
|
|
+ __u16 setup_move_size;
|
|
+ __u32 code32_start;
|
|
+ __u32 ramdisk_image;
|
|
+ __u32 ramdisk_size;
|
|
+ __u32 bootsect_kludge;
|
|
+ __u16 heap_end_ptr;
|
|
+ __u8 ext_loader_ver;
|
|
+ __u8 ext_loader_type;
|
|
+ __u32 cmd_line_ptr;
|
|
+ __u32 initrd_addr_max;
|
|
+ __u32 kernel_alignment;
|
|
+ __u8 relocatable_kernel;
|
|
+ __u8 min_alignment;
|
|
+ __u16 xloadflags;
|
|
+ __u32 cmdline_size;
|
|
+ __u32 hardware_subarch;
|
|
+ __u64 hardware_subarch_data;
|
|
+ __u32 payload_offset;
|
|
+ __u32 payload_length;
|
|
+ __u64 setup_data;
|
|
+ __u64 pref_address;
|
|
+ __u32 init_size;
|
|
+ __u32 handover_offset;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct sys_desc_table {
|
|
+ __u16 length;
|
|
+ __u8 table[14];
|
|
+};
|
|
+
|
|
+struct olpc_ofw_header {
|
|
+ __u32 ofw_magic;
|
|
+ __u32 ofw_version;
|
|
+ __u32 cif_handler;
|
|
+ __u32 irq_desc_table;
|
|
+};
|
|
+
|
|
+struct efi_info {
|
|
+ __u32 efi_loader_signature;
|
|
+ __u32 efi_systab;
|
|
+ __u32 efi_memdesc_size;
|
|
+ __u32 efi_memdesc_version;
|
|
+ __u32 efi_memmap;
|
|
+ __u32 efi_memmap_size;
|
|
+ __u32 efi_systab_hi;
|
|
+ __u32 efi_memmap_hi;
|
|
+};
|
|
+
|
|
+struct boot_e820_entry {
|
|
+ __u64 addr;
|
|
+ __u64 size;
|
|
+ __u32 type;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct boot_params {
|
|
+ struct screen_info screen_info;
|
|
+ struct apm_bios_info apm_bios_info;
|
|
+ __u8 _pad2[4];
|
|
+ __u64 tboot_addr;
|
|
+ struct ist_info ist_info;
|
|
+ __u8 _pad3[16];
|
|
+ __u8 hd0_info[16];
|
|
+ __u8 hd1_info[16];
|
|
+ struct sys_desc_table sys_desc_table;
|
|
+ struct olpc_ofw_header olpc_ofw_header;
|
|
+ __u32 ext_ramdisk_image;
|
|
+ __u32 ext_ramdisk_size;
|
|
+ __u32 ext_cmd_line_ptr;
|
|
+ __u8 _pad4[116];
|
|
+ struct edid_info edid_info;
|
|
+ struct efi_info efi_info;
|
|
+ __u32 alt_mem_k;
|
|
+ __u32 scratch;
|
|
+ __u8 e820_entries;
|
|
+ __u8 eddbuf_entries;
|
|
+ __u8 edd_mbr_sig_buf_entries;
|
|
+ __u8 kbd_status;
|
|
+ __u8 secure_boot;
|
|
+ __u8 _pad5[2];
|
|
+ __u8 sentinel;
|
|
+ __u8 _pad6[1];
|
|
+ struct setup_header hdr;
|
|
+ __u8 _pad7[40];
|
|
+ __u32 edd_mbr_sig_buffer[16];
|
|
+ struct boot_e820_entry e820_table[128];
|
|
+ __u8 _pad8[48];
|
|
+ struct edd_info eddbuf[6];
|
|
+ __u8 _pad9[276];
|
|
+} __attribute__((packed));
|
|
+
|
|
+enum x86_hardware_subarch {
|
|
+ X86_SUBARCH_PC = 0,
|
|
+ X86_SUBARCH_LGUEST = 1,
|
|
+ X86_SUBARCH_XEN = 2,
|
|
+ X86_SUBARCH_INTEL_MID = 3,
|
|
+ X86_SUBARCH_CE4100 = 4,
|
|
+ X86_NR_SUBARCHS = 5,
|
|
+};
|
|
+
|
|
+struct range {
|
|
+ u64 start;
|
|
+ u64 end;
|
|
+};
|
|
+
|
|
+struct pt_regs {
|
|
+ long unsigned int r15;
|
|
+ long unsigned int r14;
|
|
+ long unsigned int r13;
|
|
+ long unsigned int r12;
|
|
+ long unsigned int bp;
|
|
+ long unsigned int bx;
|
|
+ long unsigned int r11;
|
|
+ long unsigned int r10;
|
|
+ long unsigned int r9;
|
|
+ long unsigned int r8;
|
|
+ long unsigned int ax;
|
|
+ long unsigned int cx;
|
|
+ long unsigned int dx;
|
|
+ long unsigned int si;
|
|
+ long unsigned int di;
|
|
+ long unsigned int orig_ax;
|
|
+ long unsigned int ip;
|
|
+ long unsigned int cs;
|
|
+ long unsigned int flags;
|
|
+ long unsigned int sp;
|
|
+ long unsigned int ss;
|
|
+};
|
|
+
|
|
+struct desc_struct {
|
|
+ u16 limit0;
|
|
+ u16 base0;
|
|
+ u16 base1: 8;
|
|
+ u16 type: 4;
|
|
+ u16 s: 1;
|
|
+ u16 dpl: 2;
|
|
+ u16 p: 1;
|
|
+ u16 limit1: 4;
|
|
+ u16 avl: 1;
|
|
+ u16 l: 1;
|
|
+ u16 d: 1;
|
|
+ u16 g: 1;
|
|
+ u16 base2: 8;
|
|
+};
|
|
+
|
|
+struct idt_bits {
|
|
+ u16 ist: 3;
|
|
+ u16 zero: 5;
|
|
+ u16 type: 5;
|
|
+ u16 dpl: 2;
|
|
+ u16 p: 1;
|
|
+};
|
|
+
|
|
+struct gate_struct {
|
|
+ u16 offset_low;
|
|
+ u16 segment;
|
|
+ struct idt_bits bits;
|
|
+ u16 offset_middle;
|
|
+ u32 offset_high;
|
|
+ u32 reserved;
|
|
+};
|
|
+
|
|
+typedef struct gate_struct gate_desc;
|
|
+
|
|
+struct desc_ptr {
|
|
+ short unsigned int size;
|
|
+ long unsigned int address;
|
|
+} __attribute__((packed));
|
|
+
|
|
+typedef long unsigned int pteval_t;
|
|
+
|
|
+typedef long unsigned int pmdval_t;
|
|
+
|
|
+typedef long unsigned int pudval_t;
|
|
+
|
|
+typedef long unsigned int p4dval_t;
|
|
+
|
|
+typedef long unsigned int pgdval_t;
|
|
+
|
|
+typedef long unsigned int pgprotval_t;
|
|
+
|
|
+typedef struct {
|
|
+ pteval_t pte;
|
|
+} pte_t;
|
|
+
|
|
+struct pgprot {
|
|
+ pgprotval_t pgprot;
|
|
+};
|
|
+
|
|
+typedef struct pgprot pgprot_t;
|
|
+
|
|
+typedef struct {
|
|
+ pgdval_t pgd;
|
|
+} pgd_t;
|
|
+
|
|
+typedef struct {
|
|
+ p4dval_t p4d;
|
|
+} p4d_t;
|
|
+
|
|
+typedef struct {
|
|
+ pudval_t pud;
|
|
+} pud_t;
|
|
+
|
|
+typedef struct {
|
|
+ pmdval_t pmd;
|
|
+} pmd_t;
|
|
+
|
|
+typedef struct page *pgtable_t;
|
|
+
|
|
+struct address_space;
|
|
+
|
|
+struct kmem_cache;
|
|
+
|
|
+struct mm_struct;
|
|
+
|
|
+struct dev_pagemap;
|
|
+
|
|
+struct mem_cgroup;
|
|
+
|
|
+struct page {
|
|
+ long unsigned int flags;
|
|
+ union {
|
|
+ struct {
|
|
+ struct list_head lru;
|
|
+ struct address_space *mapping;
|
|
+ long unsigned int index;
|
|
+ long unsigned int private;
|
|
+ };
|
|
+ struct {
|
|
+ union {
|
|
+ struct list_head slab_list;
|
|
+ struct {
|
|
+ struct page *next;
|
|
+ int pages;
|
|
+ int pobjects;
|
|
+ };
|
|
+ };
|
|
+ struct kmem_cache *slab_cache;
|
|
+ void *freelist;
|
|
+ union {
|
|
+ void *s_mem;
|
|
+ long unsigned int counters;
|
|
+ struct {
|
|
+ unsigned int inuse: 16;
|
|
+ unsigned int objects: 15;
|
|
+ unsigned int frozen: 1;
|
|
+ };
|
|
+ };
|
|
+ };
|
|
+ struct {
|
|
+ long unsigned int compound_head;
|
|
+ unsigned char compound_dtor;
|
|
+ unsigned char compound_order;
|
|
+ atomic_t compound_mapcount;
|
|
+ };
|
|
+ struct {
|
|
+ long unsigned int _compound_pad_1;
|
|
+ long unsigned int _compound_pad_2;
|
|
+ struct list_head deferred_list;
|
|
+ };
|
|
+ struct {
|
|
+ long unsigned int _pt_pad_1;
|
|
+ pgtable_t pmd_huge_pte;
|
|
+ long unsigned int _pt_pad_2;
|
|
+ union {
|
|
+ struct mm_struct *pt_mm;
|
|
+ atomic_t pt_frag_refcount;
|
|
+ };
|
|
+ spinlock_t ptl;
|
|
+ };
|
|
+ struct {
|
|
+ struct dev_pagemap *pgmap;
|
|
+ long unsigned int hmm_data;
|
|
+ long unsigned int _zd_pad_1;
|
|
+ };
|
|
+ struct callback_head callback_head;
|
|
+ };
|
|
+ union {
|
|
+ atomic_t _mapcount;
|
|
+ unsigned int page_type;
|
|
+ unsigned int active;
|
|
+ int units;
|
|
+ };
|
|
+ atomic_t _refcount;
|
|
+ struct mem_cgroup *mem_cgroup;
|
|
+};
|
|
+
|
|
+struct paravirt_callee_save {
|
|
+ void *func;
|
|
+};
|
|
+
|
|
+struct pv_info {
|
|
+ unsigned int kernel_rpl;
|
|
+ int shared_kernel_pmd;
|
|
+ u16 extra_user_64bit_cs;
|
|
+ const char *name;
|
|
+};
|
|
+
|
|
+struct pv_init_ops {
|
|
+ unsigned int (*patch)(u8, u16, void *, long unsigned int, unsigned int);
|
|
+};
|
|
+
|
|
+struct pv_lazy_ops {
|
|
+ void (*enter)();
|
|
+ void (*leave)();
|
|
+ void (*flush)();
|
|
+};
|
|
+
|
|
+struct pv_time_ops {
|
|
+ long long unsigned int (*sched_clock)();
|
|
+ long long unsigned int (*steal_clock)(int);
|
|
+};
|
|
+
|
|
+struct thread_struct;
|
|
+
|
|
+struct task_struct;
|
|
+
|
|
+struct pv_cpu_ops {
|
|
+ long unsigned int (*get_debugreg)(int);
|
|
+ void (*set_debugreg)(int, long unsigned int);
|
|
+ long unsigned int (*read_cr0)();
|
|
+ void (*write_cr0)(long unsigned int);
|
|
+ void (*write_cr4)(long unsigned int);
|
|
+ long unsigned int (*read_cr8)();
|
|
+ void (*write_cr8)(long unsigned int);
|
|
+ void (*load_tr_desc)();
|
|
+ void (*load_gdt)(const struct desc_ptr *);
|
|
+ void (*load_idt)(const struct desc_ptr *);
|
|
+ void (*set_ldt)(const void *, unsigned int);
|
|
+ long unsigned int (*store_tr)();
|
|
+ void (*load_tls)(struct thread_struct *, unsigned int);
|
|
+ void (*load_gs_index)(unsigned int);
|
|
+ void (*write_ldt_entry)(struct desc_struct *, int, const void *);
|
|
+ void (*write_gdt_entry)(struct desc_struct *, int, const void *, int);
|
|
+ void (*write_idt_entry)(gate_desc *, int, const gate_desc *);
|
|
+ void (*alloc_ldt)(struct desc_struct *, unsigned int);
|
|
+ void (*free_ldt)(struct desc_struct *, unsigned int);
|
|
+ void (*load_sp0)(long unsigned int);
|
|
+ void (*set_iopl_mask)(unsigned int);
|
|
+ void (*wbinvd)();
|
|
+ void (*io_delay)();
|
|
+ void (*cpuid)(unsigned int *, unsigned int *, unsigned int *, unsigned int *);
|
|
+ u64 (*read_msr)(unsigned int);
|
|
+ void (*write_msr)(unsigned int, unsigned int, unsigned int);
|
|
+ u64 (*read_msr_safe)(unsigned int, int *);
|
|
+ int (*write_msr_safe)(unsigned int, unsigned int, unsigned int);
|
|
+ u64 (*read_pmc)(int);
|
|
+ void (*usergs_sysret64)();
|
|
+ void (*iret)();
|
|
+ void (*swapgs)();
|
|
+ void (*start_context_switch)(struct task_struct *);
|
|
+ void (*end_context_switch)(struct task_struct *);
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ long unsigned int seg;
|
|
+} mm_segment_t;
|
|
+
|
|
+struct fregs_state {
|
|
+ u32 cwd;
|
|
+ u32 swd;
|
|
+ u32 twd;
|
|
+ u32 fip;
|
|
+ u32 fcs;
|
|
+ u32 foo;
|
|
+ u32 fos;
|
|
+ u32 st_space[20];
|
|
+ u32 status;
|
|
+};
|
|
+
|
|
+struct fxregs_state {
|
|
+ u16 cwd;
|
|
+ u16 swd;
|
|
+ u16 twd;
|
|
+ u16 fop;
|
|
+ union {
|
|
+ struct {
|
|
+ u64 rip;
|
|
+ u64 rdp;
|
|
+ };
|
|
+ struct {
|
|
+ u32 fip;
|
|
+ u32 fcs;
|
|
+ u32 foo;
|
|
+ u32 fos;
|
|
+ };
|
|
+ };
|
|
+ u32 mxcsr;
|
|
+ u32 mxcsr_mask;
|
|
+ u32 st_space[32];
|
|
+ u32 xmm_space[64];
|
|
+ u32 padding[12];
|
|
+ union {
|
|
+ u32 padding1[12];
|
|
+ u32 sw_reserved[12];
|
|
+ };
|
|
+};
|
|
+
|
|
+struct math_emu_info;
|
|
+
|
|
+struct swregs_state {
|
|
+ u32 cwd;
|
|
+ u32 swd;
|
|
+ u32 twd;
|
|
+ u32 fip;
|
|
+ u32 fcs;
|
|
+ u32 foo;
|
|
+ u32 fos;
|
|
+ u32 st_space[20];
|
|
+ u8 ftop;
|
|
+ u8 changed;
|
|
+ u8 lookahead;
|
|
+ u8 no_update;
|
|
+ u8 rm;
|
|
+ u8 alimit;
|
|
+ struct math_emu_info *info;
|
|
+ u32 entry_eip;
|
|
+};
|
|
+
|
|
+struct xstate_header {
|
|
+ u64 xfeatures;
|
|
+ u64 xcomp_bv;
|
|
+ u64 reserved[6];
|
|
+};
|
|
+
|
|
+struct xregs_state {
|
|
+ struct fxregs_state i387;
|
|
+ struct xstate_header header;
|
|
+ u8 extended_state_area[0];
|
|
+};
|
|
+
|
|
+union fpregs_state {
|
|
+ struct fregs_state fsave;
|
|
+ struct fxregs_state fxsave;
|
|
+ struct swregs_state soft;
|
|
+ struct xregs_state xsave;
|
|
+ u8 __padding[4096];
|
|
+};
|
|
+
|
|
+struct fpu {
|
|
+ unsigned int last_cpu;
|
|
+ unsigned char initialized;
|
|
+ long: 24;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ union fpregs_state state;
|
|
+};
|
|
+
|
|
+struct perf_event;
|
|
+
|
|
+struct thread_struct {
|
|
+ struct desc_struct tls_array[3];
|
|
+ long unsigned int sp;
|
|
+ short unsigned int es;
|
|
+ short unsigned int ds;
|
|
+ short unsigned int fsindex;
|
|
+ short unsigned int gsindex;
|
|
+ long unsigned int fsbase;
|
|
+ long unsigned int gsbase;
|
|
+ struct perf_event *ptrace_bps[4];
|
|
+ long unsigned int debugreg6;
|
|
+ long unsigned int ptrace_dr7;
|
|
+ long unsigned int cr2;
|
|
+ long unsigned int trap_nr;
|
|
+ long unsigned int error_code;
|
|
+ long unsigned int *io_bitmap_ptr;
|
|
+ long unsigned int iopl;
|
|
+ unsigned int io_bitmap_max;
|
|
+ mm_segment_t addr_limit;
|
|
+ unsigned int sig_on_uaccess_err: 1;
|
|
+ unsigned int uaccess_err: 1;
|
|
+ long: 62;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct fpu fpu;
|
|
+};
|
|
+
|
|
+struct thread_info {
|
|
+ long unsigned int flags;
|
|
+ u32 status;
|
|
+};
|
|
+
|
|
+struct llist_node {
|
|
+ struct llist_node *next;
|
|
+};
|
|
+
|
|
+struct load_weight {
|
|
+ long unsigned int weight;
|
|
+ u32 inv_weight;
|
|
+};
|
|
+
|
|
+struct rb_node {
|
|
+ long unsigned int __rb_parent_color;
|
|
+ struct rb_node *rb_right;
|
|
+ struct rb_node *rb_left;
|
|
+};
|
|
+
|
|
+struct sched_statistics {
|
|
+ u64 wait_start;
|
|
+ u64 wait_max;
|
|
+ u64 wait_count;
|
|
+ u64 wait_sum;
|
|
+ u64 iowait_count;
|
|
+ u64 iowait_sum;
|
|
+ u64 sleep_start;
|
|
+ u64 sleep_max;
|
|
+ s64 sum_sleep_runtime;
|
|
+ u64 block_start;
|
|
+ u64 block_max;
|
|
+ u64 exec_max;
|
|
+ u64 slice_max;
|
|
+ u64 nr_migrations_cold;
|
|
+ u64 nr_failed_migrations_affine;
|
|
+ u64 nr_failed_migrations_running;
|
|
+ u64 nr_failed_migrations_hot;
|
|
+ u64 nr_forced_migrations;
|
|
+ u64 nr_wakeups;
|
|
+ u64 nr_wakeups_sync;
|
|
+ u64 nr_wakeups_migrate;
|
|
+ u64 nr_wakeups_local;
|
|
+ u64 nr_wakeups_remote;
|
|
+ u64 nr_wakeups_affine;
|
|
+ u64 nr_wakeups_affine_attempts;
|
|
+ u64 nr_wakeups_passive;
|
|
+ u64 nr_wakeups_idle;
|
|
+};
|
|
+
|
|
+struct util_est {
|
|
+ unsigned int enqueued;
|
|
+ unsigned int ewma;
|
|
+};
|
|
+
|
|
+struct sched_avg {
|
|
+ u64 last_update_time;
|
|
+ u64 load_sum;
|
|
+ u64 runnable_load_sum;
|
|
+ u32 util_sum;
|
|
+ u32 period_contrib;
|
|
+ long unsigned int load_avg;
|
|
+ long unsigned int runnable_load_avg;
|
|
+ long unsigned int util_avg;
|
|
+ struct util_est util_est;
|
|
+};
|
|
+
|
|
+struct cfs_rq;
|
|
+
|
|
+struct sched_entity {
|
|
+ struct load_weight load;
|
|
+ long unsigned int runnable_weight;
|
|
+ struct rb_node run_node;
|
|
+ struct list_head group_node;
|
|
+ unsigned int on_rq;
|
|
+ u64 exec_start;
|
|
+ u64 sum_exec_runtime;
|
|
+ u64 vruntime;
|
|
+ u64 prev_sum_exec_runtime;
|
|
+ u64 nr_migrations;
|
|
+ struct sched_statistics statistics;
|
|
+ int depth;
|
|
+ struct sched_entity *parent;
|
|
+ struct cfs_rq *cfs_rq;
|
|
+ struct cfs_rq *my_q;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct sched_avg avg;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct rt_rq;
|
|
+
|
|
+struct sched_rt_entity {
|
|
+ struct list_head run_list;
|
|
+ long unsigned int timeout;
|
|
+ long unsigned int watchdog_stamp;
|
|
+ unsigned int time_slice;
|
|
+ short unsigned int on_rq;
|
|
+ short unsigned int on_list;
|
|
+ struct sched_rt_entity *back;
|
|
+ struct sched_rt_entity *parent;
|
|
+ struct rt_rq *rt_rq;
|
|
+ struct rt_rq *my_q;
|
|
+};
|
|
+
|
|
+typedef s64 ktime_t;
|
|
+
|
|
+struct timerqueue_node {
|
|
+ struct rb_node node;
|
|
+ ktime_t expires;
|
|
+};
|
|
+
|
|
+enum hrtimer_restart {
|
|
+ HRTIMER_NORESTART = 0,
|
|
+ HRTIMER_RESTART = 1,
|
|
+};
|
|
+
|
|
+struct hrtimer_clock_base;
|
|
+
|
|
+struct hrtimer {
|
|
+ struct timerqueue_node node;
|
|
+ ktime_t _softexpires;
|
|
+ enum hrtimer_restart (*function)(struct hrtimer *);
|
|
+ struct hrtimer_clock_base *base;
|
|
+ u8 state;
|
|
+ u8 is_rel;
|
|
+ u8 is_soft;
|
|
+};
|
|
+
|
|
+struct sched_dl_entity {
|
|
+ struct rb_node rb_node;
|
|
+ u64 dl_runtime;
|
|
+ u64 dl_deadline;
|
|
+ u64 dl_period;
|
|
+ u64 dl_bw;
|
|
+ u64 dl_density;
|
|
+ s64 runtime;
|
|
+ u64 deadline;
|
|
+ unsigned int flags;
|
|
+ unsigned int dl_throttled: 1;
|
|
+ unsigned int dl_boosted: 1;
|
|
+ unsigned int dl_yielded: 1;
|
|
+ unsigned int dl_non_contending: 1;
|
|
+ unsigned int dl_overrun: 1;
|
|
+ struct hrtimer dl_timer;
|
|
+ struct hrtimer inactive_timer;
|
|
+};
|
|
+
|
|
+struct cpumask {
|
|
+ long unsigned int bits[128];
|
|
+};
|
|
+
|
|
+typedef struct cpumask cpumask_t;
|
|
+
|
|
+struct sched_info {
|
|
+ long unsigned int pcount;
|
|
+ long long unsigned int run_delay;
|
|
+ long long unsigned int last_arrival;
|
|
+ long long unsigned int last_queued;
|
|
+};
|
|
+
|
|
+struct plist_node {
|
|
+ int prio;
|
|
+ struct list_head prio_list;
|
|
+ struct list_head node_list;
|
|
+};
|
|
+
|
|
+struct vmacache {
|
|
+ u64 seqnum;
|
|
+ struct vm_area_struct *vmas[4];
|
|
+};
|
|
+
|
|
+struct task_rss_stat {
|
|
+ int events;
|
|
+ int count[4];
|
|
+};
|
|
+
|
|
+typedef struct raw_spinlock raw_spinlock_t;
|
|
+
|
|
+struct prev_cputime {
|
|
+ u64 utime;
|
|
+ u64 stime;
|
|
+ raw_spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct seqcount {
|
|
+ unsigned int sequence;
|
|
+};
|
|
+
|
|
+typedef struct seqcount seqcount_t;
|
|
+
|
|
+enum vtime_state {
|
|
+ VTIME_INACTIVE = 0,
|
|
+ VTIME_USER = 1,
|
|
+ VTIME_SYS = 2,
|
|
+};
|
|
+
|
|
+struct vtime {
|
|
+ seqcount_t seqcount;
|
|
+ long long unsigned int starttime;
|
|
+ enum vtime_state state;
|
|
+ u64 utime;
|
|
+ u64 stime;
|
|
+ u64 gtime;
|
|
+};
|
|
+
|
|
+struct task_cputime {
|
|
+ u64 utime;
|
|
+ u64 stime;
|
|
+ long long unsigned int sum_exec_runtime;
|
|
+};
|
|
+
|
|
+struct sem_undo_list;
|
|
+
|
|
+struct sysv_sem {
|
|
+ struct sem_undo_list *undo_list;
|
|
+};
|
|
+
|
|
+struct sysv_shm {
|
|
+ struct list_head shm_clist;
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ long unsigned int sig[1];
|
|
+} sigset_t;
|
|
+
|
|
+struct sigpending {
|
|
+ struct list_head list;
|
|
+ sigset_t signal;
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ uid_t val;
|
|
+} kuid_t;
|
|
+
|
|
+struct seccomp_filter;
|
|
+
|
|
+struct seccomp {
|
|
+ int mode;
|
|
+ struct seccomp_filter *filter;
|
|
+};
|
|
+
|
|
+struct wake_q_node {
|
|
+ struct wake_q_node *next;
|
|
+};
|
|
+
|
|
+struct rb_root {
|
|
+ struct rb_node *rb_node;
|
|
+};
|
|
+
|
|
+struct rb_root_cached {
|
|
+ struct rb_root rb_root;
|
|
+ struct rb_node *rb_leftmost;
|
|
+};
|
|
+
|
|
+struct task_io_accounting {
|
|
+ u64 rchar;
|
|
+ u64 wchar;
|
|
+ u64 syscr;
|
|
+ u64 syscw;
|
|
+ u64 read_bytes;
|
|
+ u64 write_bytes;
|
|
+ u64 cancelled_write_bytes;
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ long unsigned int bits[16];
|
|
+} nodemask_t;
|
|
+
|
|
+typedef atomic64_t atomic_long_t;
|
|
+
|
|
+struct optimistic_spin_queue {
|
|
+ atomic_t tail;
|
|
+};
|
|
+
|
|
+struct mutex {
|
|
+ atomic_long_t owner;
|
|
+ spinlock_t wait_lock;
|
|
+ struct optimistic_spin_queue osq;
|
|
+ struct list_head wait_list;
|
|
+};
|
|
+
|
|
+struct arch_tlbflush_unmap_batch {
|
|
+ struct cpumask cpumask;
|
|
+};
|
|
+
|
|
+struct tlbflush_unmap_batch {
|
|
+ struct arch_tlbflush_unmap_batch arch;
|
|
+ bool flush_required;
|
|
+ bool writable;
|
|
+};
|
|
+
|
|
+struct refcount_struct {
|
|
+ atomic_t refs;
|
|
+};
|
|
+
|
|
+typedef struct refcount_struct refcount_t;
|
|
+
|
|
+struct page_frag {
|
|
+ struct page *page;
|
|
+ __u32 offset;
|
|
+ __u32 size;
|
|
+};
|
|
+
|
|
+struct sched_class;
|
|
+
|
|
+struct task_group;
|
|
+
|
|
+struct pid;
|
|
+
|
|
+struct completion;
|
|
+
|
|
+struct cred;
|
|
+
|
|
+struct nameidata;
|
|
+
|
|
+struct fs_struct;
|
|
+
|
|
+struct files_struct;
|
|
+
|
|
+struct nsproxy;
|
|
+
|
|
+struct signal_struct;
|
|
+
|
|
+struct sighand_struct;
|
|
+
|
|
+struct audit_context;
|
|
+
|
|
+struct rt_mutex_waiter;
|
|
+
|
|
+struct bio_list;
|
|
+
|
|
+struct blk_plug;
|
|
+
|
|
+struct reclaim_state;
|
|
+
|
|
+struct backing_dev_info;
|
|
+
|
|
+struct io_context;
|
|
+
|
|
+struct siginfo;
|
|
+
|
|
+typedef struct siginfo siginfo_t;
|
|
+
|
|
+struct css_set;
|
|
+
|
|
+struct robust_list_head;
|
|
+
|
|
+struct compat_robust_list_head;
|
|
+
|
|
+struct futex_pi_state;
|
|
+
|
|
+struct perf_event_context;
|
|
+
|
|
+struct mempolicy;
|
|
+
|
|
+struct numa_group;
|
|
+
|
|
+struct rseq;
|
|
+
|
|
+struct task_delay_info;
|
|
+
|
|
+struct ftrace_ret_stack;
|
|
+
|
|
+struct request_queue;
|
|
+
|
|
+struct uprobe_task;
|
|
+
|
|
+struct vm_struct;
|
|
+
|
|
+struct task_struct {
|
|
+ struct thread_info thread_info;
|
|
+ volatile long int state;
|
|
+ void *stack;
|
|
+ atomic_t usage;
|
|
+ unsigned int flags;
|
|
+ unsigned int ptrace;
|
|
+ struct llist_node wake_entry;
|
|
+ int on_cpu;
|
|
+ unsigned int cpu;
|
|
+ unsigned int wakee_flips;
|
|
+ long unsigned int wakee_flip_decay_ts;
|
|
+ struct task_struct *last_wakee;
|
|
+ int recent_used_cpu;
|
|
+ int wake_cpu;
|
|
+ int on_rq;
|
|
+ int prio;
|
|
+ int static_prio;
|
|
+ int normal_prio;
|
|
+ unsigned int rt_priority;
|
|
+ const struct sched_class *sched_class;
|
|
+ struct sched_entity se;
|
|
+ struct sched_rt_entity rt;
|
|
+ struct task_group *sched_task_group;
|
|
+ struct sched_dl_entity dl;
|
|
+ struct hlist_head preempt_notifiers;
|
|
+ unsigned int btrace_seq;
|
|
+ unsigned int policy;
|
|
+ int nr_cpus_allowed;
|
|
+ cpumask_t cpus_allowed;
|
|
+ struct sched_info sched_info;
|
|
+ struct list_head tasks;
|
|
+ struct plist_node pushable_tasks;
|
|
+ struct rb_node pushable_dl_tasks;
|
|
+ struct mm_struct *mm;
|
|
+ struct mm_struct *active_mm;
|
|
+ struct vmacache vmacache;
|
|
+ struct task_rss_stat rss_stat;
|
|
+ int exit_state;
|
|
+ int exit_code;
|
|
+ int exit_signal;
|
|
+ int pdeath_signal;
|
|
+ long unsigned int jobctl;
|
|
+ unsigned int personality;
|
|
+ unsigned int sched_reset_on_fork: 1;
|
|
+ unsigned int sched_contributes_to_load: 1;
|
|
+ unsigned int sched_migrated: 1;
|
|
+ unsigned int sched_remote_wakeup: 1;
|
|
+ int: 28;
|
|
+ unsigned int in_execve: 1;
|
|
+ unsigned int in_iowait: 1;
|
|
+ unsigned int restore_sigmask: 1;
|
|
+ unsigned int in_user_fault: 1;
|
|
+ unsigned int memcg_kmem_skip_account: 1;
|
|
+ unsigned int no_cgroup_migration: 1;
|
|
+ unsigned int use_memdelay: 1;
|
|
+ long unsigned int atomic_flags;
|
|
+ struct restart_block restart_block;
|
|
+ pid_t pid;
|
|
+ pid_t tgid;
|
|
+ long unsigned int stack_canary;
|
|
+ struct task_struct *real_parent;
|
|
+ struct task_struct *parent;
|
|
+ struct list_head children;
|
|
+ struct list_head sibling;
|
|
+ struct task_struct *group_leader;
|
|
+ struct list_head ptraced;
|
|
+ struct list_head ptrace_entry;
|
|
+ struct pid *thread_pid;
|
|
+ struct hlist_node pid_links[4];
|
|
+ struct list_head thread_group;
|
|
+ struct list_head thread_node;
|
|
+ struct completion *vfork_done;
|
|
+ int *set_child_tid;
|
|
+ int *clear_child_tid;
|
|
+ u64 utime;
|
|
+ u64 stime;
|
|
+ u64 gtime;
|
|
+ struct prev_cputime prev_cputime;
|
|
+ struct vtime vtime;
|
|
+ atomic_t tick_dep_mask;
|
|
+ long unsigned int nvcsw;
|
|
+ long unsigned int nivcsw;
|
|
+ u64 start_time;
|
|
+ u64 real_start_time;
|
|
+ long unsigned int min_flt;
|
|
+ long unsigned int maj_flt;
|
|
+ struct task_cputime cputime_expires;
|
|
+ struct list_head cpu_timers[3];
|
|
+ const struct cred *ptracer_cred;
|
|
+ const struct cred *real_cred;
|
|
+ const struct cred *cred;
|
|
+ char comm[16];
|
|
+ struct nameidata *nameidata;
|
|
+ struct sysv_sem sysvsem;
|
|
+ struct sysv_shm sysvshm;
|
|
+ long unsigned int last_switch_count;
|
|
+ long unsigned int last_switch_time;
|
|
+ struct fs_struct *fs;
|
|
+ struct files_struct *files;
|
|
+ struct nsproxy *nsproxy;
|
|
+ struct signal_struct *signal;
|
|
+ struct sighand_struct *sighand;
|
|
+ sigset_t blocked;
|
|
+ sigset_t real_blocked;
|
|
+ sigset_t saved_sigmask;
|
|
+ struct sigpending pending;
|
|
+ long unsigned int sas_ss_sp;
|
|
+ size_t sas_ss_size;
|
|
+ unsigned int sas_ss_flags;
|
|
+ struct callback_head *task_works;
|
|
+ struct audit_context *audit_context;
|
|
+ kuid_t loginuid;
|
|
+ unsigned int sessionid;
|
|
+ struct seccomp seccomp;
|
|
+ u32 parent_exec_id;
|
|
+ u32 self_exec_id;
|
|
+ spinlock_t alloc_lock;
|
|
+ raw_spinlock_t pi_lock;
|
|
+ struct wake_q_node wake_q;
|
|
+ struct rb_root_cached pi_waiters;
|
|
+ struct task_struct *pi_top_task;
|
|
+ struct rt_mutex_waiter *pi_blocked_on;
|
|
+ void *journal_info;
|
|
+ struct bio_list *bio_list;
|
|
+ struct blk_plug *plug;
|
|
+ struct reclaim_state *reclaim_state;
|
|
+ struct backing_dev_info *backing_dev_info;
|
|
+ struct io_context *io_context;
|
|
+ long unsigned int ptrace_message;
|
|
+ siginfo_t *last_siginfo;
|
|
+ struct task_io_accounting ioac;
|
|
+ u64 acct_rss_mem1;
|
|
+ u64 acct_vm_mem1;
|
|
+ u64 acct_timexpd;
|
|
+ nodemask_t mems_allowed;
|
|
+ seqcount_t mems_allowed_seq;
|
|
+ int cpuset_mem_spread_rotor;
|
|
+ int cpuset_slab_spread_rotor;
|
|
+ struct css_set *cgroups;
|
|
+ struct list_head cg_list;
|
|
+ struct robust_list_head *robust_list;
|
|
+ struct compat_robust_list_head *compat_robust_list;
|
|
+ struct list_head pi_state_list;
|
|
+ struct futex_pi_state *pi_state_cache;
|
|
+ struct perf_event_context *perf_event_ctxp[2];
|
|
+ struct mutex perf_event_mutex;
|
|
+ struct list_head perf_event_list;
|
|
+ struct mempolicy *mempolicy;
|
|
+ short int il_prev;
|
|
+ short int pref_node_fork;
|
|
+ int numa_scan_seq;
|
|
+ unsigned int numa_scan_period;
|
|
+ unsigned int numa_scan_period_max;
|
|
+ int numa_preferred_nid;
|
|
+ long unsigned int numa_migrate_retry;
|
|
+ u64 node_stamp;
|
|
+ u64 last_task_numa_placement;
|
|
+ u64 last_sum_exec_runtime;
|
|
+ struct callback_head numa_work;
|
|
+ struct numa_group *numa_group;
|
|
+ long unsigned int *numa_faults;
|
|
+ long unsigned int total_numa_faults;
|
|
+ long unsigned int numa_faults_locality[3];
|
|
+ long unsigned int numa_pages_migrated;
|
|
+ struct rseq *rseq;
|
|
+ u32 rseq_len;
|
|
+ u32 rseq_sig;
|
|
+ long unsigned int rseq_event_mask;
|
|
+ struct tlbflush_unmap_batch tlb_ubc;
|
|
+ union {
|
|
+ refcount_t rcu_users;
|
|
+ struct callback_head rcu;
|
|
+ };
|
|
+ struct pipe_inode_info *splice_pipe;
|
|
+ struct page_frag task_frag;
|
|
+ struct task_delay_info *delays;
|
|
+ int nr_dirtied;
|
|
+ int nr_dirtied_pause;
|
|
+ long unsigned int dirty_paused_when;
|
|
+ u64 timer_slack_ns;
|
|
+ u64 default_timer_slack_ns;
|
|
+ int curr_ret_stack;
|
|
+ int curr_ret_depth;
|
|
+ struct ftrace_ret_stack *ret_stack;
|
|
+ long long unsigned int ftrace_timestamp;
|
|
+ atomic_t trace_overrun;
|
|
+ atomic_t tracing_graph_pause;
|
|
+ long unsigned int trace;
|
|
+ long unsigned int trace_recursion;
|
|
+ struct mem_cgroup *memcg_in_oom;
|
|
+ gfp_t memcg_oom_gfp_mask;
|
|
+ int memcg_oom_order;
|
|
+ unsigned int memcg_nr_pages_over_high;
|
|
+ struct mem_cgroup *active_memcg;
|
|
+ struct request_queue *throttle_queue;
|
|
+ struct uprobe_task *utask;
|
|
+ int pagefault_disabled;
|
|
+ struct task_struct *oom_reaper_list;
|
|
+ struct vm_struct *stack_vm_area;
|
|
+ atomic_t stack_refcount;
|
|
+ int patch_state;
|
|
+ void *security;
|
|
+ u64 parent_exec_id_u64;
|
|
+ u64 self_exec_id_u64;
|
|
+ struct mutex *futex_exit_mutex;
|
|
+ long unsigned int futex_state;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct thread_struct thread;
|
|
+};
|
|
+
|
|
+struct pv_irq_ops {
|
|
+ struct paravirt_callee_save save_fl;
|
|
+ struct paravirt_callee_save restore_fl;
|
|
+ struct paravirt_callee_save irq_disable;
|
|
+ struct paravirt_callee_save irq_enable;
|
|
+ void (*safe_halt)();
|
|
+ void (*halt)();
|
|
+};
|
|
+
|
|
+struct flush_tlb_info;
|
|
+
|
|
+struct mmu_gather;
|
|
+
|
|
+struct pv_mmu_ops {
|
|
+ long unsigned int (*read_cr2)();
|
|
+ void (*write_cr2)(long unsigned int);
|
|
+ long unsigned int (*read_cr3)();
|
|
+ void (*write_cr3)(long unsigned int);
|
|
+ void (*activate_mm)(struct mm_struct *, struct mm_struct *);
|
|
+ void (*dup_mmap)(struct mm_struct *, struct mm_struct *);
|
|
+ void (*exit_mmap)(struct mm_struct *);
|
|
+ void (*flush_tlb_user)();
|
|
+ void (*flush_tlb_kernel)();
|
|
+ void (*flush_tlb_one_user)(long unsigned int);
|
|
+ void (*flush_tlb_others)(const struct cpumask *, const struct flush_tlb_info *);
|
|
+ void (*tlb_remove_table)(struct mmu_gather *, void *);
|
|
+ int (*pgd_alloc)(struct mm_struct *);
|
|
+ void (*pgd_free)(struct mm_struct *, pgd_t *);
|
|
+ void (*alloc_pte)(struct mm_struct *, long unsigned int);
|
|
+ void (*alloc_pmd)(struct mm_struct *, long unsigned int);
|
|
+ void (*alloc_pud)(struct mm_struct *, long unsigned int);
|
|
+ void (*alloc_p4d)(struct mm_struct *, long unsigned int);
|
|
+ void (*release_pte)(long unsigned int);
|
|
+ void (*release_pmd)(long unsigned int);
|
|
+ void (*release_pud)(long unsigned int);
|
|
+ void (*release_p4d)(long unsigned int);
|
|
+ void (*set_pte)(pte_t *, pte_t);
|
|
+ void (*set_pte_at)(struct mm_struct *, long unsigned int, pte_t *, pte_t);
|
|
+ void (*set_pmd)(pmd_t *, pmd_t);
|
|
+ pte_t (*ptep_modify_prot_start)(struct mm_struct *, long unsigned int, pte_t *);
|
|
+ void (*ptep_modify_prot_commit)(struct mm_struct *, long unsigned int, pte_t *, pte_t);
|
|
+ struct paravirt_callee_save pte_val;
|
|
+ struct paravirt_callee_save make_pte;
|
|
+ struct paravirt_callee_save pgd_val;
|
|
+ struct paravirt_callee_save make_pgd;
|
|
+ void (*set_pud)(pud_t *, pud_t);
|
|
+ struct paravirt_callee_save pmd_val;
|
|
+ struct paravirt_callee_save make_pmd;
|
|
+ struct paravirt_callee_save pud_val;
|
|
+ struct paravirt_callee_save make_pud;
|
|
+ void (*set_p4d)(p4d_t *, p4d_t);
|
|
+ struct paravirt_callee_save p4d_val;
|
|
+ struct paravirt_callee_save make_p4d;
|
|
+ void (*set_pgd)(pgd_t *, pgd_t);
|
|
+ struct pv_lazy_ops lazy_mode;
|
|
+ void (*set_fixmap)(unsigned int, phys_addr_t, pgprot_t);
|
|
+};
|
|
+
|
|
+struct rw_semaphore {
|
|
+ atomic_long_t count;
|
|
+ struct list_head wait_list;
|
|
+ raw_spinlock_t wait_lock;
|
|
+ struct optimistic_spin_queue osq;
|
|
+ struct task_struct *owner;
|
|
+};
|
|
+
|
|
+struct mm_rss_stat {
|
|
+ atomic_long_t count[4];
|
|
+};
|
|
+
|
|
+struct ldt_struct;
|
|
+
|
|
+struct vdso_image;
|
|
+
|
|
+typedef struct {
|
|
+ u64 ctx_id;
|
|
+ atomic64_t tlb_gen;
|
|
+ struct rw_semaphore ldt_usr_sem;
|
|
+ struct ldt_struct *ldt;
|
|
+ short unsigned int ia32_compat;
|
|
+ struct mutex lock;
|
|
+ void *vdso;
|
|
+ const struct vdso_image *vdso_image;
|
|
+ atomic_t perf_rdpmc_allowed;
|
|
+ u16 pkey_allocation_map;
|
|
+ s16 execute_only_pkey;
|
|
+} mm_context_t;
|
|
+
|
|
+struct xol_area;
|
|
+
|
|
+struct uprobes_state {
|
|
+ struct xol_area *xol_area;
|
|
+};
|
|
+
|
|
+struct work_struct;
|
|
+
|
|
+typedef void (*work_func_t)(struct work_struct *);
|
|
+
|
|
+struct work_struct {
|
|
+ atomic_long_t data;
|
|
+ struct list_head entry;
|
|
+ work_func_t func;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct kvm;
|
|
+
|
|
+struct linux_binfmt;
|
|
+
|
|
+struct core_state;
|
|
+
|
|
+struct kioctx_table;
|
|
+
|
|
+struct user_namespace;
|
|
+
|
|
+struct mmu_notifier_mm;
|
|
+
|
|
+struct hmm;
|
|
+
|
|
+struct mm_struct {
|
|
+ struct {
|
|
+ struct vm_area_struct *mmap;
|
|
+ struct rb_root mm_rb;
|
|
+ u64 vmacache_seqnum;
|
|
+ long unsigned int (*get_unmapped_area)(struct file *, long unsigned int, long unsigned int, long unsigned int, long unsigned int);
|
|
+ long unsigned int mmap_base;
|
|
+ long unsigned int mmap_legacy_base;
|
|
+ long unsigned int mmap_compat_base;
|
|
+ long unsigned int mmap_compat_legacy_base;
|
|
+ long unsigned int task_size;
|
|
+ long unsigned int highest_vm_end;
|
|
+ pgd_t *pgd;
|
|
+ atomic_t membarrier_state;
|
|
+ atomic_t mm_users;
|
|
+ atomic_t mm_count;
|
|
+ atomic_long_t pgtables_bytes;
|
|
+ int map_count;
|
|
+ spinlock_t page_table_lock;
|
|
+ struct rw_semaphore mmap_sem;
|
|
+ struct list_head mmlist;
|
|
+ long unsigned int hiwater_rss;
|
|
+ long unsigned int hiwater_vm;
|
|
+ long unsigned int total_vm;
|
|
+ atomic_long_t locked_vm;
|
|
+ long unsigned int pinned_vm;
|
|
+ long unsigned int data_vm;
|
|
+ long unsigned int exec_vm;
|
|
+ long unsigned int stack_vm;
|
|
+ long unsigned int def_flags;
|
|
+ spinlock_t arg_lock;
|
|
+ long unsigned int start_code;
|
|
+ long unsigned int end_code;
|
|
+ long unsigned int start_data;
|
|
+ long unsigned int end_data;
|
|
+ long unsigned int start_brk;
|
|
+ long unsigned int brk;
|
|
+ long unsigned int start_stack;
|
|
+ long unsigned int arg_start;
|
|
+ long unsigned int arg_end;
|
|
+ long unsigned int env_start;
|
|
+ long unsigned int env_end;
|
|
+ long unsigned int saved_auxv[46];
|
|
+ struct mm_rss_stat rss_stat;
|
|
+ struct linux_binfmt *binfmt;
|
|
+ mm_context_t context;
|
|
+ long unsigned int flags;
|
|
+ struct core_state *core_state;
|
|
+ spinlock_t ioctx_lock;
|
|
+ struct kioctx_table *ioctx_table;
|
|
+ struct task_struct *owner;
|
|
+ struct user_namespace *user_ns;
|
|
+ struct file *exe_file;
|
|
+ struct mmu_notifier_mm *mmu_notifier_mm;
|
|
+ long unsigned int numa_next_scan;
|
|
+ long unsigned int numa_scan_offset;
|
|
+ int numa_scan_seq;
|
|
+ atomic_t tlb_flush_pending;
|
|
+ bool tlb_flush_batched;
|
|
+ struct uprobes_state uprobes_state;
|
|
+ atomic_long_t hugetlb_usage;
|
|
+ struct work_struct async_put_work;
|
|
+ struct hmm *hmm;
|
|
+ };
|
|
+ struct kvm *kvm;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long unsigned int cpu_bitmap[0];
|
|
+};
|
|
+
|
|
+struct flush_tlb_info {
|
|
+ struct mm_struct *mm;
|
|
+ long unsigned int start;
|
|
+ long unsigned int end;
|
|
+ u64 new_tlb_gen;
|
|
+};
|
|
+
|
|
+struct qrwlock {
|
|
+ union {
|
|
+ atomic_t cnts;
|
|
+ struct {
|
|
+ u8 wlocked;
|
|
+ u8 __lstate[3];
|
|
+ };
|
|
+ };
|
|
+ arch_spinlock_t wait_lock;
|
|
+};
|
|
+
|
|
+typedef struct qrwlock arch_rwlock_t;
|
|
+
|
|
+struct pv_lock_ops {
|
|
+ void (*queued_spin_lock_slowpath)(struct qspinlock *, u32);
|
|
+ struct paravirt_callee_save queued_spin_unlock;
|
|
+ void (*wait)(u8 *, u8);
|
|
+ void (*kick)(int);
|
|
+ struct paravirt_callee_save vcpu_is_preempted;
|
|
+};
|
|
+
|
|
+struct paravirt_patch_site {
|
|
+ u8 *instr;
|
|
+ u8 instrtype;
|
|
+ u8 len;
|
|
+ u16 clobbers;
|
|
+};
|
|
+
|
|
+struct math_emu_info {
|
|
+ long int ___orig_eip;
|
|
+ struct pt_regs *regs;
|
|
+};
|
|
+
|
|
+typedef struct cpumask *cpumask_var_t;
|
|
+
|
|
+struct tracepoint_func {
|
|
+ void *func;
|
|
+ void *data;
|
|
+ int prio;
|
|
+};
|
|
+
|
|
+struct tracepoint {
|
|
+ const char *name;
|
|
+ struct static_key key;
|
|
+ int (*regfunc)();
|
|
+ void (*unregfunc)();
|
|
+ struct tracepoint_func *funcs;
|
|
+};
|
|
+
|
|
+struct cpuinfo_x86 {
|
|
+ __u8 x86;
|
|
+ __u8 x86_vendor;
|
|
+ __u8 x86_model;
|
|
+ __u8 x86_stepping;
|
|
+ int x86_tlbsize;
|
|
+ __u8 x86_virt_bits;
|
|
+ __u8 x86_phys_bits;
|
|
+ __u8 x86_coreid_bits;
|
|
+ __u8 cu_id;
|
|
+ __u32 extended_cpuid_level;
|
|
+ int cpuid_level;
|
|
+ __u32 x86_capability[20];
|
|
+ char x86_vendor_id[16];
|
|
+ char x86_model_id[64];
|
|
+ unsigned int x86_cache_size;
|
|
+ int x86_cache_alignment;
|
|
+ int x86_cache_max_rmid;
|
|
+ int x86_cache_occ_scale;
|
|
+ int x86_power;
|
|
+ long unsigned int loops_per_jiffy;
|
|
+ u16 x86_max_cores;
|
|
+ u16 apicid;
|
|
+ u16 initial_apicid;
|
|
+ u16 x86_clflush_size;
|
|
+ u16 booted_cores;
|
|
+ u16 phys_proc_id;
|
|
+ u16 logical_proc_id;
|
|
+ u16 cpu_core_id;
|
|
+ u16 cpu_index;
|
|
+ u32 microcode;
|
|
+ u8 x86_cache_bits;
|
|
+ unsigned int initialized: 1;
|
|
+ u16 cpu_die_id;
|
|
+ u16 logical_die_id;
|
|
+};
|
|
+
|
|
+struct x86_hw_tss {
|
|
+ u32 reserved1;
|
|
+ u64 sp0;
|
|
+ u64 sp1;
|
|
+ u64 sp2;
|
|
+ u64 reserved2;
|
|
+ u64 ist[7];
|
|
+ u32 reserved3;
|
|
+ u32 reserved4;
|
|
+ u16 reserved5;
|
|
+ u16 io_bitmap_base;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct seq_file___2;
|
|
+
|
|
+struct seq_operations {
|
|
+ void * (*start)(struct seq_file___2 *, loff_t *);
|
|
+ void (*stop)(struct seq_file___2 *, void *);
|
|
+ void * (*next)(struct seq_file___2 *, void *, loff_t *);
|
|
+ int (*show)(struct seq_file___2 *, void *);
|
|
+};
|
|
+
|
|
+struct entry_stack {
|
|
+ long unsigned int words[64];
|
|
+};
|
|
+
|
|
+struct entry_stack_page {
|
|
+ struct entry_stack stack;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct tss_struct {
|
|
+ struct x86_hw_tss x86_tss;
|
|
+ long unsigned int io_bitmap[1025];
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct orig_ist {
|
|
+ long unsigned int ist[7];
|
|
+};
|
|
+
|
|
+union irq_stack_union {
|
|
+ char irq_stack[16384];
|
|
+ struct {
|
|
+ char gs_base[40];
|
|
+ long unsigned int stack_canary;
|
|
+ };
|
|
+};
|
|
+
|
|
+enum l1tf_mitigations {
|
|
+ L1TF_MITIGATION_OFF = 0,
|
|
+ L1TF_MITIGATION_FLUSH_NOWARN = 1,
|
|
+ L1TF_MITIGATION_FLUSH = 2,
|
|
+ L1TF_MITIGATION_FLUSH_NOSMT = 3,
|
|
+ L1TF_MITIGATION_FULL = 4,
|
|
+ L1TF_MITIGATION_FULL_FORCE = 5,
|
|
+};
|
|
+
|
|
+struct mpc_table {
|
|
+ char signature[4];
|
|
+ short unsigned int length;
|
|
+ char spec;
|
|
+ char checksum;
|
|
+ char oem[8];
|
|
+ char productid[12];
|
|
+ unsigned int oemptr;
|
|
+ short unsigned int oemsize;
|
|
+ short unsigned int oemcount;
|
|
+ unsigned int lapic;
|
|
+ unsigned int reserved;
|
|
+};
|
|
+
|
|
+struct mpc_cpu {
|
|
+ unsigned char type;
|
|
+ unsigned char apicid;
|
|
+ unsigned char apicver;
|
|
+ unsigned char cpuflag;
|
|
+ unsigned int cpufeature;
|
|
+ unsigned int featureflag;
|
|
+ unsigned int reserved[2];
|
|
+};
|
|
+
|
|
+struct mpc_bus {
|
|
+ unsigned char type;
|
|
+ unsigned char busid;
|
|
+ unsigned char bustype[6];
|
|
+};
|
|
+
|
|
+struct mpc_intsrc {
|
|
+ unsigned char type;
|
|
+ unsigned char irqtype;
|
|
+ short unsigned int irqflag;
|
|
+ unsigned char srcbus;
|
|
+ unsigned char srcbusirq;
|
|
+ unsigned char dstapic;
|
|
+ unsigned char dstirq;
|
|
+};
|
|
+
|
|
+struct x86_init_mpparse {
|
|
+ void (*mpc_record)(unsigned int);
|
|
+ void (*setup_ioapic_ids)();
|
|
+ int (*mpc_apic_id)(struct mpc_cpu *);
|
|
+ void (*smp_read_mpc_oem)(struct mpc_table *);
|
|
+ void (*mpc_oem_pci_bus)(struct mpc_bus *);
|
|
+ void (*mpc_oem_bus_info)(struct mpc_bus *, char *);
|
|
+ void (*find_smp_config)();
|
|
+ void (*get_smp_config)(unsigned int);
|
|
+};
|
|
+
|
|
+struct x86_init_resources {
|
|
+ void (*probe_roms)();
|
|
+ void (*reserve_resources)();
|
|
+ char * (*memory_setup)();
|
|
+};
|
|
+
|
|
+struct x86_init_irqs {
|
|
+ void (*pre_vector_init)();
|
|
+ void (*intr_init)();
|
|
+ void (*trap_init)();
|
|
+ void (*intr_mode_select)();
|
|
+ void (*intr_mode_init)();
|
|
+};
|
|
+
|
|
+struct x86_init_oem {
|
|
+ void (*arch_setup)();
|
|
+ void (*banner)();
|
|
+};
|
|
+
|
|
+struct x86_init_paging {
|
|
+ void (*pagetable_init)();
|
|
+};
|
|
+
|
|
+struct x86_init_timers {
|
|
+ void (*setup_percpu_clockev)();
|
|
+ void (*timer_init)();
|
|
+ void (*wallclock_init)();
|
|
+};
|
|
+
|
|
+struct x86_init_iommu {
|
|
+ int (*iommu_init)();
|
|
+};
|
|
+
|
|
+struct x86_init_pci {
|
|
+ int (*arch_init)();
|
|
+ int (*init)();
|
|
+ void (*init_irq)();
|
|
+ void (*fixup_irqs)();
|
|
+};
|
|
+
|
|
+struct x86_hyper_init {
|
|
+ void (*init_platform)();
|
|
+ void (*guest_late_init)();
|
|
+ bool (*x2apic_available)();
|
|
+ void (*init_mem_mapping)();
|
|
+ void (*init_after_bootmem)();
|
|
+};
|
|
+
|
|
+struct x86_init_acpi {
|
|
+ u64 (*get_root_pointer)();
|
|
+ void (*reduced_hw_early_init)();
|
|
+};
|
|
+
|
|
+struct x86_init_ops {
|
|
+ struct x86_init_resources resources;
|
|
+ struct x86_init_mpparse mpparse;
|
|
+ struct x86_init_irqs irqs;
|
|
+ struct x86_init_oem oem;
|
|
+ struct x86_init_paging paging;
|
|
+ struct x86_init_timers timers;
|
|
+ struct x86_init_iommu iommu;
|
|
+ struct x86_init_pci pci;
|
|
+ struct x86_hyper_init hyper;
|
|
+ struct x86_init_acpi acpi;
|
|
+};
|
|
+
|
|
+struct x86_cpuinit_ops {
|
|
+ void (*setup_percpu_clockev)();
|
|
+ void (*early_percpu_clock_init)();
|
|
+ void (*fixup_cpu_id)(struct cpuinfo_x86 *, int);
|
|
+};
|
|
+
|
|
+struct x86_legacy_devices {
|
|
+ int pnpbios;
|
|
+};
|
|
+
|
|
+enum x86_legacy_i8042_state {
|
|
+ X86_LEGACY_I8042_PLATFORM_ABSENT = 0,
|
|
+ X86_LEGACY_I8042_FIRMWARE_ABSENT = 1,
|
|
+ X86_LEGACY_I8042_EXPECTED_PRESENT = 2,
|
|
+};
|
|
+
|
|
+struct x86_legacy_features {
|
|
+ enum x86_legacy_i8042_state i8042;
|
|
+ int rtc;
|
|
+ int warm_reset;
|
|
+ int no_vga;
|
|
+ int reserve_bios_regions;
|
|
+ struct x86_legacy_devices devices;
|
|
+};
|
|
+
|
|
+struct x86_hyper_runtime {
|
|
+ void (*pin_vcpu)(int);
|
|
+};
|
|
+
|
|
+struct x86_platform_ops {
|
|
+ long unsigned int (*calibrate_cpu)();
|
|
+ long unsigned int (*calibrate_tsc)();
|
|
+ void (*get_wallclock)(struct timespec64 *);
|
|
+ int (*set_wallclock)(const struct timespec64 *);
|
|
+ void (*iommu_shutdown)();
|
|
+ bool (*is_untracked_pat_range)(u64, u64);
|
|
+ void (*nmi_init)();
|
|
+ unsigned char (*get_nmi_reason)();
|
|
+ void (*save_sched_clock_state)();
|
|
+ void (*restore_sched_clock_state)();
|
|
+ void (*apic_post_init)();
|
|
+ struct x86_legacy_features legacy;
|
|
+ void (*set_legacy_features)();
|
|
+ struct x86_hyper_runtime hyper;
|
|
+};
|
|
+
|
|
+struct pci_dev;
|
|
+
|
|
+struct x86_msi_ops {
|
|
+ int (*setup_msi_irqs)(struct pci_dev *, int, int);
|
|
+ void (*teardown_msi_irq)(unsigned int);
|
|
+ void (*teardown_msi_irqs)(struct pci_dev *);
|
|
+ void (*restore_msi_irqs)(struct pci_dev *);
|
|
+};
|
|
+
|
|
+struct x86_apic_ops {
|
|
+ unsigned int (*io_apic_read)(unsigned int, unsigned int);
|
|
+ void (*restore)();
|
|
+};
|
|
+
|
|
+struct physid_mask {
|
|
+ long unsigned int mask[512];
|
|
+};
|
|
+
|
|
+typedef struct physid_mask physid_mask_t;
|
|
+
|
|
+struct lock_class_key {};
|
|
+
|
|
+typedef struct {
|
|
+ arch_rwlock_t raw_lock;
|
|
+} rwlock_t;
|
|
+
|
|
+struct vdso_image {
|
|
+ void *data;
|
|
+ long unsigned int size;
|
|
+ long unsigned int alt;
|
|
+ long unsigned int alt_len;
|
|
+ long int sym_vvar_start;
|
|
+ long int sym_vvar_page;
|
|
+ long int sym_hpet_page;
|
|
+ long int sym_pvclock_page;
|
|
+ long int sym_hvclock_page;
|
|
+ long int sym_VDSO32_NOTE_MASK;
|
|
+ long int sym___kernel_sigreturn;
|
|
+ long int sym___kernel_rt_sigreturn;
|
|
+ long int sym___kernel_vsyscall;
|
|
+ long int sym_int80_landing_pad;
|
|
+};
|
|
+
|
|
+enum xen_domain_type {
|
|
+ XEN_NATIVE = 0,
|
|
+ XEN_PV_DOMAIN = 1,
|
|
+ XEN_HVM_DOMAIN = 2,
|
|
+};
|
|
+
|
|
+struct fwnode_operations;
|
|
+
|
|
+struct fwnode_handle {
|
|
+ struct fwnode_handle *secondary;
|
|
+ const struct fwnode_operations *ops;
|
|
+};
|
|
+
|
|
+struct device;
|
|
+
|
|
+struct fwnode_reference_args;
|
|
+
|
|
+struct fwnode_endpoint;
|
|
+
|
|
+struct fwnode_operations {
|
|
+ struct fwnode_handle * (*get)(struct fwnode_handle *);
|
|
+ void (*put)(struct fwnode_handle *);
|
|
+ bool (*device_is_available)(const struct fwnode_handle *);
|
|
+ const void * (*device_get_match_data)(const struct fwnode_handle *, const struct device *);
|
|
+ bool (*property_present)(const struct fwnode_handle *, const char *);
|
|
+ int (*property_read_int_array)(const struct fwnode_handle *, const char *, unsigned int, void *, size_t);
|
|
+ int (*property_read_string_array)(const struct fwnode_handle *, const char *, const char **, size_t);
|
|
+ struct fwnode_handle * (*get_parent)(const struct fwnode_handle *);
|
|
+ struct fwnode_handle * (*get_next_child_node)(const struct fwnode_handle *, struct fwnode_handle *);
|
|
+ struct fwnode_handle * (*get_named_child_node)(const struct fwnode_handle *, const char *);
|
|
+ int (*get_reference_args)(const struct fwnode_handle *, const char *, const char *, unsigned int, unsigned int, struct fwnode_reference_args *);
|
|
+ struct fwnode_handle * (*graph_get_next_endpoint)(const struct fwnode_handle *, struct fwnode_handle *);
|
|
+ struct fwnode_handle * (*graph_get_remote_endpoint)(const struct fwnode_handle *);
|
|
+ struct fwnode_handle * (*graph_get_port_parent)(struct fwnode_handle *);
|
|
+ int (*graph_parse_endpoint)(const struct fwnode_handle *, struct fwnode_endpoint *);
|
|
+};
|
|
+
|
|
+struct fwnode_endpoint {
|
|
+ unsigned int port;
|
|
+ unsigned int id;
|
|
+ const struct fwnode_handle *local_fwnode;
|
|
+};
|
|
+
|
|
+struct fwnode_reference_args {
|
|
+ struct fwnode_handle *fwnode;
|
|
+ unsigned int nargs;
|
|
+ u64 args[8];
|
|
+};
|
|
+
|
|
+struct kref {
|
|
+ refcount_t refcount;
|
|
+};
|
|
+
|
|
+struct kset;
|
|
+
|
|
+struct kobj_type;
|
|
+
|
|
+struct kernfs_node;
|
|
+
|
|
+struct kobject {
|
|
+ const char *name;
|
|
+ struct list_head entry;
|
|
+ struct kobject *parent;
|
|
+ struct kset *kset;
|
|
+ struct kobj_type *ktype;
|
|
+ struct kernfs_node *sd;
|
|
+ struct kref kref;
|
|
+ unsigned int state_initialized: 1;
|
|
+ unsigned int state_in_sysfs: 1;
|
|
+ unsigned int state_add_uevent_sent: 1;
|
|
+ unsigned int state_remove_uevent_sent: 1;
|
|
+ unsigned int uevent_suppress: 1;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+enum dl_dev_state {
|
|
+ DL_DEV_NO_DRIVER = 0,
|
|
+ DL_DEV_PROBING = 1,
|
|
+ DL_DEV_DRIVER_BOUND = 2,
|
|
+ DL_DEV_UNBINDING = 3,
|
|
+};
|
|
+
|
|
+struct dev_links_info {
|
|
+ struct list_head suppliers;
|
|
+ struct list_head consumers;
|
|
+ enum dl_dev_state status;
|
|
+};
|
|
+
|
|
+struct pm_message {
|
|
+ int event;
|
|
+};
|
|
+
|
|
+typedef struct pm_message pm_message_t;
|
|
+
|
|
+struct wait_queue_head {
|
|
+ spinlock_t lock;
|
|
+ struct list_head head;
|
|
+};
|
|
+
|
|
+typedef struct wait_queue_head wait_queue_head_t;
|
|
+
|
|
+struct completion {
|
|
+ unsigned int done;
|
|
+ wait_queue_head_t wait;
|
|
+};
|
|
+
|
|
+struct timer_list {
|
|
+ struct hlist_node entry;
|
|
+ long unsigned int expires;
|
|
+ void (*function)(struct timer_list *);
|
|
+ u32 flags;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+enum rpm_request {
|
|
+ RPM_REQ_NONE = 0,
|
|
+ RPM_REQ_IDLE = 1,
|
|
+ RPM_REQ_SUSPEND = 2,
|
|
+ RPM_REQ_AUTOSUSPEND = 3,
|
|
+ RPM_REQ_RESUME = 4,
|
|
+};
|
|
+
|
|
+enum rpm_status {
|
|
+ RPM_ACTIVE = 0,
|
|
+ RPM_RESUMING = 1,
|
|
+ RPM_SUSPENDED = 2,
|
|
+ RPM_SUSPENDING = 3,
|
|
+};
|
|
+
|
|
+struct wakeup_source;
|
|
+
|
|
+struct wake_irq;
|
|
+
|
|
+struct pm_subsys_data;
|
|
+
|
|
+struct dev_pm_qos;
|
|
+
|
|
+struct dev_pm_info {
|
|
+ pm_message_t power_state;
|
|
+ unsigned int can_wakeup: 1;
|
|
+ unsigned int async_suspend: 1;
|
|
+ bool in_dpm_list: 1;
|
|
+ bool is_prepared: 1;
|
|
+ bool is_suspended: 1;
|
|
+ bool is_noirq_suspended: 1;
|
|
+ bool is_late_suspended: 1;
|
|
+ bool early_init: 1;
|
|
+ bool direct_complete: 1;
|
|
+ u32 driver_flags;
|
|
+ spinlock_t lock;
|
|
+ struct list_head entry;
|
|
+ struct completion completion;
|
|
+ struct wakeup_source *wakeup;
|
|
+ bool wakeup_path: 1;
|
|
+ bool syscore: 1;
|
|
+ bool no_pm_callbacks: 1;
|
|
+ unsigned int must_resume: 1;
|
|
+ unsigned int may_skip_resume: 1;
|
|
+ struct timer_list suspend_timer;
|
|
+ long unsigned int timer_expires;
|
|
+ struct work_struct work;
|
|
+ wait_queue_head_t wait_queue;
|
|
+ struct wake_irq *wakeirq;
|
|
+ atomic_t usage_count;
|
|
+ atomic_t child_count;
|
|
+ unsigned int disable_depth: 3;
|
|
+ unsigned int idle_notification: 1;
|
|
+ unsigned int request_pending: 1;
|
|
+ unsigned int deferred_resume: 1;
|
|
+ unsigned int runtime_auto: 1;
|
|
+ bool ignore_children: 1;
|
|
+ unsigned int no_callbacks: 1;
|
|
+ unsigned int irq_safe: 1;
|
|
+ unsigned int use_autosuspend: 1;
|
|
+ unsigned int timer_autosuspends: 1;
|
|
+ unsigned int memalloc_noio: 1;
|
|
+ unsigned int links_count;
|
|
+ enum rpm_request request;
|
|
+ enum rpm_status runtime_status;
|
|
+ int runtime_error;
|
|
+ int autosuspend_delay;
|
|
+ long unsigned int last_busy;
|
|
+ long unsigned int active_jiffies;
|
|
+ long unsigned int suspended_jiffies;
|
|
+ long unsigned int accounting_timestamp;
|
|
+ struct pm_subsys_data *subsys_data;
|
|
+ void (*set_latency_tolerance)(struct device *, s32);
|
|
+ struct dev_pm_qos *qos;
|
|
+};
|
|
+
|
|
+struct dma_coherent_mem;
|
|
+
|
|
+struct dev_archdata {
|
|
+ void *iommu;
|
|
+};
|
|
+
|
|
+struct klist_node {
|
|
+ void *n_klist;
|
|
+ struct list_head n_node;
|
|
+ struct kref n_ref;
|
|
+};
|
|
+
|
|
+struct device_private;
|
|
+
|
|
+struct device_type;
|
|
+
|
|
+struct bus_type;
|
|
+
|
|
+struct device_driver;
|
|
+
|
|
+struct dev_pm_domain;
|
|
+
|
|
+struct irq_domain;
|
|
+
|
|
+struct dev_pin_info;
|
|
+
|
|
+struct dma_map_ops;
|
|
+
|
|
+struct device_dma_parameters;
|
|
+
|
|
+struct device_node;
|
|
+
|
|
+struct class;
|
|
+
|
|
+struct attribute_group;
|
|
+
|
|
+struct iommu_group;
|
|
+
|
|
+struct iommu_fwspec;
|
|
+
|
|
+struct iommu_param;
|
|
+
|
|
+struct device {
|
|
+ struct device *parent;
|
|
+ struct device_private *p;
|
|
+ struct kobject kobj;
|
|
+ const char *init_name;
|
|
+ const struct device_type *type;
|
|
+ struct mutex mutex;
|
|
+ struct bus_type *bus;
|
|
+ struct device_driver *driver;
|
|
+ void *platform_data;
|
|
+ void *driver_data;
|
|
+ struct dev_links_info links;
|
|
+ struct dev_pm_info power;
|
|
+ struct dev_pm_domain *pm_domain;
|
|
+ struct irq_domain *msi_domain;
|
|
+ struct dev_pin_info *pins;
|
|
+ struct list_head msi_list;
|
|
+ int numa_node;
|
|
+ const struct dma_map_ops *dma_ops;
|
|
+ u64 *dma_mask;
|
|
+ u64 coherent_dma_mask;
|
|
+ u64 bus_dma_mask;
|
|
+ long unsigned int dma_pfn_offset;
|
|
+ struct device_dma_parameters *dma_parms;
|
|
+ struct list_head dma_pools;
|
|
+ struct dma_coherent_mem *dma_mem;
|
|
+ struct dev_archdata archdata;
|
|
+ struct device_node *of_node;
|
|
+ struct fwnode_handle *fwnode;
|
|
+ dev_t devt;
|
|
+ u32 id;
|
|
+ spinlock_t devres_lock;
|
|
+ struct list_head devres_head;
|
|
+ struct klist_node knode_class;
|
|
+ struct class *class;
|
|
+ const struct attribute_group **groups;
|
|
+ void (*release)(struct device *);
|
|
+ struct iommu_group *iommu_group;
|
|
+ struct iommu_fwspec *iommu_fwspec;
|
|
+ struct iommu_param *iommu_param;
|
|
+ bool offline_disabled: 1;
|
|
+ bool offline: 1;
|
|
+ bool of_node_reused: 1;
|
|
+ union {
|
|
+ raw_spinlock_t msi_lock;
|
|
+ long unsigned int kabi_reserve1;
|
|
+ };
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long unsigned int kabi_reserved9;
|
|
+ long unsigned int kabi_reserved10;
|
|
+ long unsigned int kabi_reserved11;
|
|
+ long unsigned int kabi_reserved12;
|
|
+ long unsigned int kabi_reserved13;
|
|
+ long unsigned int kabi_reserved14;
|
|
+ long unsigned int kabi_reserved15;
|
|
+ long unsigned int kabi_reserved16;
|
|
+};
|
|
+
|
|
+struct vm_struct {
|
|
+ struct vm_struct *next;
|
|
+ void *addr;
|
|
+ long unsigned int size;
|
|
+ long unsigned int flags;
|
|
+ struct page **pages;
|
|
+ unsigned int nr_pages;
|
|
+ phys_addr_t phys_addr;
|
|
+ const void *caller;
|
|
+};
|
|
+
|
|
+struct real_mode_header {
|
|
+ u32 text_start;
|
|
+ u32 ro_end;
|
|
+ u32 trampoline_start;
|
|
+ u32 trampoline_status;
|
|
+ u32 trampoline_header;
|
|
+ u32 trampoline_pgd;
|
|
+ u32 wakeup_start;
|
|
+ u32 wakeup_header;
|
|
+ u32 machine_real_restart_asm;
|
|
+ u32 machine_real_restart_seg;
|
|
+};
|
|
+
|
|
+enum fixed_addresses {
|
|
+ VSYSCALL_PAGE = 511,
|
|
+ FIX_DBGP_BASE = 512,
|
|
+ FIX_EARLYCON_MEM_BASE = 513,
|
|
+ FIX_OHCI1394_BASE = 514,
|
|
+ FIX_APIC_BASE = 515,
|
|
+ FIX_IO_APIC_BASE_0 = 516,
|
|
+ FIX_IO_APIC_BASE_END = 643,
|
|
+ FIX_PARAVIRT_BOOTMAP = 644,
|
|
+ FIX_TEXT_POKE1 = 645,
|
|
+ FIX_TEXT_POKE0 = 646,
|
|
+ FIX_APEI_GHES_IRQ = 647,
|
|
+ FIX_APEI_GHES_NMI = 648,
|
|
+ __end_of_permanent_fixed_addresses = 649,
|
|
+ FIX_BTMAP_END = 1024,
|
|
+ FIX_BTMAP_BEGIN = 1535,
|
|
+ FIX_TBOOT_BASE = 1536,
|
|
+ __end_of_fixed_addresses = 1537,
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ u16 __softirq_pending;
|
|
+ u8 kvm_cpu_l1tf_flush_l1d;
|
|
+ unsigned int __nmi_count;
|
|
+ unsigned int apic_timer_irqs;
|
|
+ unsigned int irq_spurious_count;
|
|
+ unsigned int icr_read_retry_count;
|
|
+ unsigned int kvm_posted_intr_ipis;
|
|
+ unsigned int kvm_posted_intr_wakeup_ipis;
|
|
+ unsigned int kvm_posted_intr_nested_ipis;
|
|
+ unsigned int x86_platform_ipis;
|
|
+ unsigned int apic_perf_irqs;
|
|
+ unsigned int apic_irq_work_irqs;
|
|
+ unsigned int irq_resched_count;
|
|
+ unsigned int irq_call_count;
|
|
+ unsigned int irq_tlb_count;
|
|
+ unsigned int irq_thermal_count;
|
|
+ unsigned int irq_threshold_count;
|
|
+ unsigned int irq_deferred_error_count;
|
|
+ unsigned int irq_hv_callback_count;
|
|
+ unsigned int irq_hv_reenlightenment_count;
|
|
+ unsigned int hyperv_stimer0_count;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+} irq_cpustat_t;
|
|
+
|
|
+enum apic_intr_mode_id {
|
|
+ APIC_PIC = 0,
|
|
+ APIC_VIRTUAL_WIRE = 1,
|
|
+ APIC_VIRTUAL_WIRE_NO_CONFIG = 2,
|
|
+ APIC_SYMMETRIC_IO = 3,
|
|
+ APIC_SYMMETRIC_IO_NO_ROUTING = 4,
|
|
+};
|
|
+
|
|
+struct apic {
|
|
+ void (*eoi_write)(u32, u32);
|
|
+ void (*native_eoi_write)(u32, u32);
|
|
+ void (*write)(u32, u32);
|
|
+ u32 (*read)(u32);
|
|
+ void (*wait_icr_idle)();
|
|
+ u32 (*safe_wait_icr_idle)();
|
|
+ void (*send_IPI)(int, int);
|
|
+ void (*send_IPI_mask)(const struct cpumask *, int);
|
|
+ void (*send_IPI_mask_allbutself)(const struct cpumask *, int);
|
|
+ void (*send_IPI_allbutself)(int);
|
|
+ void (*send_IPI_all)(int);
|
|
+ void (*send_IPI_self)(int);
|
|
+ u32 dest_logical;
|
|
+ u32 disable_esr;
|
|
+ u32 irq_delivery_mode;
|
|
+ u32 irq_dest_mode;
|
|
+ u32 (*calc_dest_apicid)(unsigned int);
|
|
+ u64 (*icr_read)();
|
|
+ void (*icr_write)(u32, u32);
|
|
+ int (*probe)();
|
|
+ int (*acpi_madt_oem_check)(char *, char *);
|
|
+ int (*apic_id_valid)(u32);
|
|
+ int (*apic_id_registered)();
|
|
+ bool (*check_apicid_used)(physid_mask_t *, int);
|
|
+ void (*init_apic_ldr)();
|
|
+ void (*ioapic_phys_id_map)(physid_mask_t *, physid_mask_t *);
|
|
+ void (*setup_apic_routing)();
|
|
+ int (*cpu_present_to_apicid)(int);
|
|
+ void (*apicid_to_cpu_present)(int, physid_mask_t *);
|
|
+ int (*check_phys_apicid_present)(int);
|
|
+ int (*phys_pkg_id)(int, int);
|
|
+ u32 (*get_apic_id)(long unsigned int);
|
|
+ u32 (*set_apic_id)(unsigned int);
|
|
+ int (*wakeup_secondary_cpu)(int, long unsigned int);
|
|
+ void (*inquire_remote_apic)(int);
|
|
+ char *name;
|
|
+};
|
|
+
|
|
+struct smp_ops {
|
|
+ void (*smp_prepare_boot_cpu)();
|
|
+ void (*smp_prepare_cpus)(unsigned int);
|
|
+ void (*smp_cpus_done)(unsigned int);
|
|
+ void (*stop_other_cpus)(int);
|
|
+ void (*crash_stop_other_cpus)();
|
|
+ void (*smp_send_reschedule)(int);
|
|
+ int (*cpu_up)(unsigned int, struct task_struct *);
|
|
+ int (*cpu_disable)();
|
|
+ void (*cpu_die)(unsigned int);
|
|
+ void (*play_dead)();
|
|
+ void (*send_call_func_ipi)(const struct cpumask *);
|
|
+ void (*send_call_func_single_ipi)(int);
|
|
+};
|
|
+
|
|
+enum pcpu_fc {
|
|
+ PCPU_FC_AUTO = 0,
|
|
+ PCPU_FC_EMBED = 1,
|
|
+ PCPU_FC_PAGE = 2,
|
|
+ PCPU_FC_NR = 3,
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ struct seqcount seqcount;
|
|
+ spinlock_t lock;
|
|
+} seqlock_t;
|
|
+
|
|
+struct free_area {
|
|
+ struct list_head free_list[5];
|
|
+ long unsigned int nr_free;
|
|
+};
|
|
+
|
|
+struct zone_padding {
|
|
+ char x[0];
|
|
+};
|
|
+
|
|
+enum numa_stat_item {
|
|
+ NUMA_HIT = 0,
|
|
+ NUMA_MISS = 1,
|
|
+ NUMA_FOREIGN = 2,
|
|
+ NUMA_INTERLEAVE_HIT = 3,
|
|
+ NUMA_LOCAL = 4,
|
|
+ NUMA_OTHER = 5,
|
|
+ NR_VM_NUMA_STAT_ITEMS = 6,
|
|
+};
|
|
+
|
|
+enum zone_stat_item {
|
|
+ NR_FREE_PAGES = 0,
|
|
+ NR_ZONE_LRU_BASE = 1,
|
|
+ NR_ZONE_INACTIVE_ANON = 1,
|
|
+ NR_ZONE_ACTIVE_ANON = 2,
|
|
+ NR_ZONE_INACTIVE_FILE = 3,
|
|
+ NR_ZONE_ACTIVE_FILE = 4,
|
|
+ NR_ZONE_UNEVICTABLE = 5,
|
|
+ NR_ZONE_WRITE_PENDING = 6,
|
|
+ NR_MLOCK = 7,
|
|
+ NR_PAGETABLE = 8,
|
|
+ NR_KERNEL_STACK_KB = 9,
|
|
+ NR_BOUNCE = 10,
|
|
+ NR_ZSPAGES = 11,
|
|
+ NR_FREE_CMA_PAGES = 12,
|
|
+ NR_VM_ZONE_STAT_ITEMS = 13,
|
|
+};
|
|
+
|
|
+enum node_stat_item {
|
|
+ NR_LRU_BASE = 0,
|
|
+ NR_INACTIVE_ANON = 0,
|
|
+ NR_ACTIVE_ANON = 1,
|
|
+ NR_INACTIVE_FILE = 2,
|
|
+ NR_ACTIVE_FILE = 3,
|
|
+ NR_UNEVICTABLE = 4,
|
|
+ NR_SLAB_RECLAIMABLE = 5,
|
|
+ NR_SLAB_UNRECLAIMABLE = 6,
|
|
+ NR_ISOLATED_ANON = 7,
|
|
+ NR_ISOLATED_FILE = 8,
|
|
+ WORKINGSET_REFAULT = 9,
|
|
+ WORKINGSET_ACTIVATE = 10,
|
|
+ WORKINGSET_NODERECLAIM = 11,
|
|
+ NR_ANON_MAPPED = 12,
|
|
+ NR_FILE_MAPPED = 13,
|
|
+ NR_FILE_PAGES = 14,
|
|
+ NR_FILE_DIRTY = 15,
|
|
+ NR_WRITEBACK = 16,
|
|
+ NR_WRITEBACK_TEMP = 17,
|
|
+ NR_SHMEM = 18,
|
|
+ NR_SHMEM_THPS = 19,
|
|
+ NR_SHMEM_PMDMAPPED = 20,
|
|
+ NR_ANON_THPS = 21,
|
|
+ NR_UNSTABLE_NFS = 22,
|
|
+ NR_VMSCAN_WRITE = 23,
|
|
+ NR_VMSCAN_IMMEDIATE = 24,
|
|
+ NR_DIRTIED = 25,
|
|
+ NR_WRITTEN = 26,
|
|
+ NR_INDIRECTLY_RECLAIMABLE_BYTES = 27,
|
|
+ NR_VM_NODE_STAT_ITEMS = 28,
|
|
+};
|
|
+
|
|
+struct zone_reclaim_stat {
|
|
+ long unsigned int recent_rotated[2];
|
|
+ long unsigned int recent_scanned[2];
|
|
+};
|
|
+
|
|
+struct pglist_data;
|
|
+
|
|
+struct lruvec {
|
|
+ struct list_head lists[5];
|
|
+ struct zone_reclaim_stat reclaim_stat;
|
|
+ atomic_long_t inactive_age;
|
|
+ long unsigned int refaults;
|
|
+ struct pglist_data *pgdat;
|
|
+};
|
|
+
|
|
+struct per_cpu_pageset;
|
|
+
|
|
+struct zone {
|
|
+ long unsigned int watermark[3];
|
|
+ long unsigned int nr_reserved_highatomic;
|
|
+ long int lowmem_reserve[5];
|
|
+ int node;
|
|
+ struct pglist_data *zone_pgdat;
|
|
+ struct per_cpu_pageset *pageset;
|
|
+ long unsigned int zone_start_pfn;
|
|
+ long unsigned int managed_pages;
|
|
+ long unsigned int spanned_pages;
|
|
+ long unsigned int present_pages;
|
|
+ const char *name;
|
|
+ long unsigned int nr_isolate_pageblock;
|
|
+ seqlock_t span_seqlock;
|
|
+ int initialized;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct zone_padding _pad1_;
|
|
+ struct free_area free_area[11];
|
|
+ long unsigned int flags;
|
|
+ spinlock_t lock;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct zone_padding _pad2_;
|
|
+ long unsigned int percpu_drift_mark;
|
|
+ long unsigned int compact_cached_free_pfn;
|
|
+ long unsigned int compact_cached_migrate_pfn[2];
|
|
+ unsigned int compact_considered;
|
|
+ unsigned int compact_defer_shift;
|
|
+ int compact_order_failed;
|
|
+ bool compact_blockskip_flush;
|
|
+ bool contiguous;
|
|
+ long: 16;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct zone_padding _pad3_;
|
|
+ atomic_long_t vm_stat[13];
|
|
+ atomic_long_t vm_numa_stat[6];
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct zoneref {
|
|
+ struct zone *zone;
|
|
+ int zone_idx;
|
|
+};
|
|
+
|
|
+struct zonelist {
|
|
+ struct zoneref _zonerefs[5121];
|
|
+};
|
|
+
|
|
+enum zone_type {
|
|
+ ZONE_DMA = 0,
|
|
+ ZONE_DMA32 = 1,
|
|
+ ZONE_NORMAL = 2,
|
|
+ ZONE_MOVABLE = 3,
|
|
+ ZONE_DEVICE = 4,
|
|
+ __MAX_NR_ZONES = 5,
|
|
+};
|
|
+
|
|
+struct per_cpu_nodestat;
|
|
+
|
|
+struct pglist_data {
|
|
+ struct zone node_zones[5];
|
|
+ struct zonelist node_zonelists[2];
|
|
+ int nr_zones;
|
|
+ spinlock_t node_size_lock;
|
|
+ long unsigned int node_start_pfn;
|
|
+ long unsigned int node_present_pages;
|
|
+ long unsigned int node_spanned_pages;
|
|
+ int node_id;
|
|
+ wait_queue_head_t kswapd_wait;
|
|
+ wait_queue_head_t pfmemalloc_wait;
|
|
+ struct task_struct *kswapd;
|
|
+ int kswapd_order;
|
|
+ enum zone_type kswapd_classzone_idx;
|
|
+ int kswapd_failures;
|
|
+ int kcompactd_max_order;
|
|
+ enum zone_type kcompactd_classzone_idx;
|
|
+ wait_queue_head_t kcompactd_wait;
|
|
+ struct task_struct *kcompactd;
|
|
+ long unsigned int totalreserve_pages;
|
|
+ long unsigned int min_unmapped_pages;
|
|
+ long unsigned int min_slab_pages;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct zone_padding _pad1_;
|
|
+ spinlock_t lru_lock;
|
|
+ long unsigned int first_deferred_pfn;
|
|
+ long unsigned int static_init_pgcnt;
|
|
+ spinlock_t split_queue_lock;
|
|
+ struct list_head split_queue;
|
|
+ long unsigned int split_queue_len;
|
|
+ struct lruvec lruvec;
|
|
+ long unsigned int flags;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct zone_padding _pad2_;
|
|
+ struct per_cpu_nodestat *per_cpu_nodestats;
|
|
+ atomic_long_t vm_stat[28];
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+typedef unsigned int isolate_mode_t;
|
|
+
|
|
+struct per_cpu_pages {
|
|
+ int count;
|
|
+ int high;
|
|
+ int batch;
|
|
+ struct list_head lists[3];
|
|
+};
|
|
+
|
|
+struct per_cpu_pageset {
|
|
+ struct per_cpu_pages pcp;
|
|
+ s8 expire;
|
|
+ u16 vm_numa_stat_diff[6];
|
|
+ s8 stat_threshold;
|
|
+ s8 vm_stat_diff[13];
|
|
+};
|
|
+
|
|
+struct per_cpu_nodestat {
|
|
+ s8 stat_threshold;
|
|
+ s8 vm_node_stat_diff[28];
|
|
+};
|
|
+
|
|
+typedef struct pglist_data pg_data_t;
|
|
+
|
|
+typedef int (*notifier_fn_t)(struct notifier_block *, long unsigned int, void *);
|
|
+
|
|
+struct notifier_block {
|
|
+ notifier_fn_t notifier_call;
|
|
+ struct notifier_block *next;
|
|
+ int priority;
|
|
+};
|
|
+
|
|
+struct blocking_notifier_head {
|
|
+ struct rw_semaphore rwsem;
|
|
+ struct notifier_block *head;
|
|
+};
|
|
+
|
|
+struct mem_section {
|
|
+ long unsigned int section_mem_map;
|
|
+ long unsigned int *pageblock_flags;
|
|
+};
|
|
+
|
|
+struct userfaultfd_ctx;
|
|
+
|
|
+struct vm_userfaultfd_ctx {
|
|
+ struct userfaultfd_ctx *ctx;
|
|
+};
|
|
+
|
|
+struct anon_vma;
|
|
+
|
|
+struct vm_operations_struct;
|
|
+
|
|
+struct vm_area_struct {
|
|
+ long unsigned int vm_start;
|
|
+ long unsigned int vm_end;
|
|
+ struct vm_area_struct *vm_next;
|
|
+ struct vm_area_struct *vm_prev;
|
|
+ struct rb_node vm_rb;
|
|
+ long unsigned int rb_subtree_gap;
|
|
+ struct mm_struct *vm_mm;
|
|
+ pgprot_t vm_page_prot;
|
|
+ long unsigned int vm_flags;
|
|
+ struct {
|
|
+ struct rb_node rb;
|
|
+ long unsigned int rb_subtree_last;
|
|
+ } shared;
|
|
+ struct list_head anon_vma_chain;
|
|
+ struct anon_vma *anon_vma;
|
|
+ const struct vm_operations_struct *vm_ops;
|
|
+ long unsigned int vm_pgoff;
|
|
+ struct file *vm_file;
|
|
+ void *vm_private_data;
|
|
+ atomic_long_t swap_readahead_info;
|
|
+ struct mempolicy *vm_policy;
|
|
+ struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct arch_uprobe_task {
|
|
+ long unsigned int saved_scratch_register;
|
|
+ unsigned int saved_trap_nr;
|
|
+ unsigned int saved_tf;
|
|
+};
|
|
+
|
|
+enum uprobe_task_state {
|
|
+ UTASK_RUNNING = 0,
|
|
+ UTASK_SSTEP = 1,
|
|
+ UTASK_SSTEP_ACK = 2,
|
|
+ UTASK_SSTEP_TRAPPED = 3,
|
|
+};
|
|
+
|
|
+struct uprobe;
|
|
+
|
|
+struct return_instance;
|
|
+
|
|
+struct uprobe_task {
|
|
+ enum uprobe_task_state state;
|
|
+ union {
|
|
+ struct {
|
|
+ struct arch_uprobe_task autask;
|
|
+ long unsigned int vaddr;
|
|
+ };
|
|
+ struct {
|
|
+ struct callback_head dup_xol_work;
|
|
+ long unsigned int dup_xol_addr;
|
|
+ };
|
|
+ };
|
|
+ struct uprobe *active_uprobe;
|
|
+ long unsigned int xol_vaddr;
|
|
+ struct return_instance *return_instances;
|
|
+ unsigned int depth;
|
|
+};
|
|
+
|
|
+struct return_instance {
|
|
+ struct uprobe *uprobe;
|
|
+ long unsigned int func;
|
|
+ long unsigned int stack;
|
|
+ long unsigned int orig_ret_vaddr;
|
|
+ bool chained;
|
|
+ struct return_instance *next;
|
|
+};
|
|
+
|
|
+typedef int vm_fault_t;
|
|
+
|
|
+struct radix_tree_node;
|
|
+
|
|
+struct radix_tree_root {
|
|
+ spinlock_t xa_lock;
|
|
+ gfp_t gfp_mask;
|
|
+ struct radix_tree_node *rnode;
|
|
+};
|
|
+
|
|
+typedef u32 errseq_t;
|
|
+
|
|
+struct address_space_operations;
|
|
+
|
|
+struct address_space {
|
|
+ struct inode *host;
|
|
+ struct radix_tree_root i_pages;
|
|
+ atomic_t i_mmap_writable;
|
|
+ struct rb_root_cached i_mmap;
|
|
+ struct rw_semaphore i_mmap_rwsem;
|
|
+ long unsigned int nrpages;
|
|
+ long unsigned int nrexceptional;
|
|
+ long unsigned int writeback_index;
|
|
+ const struct address_space_operations *a_ops;
|
|
+ long unsigned int flags;
|
|
+ spinlock_t private_lock;
|
|
+ gfp_t gfp_mask;
|
|
+ struct list_head private_list;
|
|
+ void *private_data;
|
|
+ errseq_t wb_err;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+typedef int (*dev_page_fault_t)(struct vm_area_struct *, long unsigned int, const struct page *, unsigned int, pmd_t *);
|
|
+
|
|
+typedef void (*dev_page_free_t)(struct page *, void *);
|
|
+
|
|
+struct vmem_altmap {
|
|
+ const long unsigned int base_pfn;
|
|
+ const long unsigned int reserve;
|
|
+ long unsigned int free;
|
|
+ long unsigned int align;
|
|
+ long unsigned int alloc;
|
|
+};
|
|
+
|
|
+struct resource {
|
|
+ resource_size_t start;
|
|
+ resource_size_t end;
|
|
+ const char *name;
|
|
+ long unsigned int flags;
|
|
+ long unsigned int desc;
|
|
+ struct resource *parent;
|
|
+ struct resource *sibling;
|
|
+ struct resource *child;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+enum memory_type {
|
|
+ MEMORY_DEVICE_PRIVATE = 1,
|
|
+ MEMORY_DEVICE_PUBLIC = 2,
|
|
+ MEMORY_DEVICE_FS_DAX = 3,
|
|
+};
|
|
+
|
|
+struct percpu_ref;
|
|
+
|
|
+struct dev_pagemap {
|
|
+ dev_page_fault_t page_fault;
|
|
+ dev_page_free_t page_free;
|
|
+ struct vmem_altmap altmap;
|
|
+ bool altmap_valid;
|
|
+ struct resource res;
|
|
+ struct percpu_ref *ref;
|
|
+ void (*kill)(struct percpu_ref *);
|
|
+ struct device *dev;
|
|
+ void *data;
|
|
+ enum memory_type type;
|
|
+};
|
|
+
|
|
+struct vfsmount;
|
|
+
|
|
+struct dentry;
|
|
+
|
|
+struct path {
|
|
+ struct vfsmount *mnt;
|
|
+ struct dentry *dentry;
|
|
+};
|
|
+
|
|
+enum rw_hint {
|
|
+ WRITE_LIFE_NOT_SET = 0,
|
|
+ WRITE_LIFE_NONE = 1,
|
|
+ WRITE_LIFE_SHORT = 2,
|
|
+ WRITE_LIFE_MEDIUM = 3,
|
|
+ WRITE_LIFE_LONG = 4,
|
|
+ WRITE_LIFE_EXTREME = 5,
|
|
+};
|
|
+
|
|
+enum pid_type {
|
|
+ PIDTYPE_PID = 0,
|
|
+ PIDTYPE_TGID = 1,
|
|
+ PIDTYPE_PGID = 2,
|
|
+ PIDTYPE_SID = 3,
|
|
+ PIDTYPE_MAX = 4,
|
|
+};
|
|
+
|
|
+struct fown_struct {
|
|
+ rwlock_t lock;
|
|
+ struct pid *pid;
|
|
+ enum pid_type pid_type;
|
|
+ kuid_t uid;
|
|
+ kuid_t euid;
|
|
+ int signum;
|
|
+};
|
|
+
|
|
+struct file_ra_state {
|
|
+ long unsigned int start;
|
|
+ unsigned int size;
|
|
+ unsigned int async_size;
|
|
+ unsigned int ra_pages;
|
|
+ unsigned int mmap_miss;
|
|
+ loff_t prev_pos;
|
|
+};
|
|
+
|
|
+struct file {
|
|
+ union {
|
|
+ struct llist_node fu_llist;
|
|
+ struct callback_head fu_rcuhead;
|
|
+ } f_u;
|
|
+ struct path f_path;
|
|
+ struct inode *f_inode;
|
|
+ const struct file_operations *f_op;
|
|
+ spinlock_t f_lock;
|
|
+ enum rw_hint f_write_hint;
|
|
+ atomic_long_t f_count;
|
|
+ unsigned int f_flags;
|
|
+ fmode_t f_mode;
|
|
+ struct mutex f_pos_lock;
|
|
+ loff_t f_pos;
|
|
+ struct fown_struct f_owner;
|
|
+ const struct cred *f_cred;
|
|
+ struct file_ra_state f_ra;
|
|
+ u64 f_version;
|
|
+ void *f_security;
|
|
+ void *private_data;
|
|
+ struct list_head f_ep_links;
|
|
+ struct list_head f_tfile_llink;
|
|
+ struct address_space *f_mapping;
|
|
+ errseq_t f_wb_err;
|
|
+};
|
|
+
|
|
+enum page_entry_size {
|
|
+ PE_SIZE_PTE = 0,
|
|
+ PE_SIZE_PMD = 1,
|
|
+ PE_SIZE_PUD = 2,
|
|
+};
|
|
+
|
|
+struct vm_fault;
|
|
+
|
|
+struct vm_operations_struct {
|
|
+ void (*open)(struct vm_area_struct *);
|
|
+ void (*close)(struct vm_area_struct *);
|
|
+ int (*split)(struct vm_area_struct *, long unsigned int);
|
|
+ int (*mremap)(struct vm_area_struct *);
|
|
+ vm_fault_t (*fault)(struct vm_fault *);
|
|
+ vm_fault_t (*huge_fault)(struct vm_fault *, enum page_entry_size);
|
|
+ void (*map_pages)(struct vm_fault *, long unsigned int, long unsigned int);
|
|
+ long unsigned int (*pagesize)(struct vm_area_struct *);
|
|
+ vm_fault_t (*page_mkwrite)(struct vm_fault *);
|
|
+ vm_fault_t (*pfn_mkwrite)(struct vm_fault *);
|
|
+ int (*access)(struct vm_area_struct *, long unsigned int, void *, int, int);
|
|
+ const char * (*name)(struct vm_area_struct *);
|
|
+ int (*set_policy)(struct vm_area_struct *, struct mempolicy *);
|
|
+ struct mempolicy * (*get_policy)(struct vm_area_struct *, long unsigned int);
|
|
+ struct page * (*find_special_page)(struct vm_area_struct *, long unsigned int);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct core_thread {
|
|
+ struct task_struct *task;
|
|
+ struct core_thread *next;
|
|
+};
|
|
+
|
|
+struct core_state {
|
|
+ atomic_t nr_threads;
|
|
+ struct core_thread dumper;
|
|
+ struct completion startup;
|
|
+};
|
|
+
|
|
+struct vm_fault {
|
|
+ struct vm_area_struct *vma;
|
|
+ unsigned int flags;
|
|
+ gfp_t gfp_mask;
|
|
+ long unsigned int pgoff;
|
|
+ long unsigned int address;
|
|
+ pmd_t *pmd;
|
|
+ pud_t *pud;
|
|
+ pte_t orig_pte;
|
|
+ struct page *cow_page;
|
|
+ struct mem_cgroup *memcg;
|
|
+ struct page *page;
|
|
+ pte_t *pte;
|
|
+ spinlock_t *ptl;
|
|
+ pgtable_t prealloc_pte;
|
|
+};
|
|
+
|
|
+typedef void percpu_ref_func_t(struct percpu_ref *);
|
|
+
|
|
+struct percpu_ref {
|
|
+ atomic_long_t count;
|
|
+ long unsigned int percpu_count_ptr;
|
|
+ percpu_ref_func_t *release;
|
|
+ percpu_ref_func_t *confirm_switch;
|
|
+ bool force_atomic: 1;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct shrink_control {
|
|
+ gfp_t gfp_mask;
|
|
+ int nid;
|
|
+ long unsigned int nr_to_scan;
|
|
+ long unsigned int nr_scanned;
|
|
+ struct mem_cgroup *memcg;
|
|
+};
|
|
+
|
|
+struct shrinker {
|
|
+ long unsigned int (*count_objects)(struct shrinker *, struct shrink_control *);
|
|
+ long unsigned int (*scan_objects)(struct shrinker *, struct shrink_control *);
|
|
+ long int batch;
|
|
+ int seeks;
|
|
+ unsigned int flags;
|
|
+ struct list_head list;
|
|
+ int id;
|
|
+ atomic_long_t *nr_deferred;
|
|
+};
|
|
+
|
|
+struct page_ext_operations {
|
|
+ size_t offset;
|
|
+ size_t size;
|
|
+ bool (*need)();
|
|
+ void (*init)();
|
|
+};
|
|
+
|
|
+struct hlist_bl_node;
|
|
+
|
|
+struct hlist_bl_head {
|
|
+ struct hlist_bl_node *first;
|
|
+};
|
|
+
|
|
+struct hlist_bl_node {
|
|
+ struct hlist_bl_node *next;
|
|
+ struct hlist_bl_node **pprev;
|
|
+};
|
|
+
|
|
+struct lockref {
|
|
+ union {
|
|
+ __u64 lock_count;
|
|
+ struct {
|
|
+ spinlock_t lock;
|
|
+ int count;
|
|
+ };
|
|
+ };
|
|
+};
|
|
+
|
|
+struct qstr {
|
|
+ union {
|
|
+ struct {
|
|
+ u32 hash;
|
|
+ u32 len;
|
|
+ };
|
|
+ u64 hash_len;
|
|
+ };
|
|
+ const unsigned char *name;
|
|
+};
|
|
+
|
|
+struct dentry_stat_t {
|
|
+ long int nr_dentry;
|
|
+ long int nr_unused;
|
|
+ long int age_limit;
|
|
+ long int want_pages;
|
|
+ long int dummy[2];
|
|
+};
|
|
+
|
|
+struct dentry_operations;
|
|
+
|
|
+struct super_block;
|
|
+
|
|
+struct dentry {
|
|
+ unsigned int d_flags;
|
|
+ seqcount_t d_seq;
|
|
+ struct hlist_bl_node d_hash;
|
|
+ struct dentry *d_parent;
|
|
+ struct qstr d_name;
|
|
+ struct inode *d_inode;
|
|
+ unsigned char d_iname[32];
|
|
+ struct lockref d_lockref;
|
|
+ const struct dentry_operations *d_op;
|
|
+ struct super_block *d_sb;
|
|
+ long unsigned int d_time;
|
|
+ void *d_fsdata;
|
|
+ union {
|
|
+ struct list_head d_lru;
|
|
+ wait_queue_head_t *d_wait;
|
|
+ };
|
|
+ struct list_head d_child;
|
|
+ struct list_head d_subdirs;
|
|
+ union {
|
|
+ struct hlist_node d_alias;
|
|
+ struct hlist_bl_node d_in_lookup_hash;
|
|
+ struct callback_head d_rcu;
|
|
+ } d_u;
|
|
+ atomic_t d_neg_dnum;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ gid_t val;
|
|
+} kgid_t;
|
|
+
|
|
+struct posix_acl;
|
|
+
|
|
+struct inode_operations;
|
|
+
|
|
+struct bdi_writeback;
|
|
+
|
|
+struct file_lock_context;
|
|
+
|
|
+struct block_device;
|
|
+
|
|
+struct cdev;
|
|
+
|
|
+struct fsnotify_mark_connector;
|
|
+
|
|
+struct inode {
|
|
+ umode_t i_mode;
|
|
+ short unsigned int i_opflags;
|
|
+ kuid_t i_uid;
|
|
+ kgid_t i_gid;
|
|
+ unsigned int i_flags;
|
|
+ struct posix_acl *i_acl;
|
|
+ struct posix_acl *i_default_acl;
|
|
+ const struct inode_operations *i_op;
|
|
+ struct super_block *i_sb;
|
|
+ struct address_space *i_mapping;
|
|
+ void *i_security;
|
|
+ long unsigned int i_ino;
|
|
+ union {
|
|
+ const unsigned int i_nlink;
|
|
+ unsigned int __i_nlink;
|
|
+ };
|
|
+ dev_t i_rdev;
|
|
+ loff_t i_size;
|
|
+ struct timespec64 i_atime;
|
|
+ struct timespec64 i_mtime;
|
|
+ struct timespec64 i_ctime;
|
|
+ spinlock_t i_lock;
|
|
+ short unsigned int i_bytes;
|
|
+ u8 i_blkbits;
|
|
+ u8 i_write_hint;
|
|
+ blkcnt_t i_blocks;
|
|
+ long unsigned int i_state;
|
|
+ struct rw_semaphore i_rwsem;
|
|
+ long unsigned int dirtied_when;
|
|
+ long unsigned int dirtied_time_when;
|
|
+ struct hlist_node i_hash;
|
|
+ struct list_head i_io_list;
|
|
+ struct bdi_writeback *i_wb;
|
|
+ int i_wb_frn_winner;
|
|
+ u16 i_wb_frn_avg_time;
|
|
+ u16 i_wb_frn_history;
|
|
+ struct list_head i_lru;
|
|
+ struct list_head i_sb_list;
|
|
+ struct list_head i_wb_list;
|
|
+ union {
|
|
+ struct hlist_head i_dentry;
|
|
+ struct callback_head i_rcu;
|
|
+ };
|
|
+ atomic64_t i_version;
|
|
+ atomic_t i_count;
|
|
+ atomic_t i_dio_count;
|
|
+ atomic_t i_writecount;
|
|
+ atomic_t i_readcount;
|
|
+ const struct file_operations *i_fop;
|
|
+ struct file_lock_context *i_flctx;
|
|
+ struct address_space i_data;
|
|
+ struct list_head i_devices;
|
|
+ union {
|
|
+ struct pipe_inode_info *i_pipe;
|
|
+ struct block_device *i_bdev;
|
|
+ struct cdev *i_cdev;
|
|
+ char *i_link;
|
|
+ unsigned int i_dir_seq;
|
|
+ };
|
|
+ __u32 i_generation;
|
|
+ __u32 i_fsnotify_mask;
|
|
+ struct fsnotify_mark_connector *i_fsnotify_marks;
|
|
+ void *i_private;
|
|
+ atomic64_t i_sequence;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct dentry_operations {
|
|
+ int (*d_revalidate)(struct dentry *, unsigned int);
|
|
+ int (*d_weak_revalidate)(struct dentry *, unsigned int);
|
|
+ int (*d_hash)(const struct dentry *, struct qstr *);
|
|
+ int (*d_compare)(const struct dentry *, unsigned int, const char *, const struct qstr *);
|
|
+ int (*d_delete)(const struct dentry *);
|
|
+ int (*d_init)(struct dentry *);
|
|
+ void (*d_release)(struct dentry *);
|
|
+ void (*d_prune)(struct dentry *);
|
|
+ void (*d_iput)(struct dentry *, struct inode *);
|
|
+ char * (*d_dname)(struct dentry *, char *, int);
|
|
+ struct vfsmount * (*d_automount)(struct path *);
|
|
+ int (*d_manage)(const struct path *, bool);
|
|
+ struct dentry * (*d_real)(struct dentry *, const struct inode *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct mtd_info;
|
|
+
|
|
+typedef long long int qsize_t;
|
|
+
|
|
+struct quota_format_type;
|
|
+
|
|
+struct mem_dqinfo {
|
|
+ struct quota_format_type *dqi_format;
|
|
+ int dqi_fmt_id;
|
|
+ struct list_head dqi_dirty_list;
|
|
+ long unsigned int dqi_flags;
|
|
+ unsigned int dqi_bgrace;
|
|
+ unsigned int dqi_igrace;
|
|
+ qsize_t dqi_max_spc_limit;
|
|
+ qsize_t dqi_max_ino_limit;
|
|
+ void *dqi_priv;
|
|
+};
|
|
+
|
|
+struct quota_format_ops;
|
|
+
|
|
+struct quota_info {
|
|
+ unsigned int flags;
|
|
+ struct rw_semaphore dqio_sem;
|
|
+ struct inode *files[3];
|
|
+ struct mem_dqinfo info[3];
|
|
+ const struct quota_format_ops *ops[3];
|
|
+};
|
|
+
|
|
+enum rcu_sync_type {
|
|
+ RCU_SYNC = 0,
|
|
+ RCU_SCHED_SYNC = 1,
|
|
+ RCU_BH_SYNC = 2,
|
|
+};
|
|
+
|
|
+struct rcu_sync {
|
|
+ int gp_state;
|
|
+ int gp_count;
|
|
+ wait_queue_head_t gp_wait;
|
|
+ int cb_state;
|
|
+ struct callback_head cb_head;
|
|
+ enum rcu_sync_type gp_type;
|
|
+};
|
|
+
|
|
+struct rcuwait {
|
|
+ struct task_struct *task;
|
|
+};
|
|
+
|
|
+struct percpu_rw_semaphore {
|
|
+ struct rcu_sync rss;
|
|
+ unsigned int *read_count;
|
|
+ struct rw_semaphore rw_sem;
|
|
+ struct rcuwait writer;
|
|
+ int readers_block;
|
|
+};
|
|
+
|
|
+struct sb_writers {
|
|
+ int frozen;
|
|
+ wait_queue_head_t wait_unfrozen;
|
|
+ struct percpu_rw_semaphore rw_sem[3];
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ __u8 b[16];
|
|
+} uuid_t;
|
|
+
|
|
+struct list_lru_node;
|
|
+
|
|
+struct list_lru {
|
|
+ struct list_lru_node *node;
|
|
+ struct list_head list;
|
|
+ int shrinker_id;
|
|
+ bool memcg_aware;
|
|
+};
|
|
+
|
|
+struct file_system_type;
|
|
+
|
|
+struct super_operations;
|
|
+
|
|
+struct dquot_operations;
|
|
+
|
|
+struct quotactl_ops;
|
|
+
|
|
+struct export_operations;
|
|
+
|
|
+struct xattr_handler;
|
|
+
|
|
+struct workqueue_struct;
|
|
+
|
|
+struct super_block {
|
|
+ struct list_head s_list;
|
|
+ dev_t s_dev;
|
|
+ unsigned char s_blocksize_bits;
|
|
+ long unsigned int s_blocksize;
|
|
+ loff_t s_maxbytes;
|
|
+ struct file_system_type *s_type;
|
|
+ const struct super_operations *s_op;
|
|
+ const struct dquot_operations *dq_op;
|
|
+ const struct quotactl_ops *s_qcop;
|
|
+ const struct export_operations *s_export_op;
|
|
+ long unsigned int s_flags;
|
|
+ long unsigned int s_iflags;
|
|
+ long unsigned int s_magic;
|
|
+ struct dentry *s_root;
|
|
+ struct rw_semaphore s_umount;
|
|
+ int s_count;
|
|
+ atomic_t s_active;
|
|
+ void *s_security;
|
|
+ const struct xattr_handler **s_xattr;
|
|
+ struct hlist_bl_head s_roots;
|
|
+ struct list_head s_mounts;
|
|
+ struct block_device *s_bdev;
|
|
+ struct backing_dev_info *s_bdi;
|
|
+ struct mtd_info *s_mtd;
|
|
+ struct hlist_node s_instances;
|
|
+ unsigned int s_quota_types;
|
|
+ struct quota_info s_dquot;
|
|
+ struct sb_writers s_writers;
|
|
+ char s_id[32];
|
|
+ uuid_t s_uuid;
|
|
+ void *s_fs_info;
|
|
+ unsigned int s_max_links;
|
|
+ fmode_t s_mode;
|
|
+ u32 s_time_gran;
|
|
+ struct mutex s_vfs_rename_mutex;
|
|
+ char *s_subtype;
|
|
+ const struct dentry_operations *s_d_op;
|
|
+ int cleancache_poolid;
|
|
+ struct shrinker s_shrink;
|
|
+ atomic_long_t s_remove_count;
|
|
+ atomic_long_t s_fsnotify_inode_refs;
|
|
+ int s_readonly_remount;
|
|
+ struct workqueue_struct *s_dio_done_wq;
|
|
+ struct hlist_head s_pins;
|
|
+ struct user_namespace *s_user_ns;
|
|
+ struct list_lru s_dentry_lru;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct list_lru s_inode_lru;
|
|
+ struct callback_head rcu;
|
|
+ struct work_struct destroy_work;
|
|
+ struct mutex s_sync_lock;
|
|
+ int s_stack_depth;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ spinlock_t s_inode_list_lock;
|
|
+ struct list_head s_inodes;
|
|
+ spinlock_t s_inode_wblist_lock;
|
|
+ struct list_head s_inodes_wb;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct kstat {
|
|
+ u32 result_mask;
|
|
+ umode_t mode;
|
|
+ unsigned int nlink;
|
|
+ uint32_t blksize;
|
|
+ u64 attributes;
|
|
+ u64 attributes_mask;
|
|
+ u64 ino;
|
|
+ dev_t dev;
|
|
+ dev_t rdev;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ loff_t size;
|
|
+ struct timespec64 atime;
|
|
+ struct timespec64 mtime;
|
|
+ struct timespec64 ctime;
|
|
+ struct timespec64 btime;
|
|
+ u64 blocks;
|
|
+};
|
|
+
|
|
+struct list_lru_one {
|
|
+ struct list_head list;
|
|
+ long int nr_items;
|
|
+};
|
|
+
|
|
+struct list_lru_memcg {
|
|
+ struct callback_head rcu;
|
|
+ struct list_lru_one *lru[0];
|
|
+};
|
|
+
|
|
+struct list_lru_node {
|
|
+ spinlock_t lock;
|
|
+ struct list_lru_one lru;
|
|
+ struct list_lru_memcg *memcg_lrus;
|
|
+ long int nr_items;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct radix_tree_node {
|
|
+ unsigned char shift;
|
|
+ unsigned char offset;
|
|
+ unsigned char count;
|
|
+ unsigned char exceptional;
|
|
+ struct radix_tree_node *parent;
|
|
+ struct radix_tree_root *root;
|
|
+ union {
|
|
+ struct list_head private_list;
|
|
+ struct callback_head callback_head;
|
|
+ };
|
|
+ void *slots[64];
|
|
+ long unsigned int tags[3];
|
|
+};
|
|
+
|
|
+struct pid_namespace;
|
|
+
|
|
+struct upid {
|
|
+ int nr;
|
|
+ struct pid_namespace *ns;
|
|
+};
|
|
+
|
|
+struct pid {
|
|
+ atomic_t count;
|
|
+ unsigned int level;
|
|
+ struct hlist_head tasks[4];
|
|
+ struct callback_head rcu;
|
|
+ struct upid numbers[1];
|
|
+};
|
|
+
|
|
+struct kernel_cap_struct {
|
|
+ __u32 cap[2];
|
|
+};
|
|
+
|
|
+typedef struct kernel_cap_struct kernel_cap_t;
|
|
+
|
|
+struct fiemap_extent {
|
|
+ __u64 fe_logical;
|
|
+ __u64 fe_physical;
|
|
+ __u64 fe_length;
|
|
+ __u64 fe_reserved64[2];
|
|
+ __u32 fe_flags;
|
|
+ __u32 fe_reserved[3];
|
|
+};
|
|
+
|
|
+enum migrate_mode {
|
|
+ MIGRATE_ASYNC = 0,
|
|
+ MIGRATE_SYNC_LIGHT = 1,
|
|
+ MIGRATE_SYNC = 2,
|
|
+ MIGRATE_SYNC_NO_COPY = 3,
|
|
+};
|
|
+
|
|
+struct delayed_call {
|
|
+ void (*fn)(void *);
|
|
+ void *arg;
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ __u8 b[16];
|
|
+} guid_t;
|
|
+
|
|
+struct timerqueue_head {
|
|
+ struct rb_root_cached rb_root;
|
|
+};
|
|
+
|
|
+struct hrtimer_cpu_base;
|
|
+
|
|
+struct hrtimer_clock_base {
|
|
+ struct hrtimer_cpu_base *cpu_base;
|
|
+ unsigned int index;
|
|
+ clockid_t clockid;
|
|
+ seqcount_t seq;
|
|
+ struct hrtimer *running;
|
|
+ struct timerqueue_head active;
|
|
+ ktime_t (*get_time)();
|
|
+ ktime_t offset;
|
|
+};
|
|
+
|
|
+struct hrtimer_cpu_base {
|
|
+ raw_spinlock_t lock;
|
|
+ unsigned int cpu;
|
|
+ unsigned int active_bases;
|
|
+ unsigned int clock_was_set_seq;
|
|
+ unsigned int hres_active: 1;
|
|
+ unsigned int in_hrtirq: 1;
|
|
+ unsigned int hang_detected: 1;
|
|
+ unsigned int softirq_activated: 1;
|
|
+ unsigned int nr_events;
|
|
+ short unsigned int nr_retries;
|
|
+ short unsigned int nr_hangs;
|
|
+ unsigned int max_hang_time;
|
|
+ ktime_t expires_next;
|
|
+ struct hrtimer *next_timer;
|
|
+ ktime_t softirq_expires_next;
|
|
+ struct hrtimer *softirq_next_timer;
|
|
+ struct hrtimer_clock_base clock_base[8];
|
|
+};
|
|
+
|
|
+struct tick_device;
|
|
+
|
|
+union sigval {
|
|
+ int sival_int;
|
|
+ void *sival_ptr;
|
|
+};
|
|
+
|
|
+typedef union sigval sigval_t;
|
|
+
|
|
+struct siginfo {
|
|
+ int si_signo;
|
|
+ int si_errno;
|
|
+ int si_code;
|
|
+ union {
|
|
+ int _pad[28];
|
|
+ struct {
|
|
+ __kernel_pid_t _pid;
|
|
+ __kernel_uid32_t _uid;
|
|
+ } _kill;
|
|
+ struct {
|
|
+ __kernel_timer_t _tid;
|
|
+ int _overrun;
|
|
+ sigval_t _sigval;
|
|
+ int _sys_private;
|
|
+ } _timer;
|
|
+ struct {
|
|
+ __kernel_pid_t _pid;
|
|
+ __kernel_uid32_t _uid;
|
|
+ sigval_t _sigval;
|
|
+ } _rt;
|
|
+ struct {
|
|
+ __kernel_pid_t _pid;
|
|
+ __kernel_uid32_t _uid;
|
|
+ int _status;
|
|
+ __kernel_clock_t _utime;
|
|
+ __kernel_clock_t _stime;
|
|
+ } _sigchld;
|
|
+ struct {
|
|
+ void *_addr;
|
|
+ union {
|
|
+ short int _addr_lsb;
|
|
+ struct {
|
|
+ char _dummy_bnd[8];
|
|
+ void *_lower;
|
|
+ void *_upper;
|
|
+ } _addr_bnd;
|
|
+ struct {
|
|
+ char _dummy_pkey[8];
|
|
+ __u32 _pkey;
|
|
+ } _addr_pkey;
|
|
+ };
|
|
+ } _sigfault;
|
|
+ struct {
|
|
+ long int _band;
|
|
+ int _fd;
|
|
+ } _sigpoll;
|
|
+ struct {
|
|
+ void *_call_addr;
|
|
+ int _syscall;
|
|
+ unsigned int _arch;
|
|
+ } _sigsys;
|
|
+ } _sifields;
|
|
+};
|
|
+
|
|
+struct rseq {
|
|
+ __u32 cpu_id_start;
|
|
+ __u32 cpu_id;
|
|
+ union {
|
|
+ __u64 ptr64;
|
|
+ __u64 ptr;
|
|
+ } rseq_cs;
|
|
+ __u32 flags;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct rq;
|
|
+
|
|
+struct rq_flags;
|
|
+
|
|
+struct sched_class {
|
|
+ const struct sched_class *next;
|
|
+ void (*enqueue_task)(struct rq *, struct task_struct *, int);
|
|
+ void (*dequeue_task)(struct rq *, struct task_struct *, int);
|
|
+ void (*yield_task)(struct rq *);
|
|
+ bool (*yield_to_task)(struct rq *, struct task_struct *, bool);
|
|
+ void (*check_preempt_curr)(struct rq *, struct task_struct *, int);
|
|
+ struct task_struct * (*pick_next_task)(struct rq *, struct task_struct *, struct rq_flags *);
|
|
+ void (*put_prev_task)(struct rq *, struct task_struct *);
|
|
+ int (*select_task_rq)(struct task_struct *, int, int, int);
|
|
+ void (*migrate_task_rq)(struct task_struct *, int);
|
|
+ void (*task_woken)(struct rq *, struct task_struct *);
|
|
+ void (*set_cpus_allowed)(struct task_struct *, const struct cpumask *);
|
|
+ void (*rq_online)(struct rq *);
|
|
+ void (*rq_offline)(struct rq *);
|
|
+ void (*set_curr_task)(struct rq *);
|
|
+ void (*task_tick)(struct rq *, struct task_struct *, int);
|
|
+ void (*task_fork)(struct task_struct *);
|
|
+ void (*task_dead)(struct task_struct *);
|
|
+ void (*switched_from)(struct rq *, struct task_struct *);
|
|
+ void (*switched_to)(struct rq *, struct task_struct *);
|
|
+ void (*prio_changed)(struct rq *, struct task_struct *, int);
|
|
+ unsigned int (*get_rr_interval)(struct rq *, struct task_struct *);
|
|
+ void (*update_curr)(struct rq *);
|
|
+ void (*task_change_group)(struct task_struct *, int);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct key;
|
|
+
|
|
+struct user_struct;
|
|
+
|
|
+struct group_info;
|
|
+
|
|
+struct cred {
|
|
+ atomic_t usage;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ kuid_t suid;
|
|
+ kgid_t sgid;
|
|
+ kuid_t euid;
|
|
+ kgid_t egid;
|
|
+ kuid_t fsuid;
|
|
+ kgid_t fsgid;
|
|
+ unsigned int securebits;
|
|
+ kernel_cap_t cap_inheritable;
|
|
+ kernel_cap_t cap_permitted;
|
|
+ kernel_cap_t cap_effective;
|
|
+ kernel_cap_t cap_bset;
|
|
+ kernel_cap_t cap_ambient;
|
|
+ unsigned char jit_keyring;
|
|
+ struct key *session_keyring;
|
|
+ struct key *process_keyring;
|
|
+ struct key *thread_keyring;
|
|
+ struct key *request_key_auth;
|
|
+ void *security;
|
|
+ struct user_struct *user;
|
|
+ struct user_namespace *user_ns;
|
|
+ struct group_info *group_info;
|
|
+ union {
|
|
+ int non_rcu;
|
|
+ struct callback_head rcu;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct io_cq;
|
|
+
|
|
+struct io_context {
|
|
+ atomic_long_t refcount;
|
|
+ atomic_t active_ref;
|
|
+ atomic_t nr_tasks;
|
|
+ spinlock_t lock;
|
|
+ short unsigned int ioprio;
|
|
+ int nr_batch_requests;
|
|
+ long unsigned int last_waited;
|
|
+ struct radix_tree_root icq_tree;
|
|
+ struct io_cq *icq_hint;
|
|
+ struct hlist_head icq_list;
|
|
+ struct work_struct release_work;
|
|
+};
|
|
+
|
|
+struct io_cq {
|
|
+ struct request_queue *q;
|
|
+ struct io_context *ioc;
|
|
+ union {
|
|
+ struct list_head q_node;
|
|
+ struct kmem_cache *__rcu_icq_cache;
|
|
+ };
|
|
+ union {
|
|
+ struct hlist_node ioc_node;
|
|
+ struct callback_head __rcu_head;
|
|
+ };
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+struct files_stat_struct {
|
|
+ long unsigned int nr_files;
|
|
+ long unsigned int nr_free_files;
|
|
+ long unsigned int max_files;
|
|
+};
|
|
+
|
|
+struct inodes_stat_t {
|
|
+ long int nr_inodes;
|
|
+ long int nr_unused;
|
|
+ long int dummy[5];
|
|
+};
|
|
+
|
|
+struct kiocb {
|
|
+ struct file *ki_filp;
|
|
+ loff_t ki_pos;
|
|
+ void (*ki_complete)(struct kiocb *, long int, long int);
|
|
+ void *private;
|
|
+ int ki_flags;
|
|
+ u16 ki_hint;
|
|
+ u16 ki_ioprio;
|
|
+};
|
|
+
|
|
+struct iattr {
|
|
+ unsigned int ia_valid;
|
|
+ umode_t ia_mode;
|
|
+ kuid_t ia_uid;
|
|
+ kgid_t ia_gid;
|
|
+ loff_t ia_size;
|
|
+ struct timespec64 ia_atime;
|
|
+ struct timespec64 ia_mtime;
|
|
+ struct timespec64 ia_ctime;
|
|
+ struct file *ia_file;
|
|
+};
|
|
+
|
|
+struct percpu_counter {
|
|
+ raw_spinlock_t lock;
|
|
+ s64 count;
|
|
+ struct list_head list;
|
|
+ s32 *counters;
|
|
+};
|
|
+
|
|
+typedef __kernel_uid32_t projid_t;
|
|
+
|
|
+typedef struct {
|
|
+ projid_t val;
|
|
+} kprojid_t;
|
|
+
|
|
+enum quota_type {
|
|
+ USRQUOTA = 0,
|
|
+ GRPQUOTA = 1,
|
|
+ PRJQUOTA = 2,
|
|
+};
|
|
+
|
|
+struct kqid {
|
|
+ union {
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ kprojid_t projid;
|
|
+ };
|
|
+ enum quota_type type;
|
|
+};
|
|
+
|
|
+struct mem_dqblk {
|
|
+ qsize_t dqb_bhardlimit;
|
|
+ qsize_t dqb_bsoftlimit;
|
|
+ qsize_t dqb_curspace;
|
|
+ qsize_t dqb_rsvspace;
|
|
+ qsize_t dqb_ihardlimit;
|
|
+ qsize_t dqb_isoftlimit;
|
|
+ qsize_t dqb_curinodes;
|
|
+ time64_t dqb_btime;
|
|
+ time64_t dqb_itime;
|
|
+};
|
|
+
|
|
+struct dquot {
|
|
+ struct hlist_node dq_hash;
|
|
+ struct list_head dq_inuse;
|
|
+ struct list_head dq_free;
|
|
+ struct list_head dq_dirty;
|
|
+ struct mutex dq_lock;
|
|
+ spinlock_t dq_dqb_lock;
|
|
+ atomic_t dq_count;
|
|
+ struct super_block *dq_sb;
|
|
+ struct kqid dq_id;
|
|
+ loff_t dq_off;
|
|
+ long unsigned int dq_flags;
|
|
+ struct mem_dqblk dq_dqb;
|
|
+};
|
|
+
|
|
+struct quota_format_type {
|
|
+ int qf_fmt_id;
|
|
+ const struct quota_format_ops *qf_ops;
|
|
+ struct module *qf_owner;
|
|
+ struct quota_format_type *qf_next;
|
|
+};
|
|
+
|
|
+struct dqstats {
|
|
+ long unsigned int stat[8];
|
|
+ struct percpu_counter counter[8];
|
|
+};
|
|
+
|
|
+struct quota_format_ops {
|
|
+ int (*check_quota_file)(struct super_block *, int);
|
|
+ int (*read_file_info)(struct super_block *, int);
|
|
+ int (*write_file_info)(struct super_block *, int);
|
|
+ int (*free_file_info)(struct super_block *, int);
|
|
+ int (*read_dqblk)(struct dquot *);
|
|
+ int (*commit_dqblk)(struct dquot *);
|
|
+ int (*release_dqblk)(struct dquot *);
|
|
+ int (*get_next_id)(struct super_block *, struct kqid *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct dquot_operations {
|
|
+ int (*write_dquot)(struct dquot *);
|
|
+ struct dquot * (*alloc_dquot)(struct super_block *, int);
|
|
+ void (*destroy_dquot)(struct dquot *);
|
|
+ int (*acquire_dquot)(struct dquot *);
|
|
+ int (*release_dquot)(struct dquot *);
|
|
+ int (*mark_dirty)(struct dquot *);
|
|
+ int (*write_info)(struct super_block *, int);
|
|
+ qsize_t * (*get_reserved_space)(struct inode *);
|
|
+ int (*get_projid)(struct inode *, kprojid_t *);
|
|
+ int (*get_inode_usage)(struct inode *, qsize_t *);
|
|
+ int (*get_next_id)(struct super_block *, struct kqid *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct qc_dqblk {
|
|
+ int d_fieldmask;
|
|
+ u64 d_spc_hardlimit;
|
|
+ u64 d_spc_softlimit;
|
|
+ u64 d_ino_hardlimit;
|
|
+ u64 d_ino_softlimit;
|
|
+ u64 d_space;
|
|
+ u64 d_ino_count;
|
|
+ s64 d_ino_timer;
|
|
+ s64 d_spc_timer;
|
|
+ int d_ino_warns;
|
|
+ int d_spc_warns;
|
|
+ u64 d_rt_spc_hardlimit;
|
|
+ u64 d_rt_spc_softlimit;
|
|
+ u64 d_rt_space;
|
|
+ s64 d_rt_spc_timer;
|
|
+ int d_rt_spc_warns;
|
|
+};
|
|
+
|
|
+struct qc_type_state {
|
|
+ unsigned int flags;
|
|
+ unsigned int spc_timelimit;
|
|
+ unsigned int ino_timelimit;
|
|
+ unsigned int rt_spc_timelimit;
|
|
+ unsigned int spc_warnlimit;
|
|
+ unsigned int ino_warnlimit;
|
|
+ unsigned int rt_spc_warnlimit;
|
|
+ long long unsigned int ino;
|
|
+ blkcnt_t blocks;
|
|
+ blkcnt_t nextents;
|
|
+};
|
|
+
|
|
+struct qc_state {
|
|
+ unsigned int s_incoredqs;
|
|
+ struct qc_type_state s_state[3];
|
|
+};
|
|
+
|
|
+struct qc_info {
|
|
+ int i_fieldmask;
|
|
+ unsigned int i_flags;
|
|
+ unsigned int i_spc_timelimit;
|
|
+ unsigned int i_ino_timelimit;
|
|
+ unsigned int i_rt_spc_timelimit;
|
|
+ unsigned int i_spc_warnlimit;
|
|
+ unsigned int i_ino_warnlimit;
|
|
+ unsigned int i_rt_spc_warnlimit;
|
|
+};
|
|
+
|
|
+struct quotactl_ops {
|
|
+ int (*quota_on)(struct super_block *, int, int, const struct path *);
|
|
+ int (*quota_off)(struct super_block *, int);
|
|
+ int (*quota_enable)(struct super_block *, unsigned int);
|
|
+ int (*quota_disable)(struct super_block *, unsigned int);
|
|
+ int (*quota_sync)(struct super_block *, int);
|
|
+ int (*set_info)(struct super_block *, int, struct qc_info *);
|
|
+ int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
|
|
+ int (*get_nextdqblk)(struct super_block *, struct kqid *, struct qc_dqblk *);
|
|
+ int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
|
|
+ int (*get_state)(struct super_block *, struct qc_state *);
|
|
+ int (*rm_xquota)(struct super_block *, unsigned int);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct writeback_control;
|
|
+
|
|
+struct swap_info_struct;
|
|
+
|
|
+struct address_space_operations {
|
|
+ int (*writepage)(struct page *, struct writeback_control *);
|
|
+ int (*readpage)(struct file *, struct page *);
|
|
+ int (*writepages)(struct address_space *, struct writeback_control *);
|
|
+ int (*set_page_dirty)(struct page *);
|
|
+ int (*readpages)(struct file *, struct address_space *, struct list_head *, unsigned int);
|
|
+ int (*write_begin)(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct page **, void **);
|
|
+ int (*write_end)(struct file *, struct address_space *, loff_t, unsigned int, unsigned int, struct page *, void *);
|
|
+ sector_t (*bmap)(struct address_space *, sector_t);
|
|
+ void (*invalidatepage)(struct page *, unsigned int, unsigned int);
|
|
+ int (*releasepage)(struct page *, gfp_t);
|
|
+ void (*freepage)(struct page *);
|
|
+ ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *);
|
|
+ int (*migratepage)(struct address_space *, struct page *, struct page *, enum migrate_mode);
|
|
+ bool (*isolate_page)(struct page *, isolate_mode_t);
|
|
+ void (*putback_page)(struct page *);
|
|
+ int (*launder_page)(struct page *);
|
|
+ int (*is_partially_uptodate)(struct page *, long unsigned int, long unsigned int);
|
|
+ void (*is_dirty_writeback)(struct page *, bool *, bool *);
|
|
+ int (*error_remove_page)(struct address_space *, struct page *);
|
|
+ int (*swap_activate)(struct swap_info_struct *, struct file *, sector_t *);
|
|
+ void (*swap_deactivate)(struct file *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct hd_struct;
|
|
+
|
|
+struct gendisk;
|
|
+
|
|
+struct block_device {
|
|
+ dev_t bd_dev;
|
|
+ int bd_openers;
|
|
+ int bd_write_openers;
|
|
+ struct inode *bd_inode;
|
|
+ struct super_block *bd_super;
|
|
+ struct mutex bd_mutex;
|
|
+ void *bd_claiming;
|
|
+ void *bd_holder;
|
|
+ int bd_holders;
|
|
+ bool bd_write_holder;
|
|
+ struct list_head bd_holder_disks;
|
|
+ struct block_device *bd_contains;
|
|
+ unsigned int bd_block_size;
|
|
+ u8 bd_partno;
|
|
+ struct hd_struct *bd_part;
|
|
+ unsigned int bd_part_count;
|
|
+ int bd_invalidated;
|
|
+ struct gendisk *bd_disk;
|
|
+ struct request_queue *bd_queue;
|
|
+ struct backing_dev_info *bd_bdi;
|
|
+ struct list_head bd_list;
|
|
+ long unsigned int bd_private;
|
|
+ int bd_fsfreeze_count;
|
|
+ struct mutex bd_fsfreeze_mutex;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct fiemap_extent_info;
|
|
+
|
|
+struct inode_operations {
|
|
+ struct dentry * (*lookup)(struct inode *, struct dentry *, unsigned int);
|
|
+ const char * (*get_link)(struct dentry *, struct inode *, struct delayed_call *);
|
|
+ int (*permission)(struct inode *, int);
|
|
+ struct posix_acl * (*get_acl)(struct inode *, int);
|
|
+ int (*readlink)(struct dentry *, char *, int);
|
|
+ int (*create)(struct inode *, struct dentry *, umode_t, bool);
|
|
+ int (*link)(struct dentry *, struct inode *, struct dentry *);
|
|
+ int (*unlink)(struct inode *, struct dentry *);
|
|
+ int (*symlink)(struct inode *, struct dentry *, const char *);
|
|
+ int (*mkdir)(struct inode *, struct dentry *, umode_t);
|
|
+ int (*rmdir)(struct inode *, struct dentry *);
|
|
+ int (*mknod)(struct inode *, struct dentry *, umode_t, dev_t);
|
|
+ int (*rename)(struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int);
|
|
+ int (*setattr)(struct dentry *, struct iattr *);
|
|
+ int (*getattr)(const struct path *, struct kstat *, u32, unsigned int);
|
|
+ ssize_t (*listxattr)(struct dentry *, char *, size_t);
|
|
+ int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64, u64);
|
|
+ int (*update_time)(struct inode *, struct timespec64 *, int);
|
|
+ int (*atomic_open)(struct inode *, struct dentry *, struct file *, unsigned int, umode_t);
|
|
+ int (*tmpfile)(struct inode *, struct dentry *, umode_t);
|
|
+ int (*set_acl)(struct inode *, struct posix_acl *, int);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct file_lock_context {
|
|
+ spinlock_t flc_lock;
|
|
+ struct list_head flc_flock;
|
|
+ struct list_head flc_posix;
|
|
+ struct list_head flc_lease;
|
|
+};
|
|
+
|
|
+struct file_lock_operations {
|
|
+ void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
|
|
+ void (*fl_release_private)(struct file_lock *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct nlm_lockowner;
|
|
+
|
|
+struct nfs_lock_info {
|
|
+ u32 state;
|
|
+ struct nlm_lockowner *owner;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct nfs4_lock_state;
|
|
+
|
|
+struct nfs4_lock_info {
|
|
+ struct nfs4_lock_state *owner;
|
|
+};
|
|
+
|
|
+struct fasync_struct;
|
|
+
|
|
+struct lock_manager_operations;
|
|
+
|
|
+struct file_lock {
|
|
+ struct file_lock *fl_next;
|
|
+ struct list_head fl_list;
|
|
+ struct hlist_node fl_link;
|
|
+ struct list_head fl_block;
|
|
+ fl_owner_t fl_owner;
|
|
+ unsigned int fl_flags;
|
|
+ unsigned char fl_type;
|
|
+ unsigned int fl_pid;
|
|
+ int fl_link_cpu;
|
|
+ wait_queue_head_t fl_wait;
|
|
+ struct file *fl_file;
|
|
+ loff_t fl_start;
|
|
+ loff_t fl_end;
|
|
+ struct fasync_struct *fl_fasync;
|
|
+ long unsigned int fl_break_time;
|
|
+ long unsigned int fl_downgrade_time;
|
|
+ const struct file_lock_operations *fl_ops;
|
|
+ const struct lock_manager_operations *fl_lmops;
|
|
+ union {
|
|
+ struct nfs_lock_info nfs_fl;
|
|
+ struct nfs4_lock_info nfs4_fl;
|
|
+ struct {
|
|
+ struct list_head link;
|
|
+ int state;
|
|
+ } afs;
|
|
+ } fl_u;
|
|
+};
|
|
+
|
|
+struct lock_manager_operations {
|
|
+ int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
|
|
+ long unsigned int (*lm_owner_key)(struct file_lock *);
|
|
+ fl_owner_t (*lm_get_owner)(fl_owner_t);
|
|
+ void (*lm_put_owner)(fl_owner_t);
|
|
+ void (*lm_notify)(struct file_lock *);
|
|
+ int (*lm_grant)(struct file_lock *, int);
|
|
+ bool (*lm_break)(struct file_lock *);
|
|
+ int (*lm_change)(struct file_lock *, int, struct list_head *);
|
|
+ void (*lm_setup)(struct file_lock *, void **);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct fasync_struct {
|
|
+ rwlock_t fa_lock;
|
|
+ int magic;
|
|
+ int fa_fd;
|
|
+ struct fasync_struct *fa_next;
|
|
+ struct file *fa_file;
|
|
+ struct callback_head fa_rcu;
|
|
+};
|
|
+
|
|
+struct file_system_type {
|
|
+ const char *name;
|
|
+ int fs_flags;
|
|
+ struct dentry * (*mount)(struct file_system_type *, int, const char *, void *);
|
|
+ void (*kill_sb)(struct super_block *);
|
|
+ struct module *owner;
|
|
+ struct file_system_type *next;
|
|
+ struct hlist_head fs_supers;
|
|
+ struct lock_class_key s_lock_key;
|
|
+ struct lock_class_key s_umount_key;
|
|
+ struct lock_class_key s_vfs_rename_key;
|
|
+ struct lock_class_key s_writers_key[3];
|
|
+ struct lock_class_key i_lock_key;
|
|
+ struct lock_class_key i_mutex_key;
|
|
+ struct lock_class_key i_mutex_dir_key;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct kstatfs;
|
|
+
|
|
+struct super_operations {
|
|
+ struct inode * (*alloc_inode)(struct super_block *);
|
|
+ void (*destroy_inode)(struct inode *);
|
|
+ void (*dirty_inode)(struct inode *, int);
|
|
+ int (*write_inode)(struct inode *, struct writeback_control *);
|
|
+ int (*drop_inode)(struct inode *);
|
|
+ void (*evict_inode)(struct inode *);
|
|
+ void (*put_super)(struct super_block *);
|
|
+ int (*sync_fs)(struct super_block *, int);
|
|
+ int (*freeze_super)(struct super_block *);
|
|
+ int (*freeze_fs)(struct super_block *);
|
|
+ int (*thaw_super)(struct super_block *);
|
|
+ int (*unfreeze_fs)(struct super_block *);
|
|
+ int (*statfs)(struct dentry *, struct kstatfs *);
|
|
+ int (*remount_fs)(struct super_block *, int *, char *);
|
|
+ void (*umount_begin)(struct super_block *);
|
|
+ int (*show_options)(struct seq_file *, struct dentry *);
|
|
+ int (*show_devname)(struct seq_file *, struct dentry *);
|
|
+ int (*show_path)(struct seq_file *, struct dentry *);
|
|
+ int (*show_stats)(struct seq_file *, struct dentry *);
|
|
+ ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
|
|
+ ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
|
|
+ struct dquot ** (*get_dquots)(struct inode *);
|
|
+ int (*bdev_try_to_free_page)(struct super_block *, struct page *, gfp_t);
|
|
+ long int (*nr_cached_objects)(struct super_block *, struct shrink_control *);
|
|
+ long int (*free_cached_objects)(struct super_block *, struct shrink_control *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct iomap;
|
|
+
|
|
+struct inode___2;
|
|
+
|
|
+struct dentry___2;
|
|
+
|
|
+struct super_block___2;
|
|
+
|
|
+struct fid;
|
|
+
|
|
+struct iattr___2;
|
|
+
|
|
+struct export_operations {
|
|
+ int (*encode_fh)(struct inode___2 *, __u32 *, int *, struct inode___2 *);
|
|
+ struct dentry___2 * (*fh_to_dentry)(struct super_block___2 *, struct fid *, int, int);
|
|
+ struct dentry___2 * (*fh_to_parent)(struct super_block___2 *, struct fid *, int, int);
|
|
+ int (*get_name)(struct dentry___2 *, char *, struct dentry___2 *);
|
|
+ struct dentry___2 * (*get_parent)(struct dentry___2 *);
|
|
+ int (*commit_metadata)(struct inode___2 *);
|
|
+ int (*get_uuid)(struct super_block___2 *, u8 *, u32 *, u64 *);
|
|
+ int (*map_blocks)(struct inode___2 *, loff_t, u64, struct iomap *, bool, u32 *);
|
|
+ int (*commit_blocks)(struct inode___2 *, struct iomap *, int, struct iattr___2 *);
|
|
+};
|
|
+
|
|
+struct xattr_handler {
|
|
+ const char *name;
|
|
+ const char *prefix;
|
|
+ int flags;
|
|
+ bool (*list)(struct dentry *);
|
|
+ int (*get)(const struct xattr_handler *, struct dentry *, struct inode *, const char *, void *, size_t);
|
|
+ int (*set)(const struct xattr_handler *, struct dentry *, struct inode *, const char *, const void *, size_t, int);
|
|
+};
|
|
+
|
|
+struct fiemap_extent_info {
|
|
+ unsigned int fi_flags;
|
|
+ unsigned int fi_extents_mapped;
|
|
+ unsigned int fi_extents_max;
|
|
+ struct fiemap_extent *fi_extents_start;
|
|
+};
|
|
+
|
|
+typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64, unsigned int);
|
|
+
|
|
+struct dir_context {
|
|
+ filldir_t actor;
|
|
+ loff_t pos;
|
|
+};
|
|
+
|
|
+struct attribute {
|
|
+ const char *name;
|
|
+ umode_t mode;
|
|
+};
|
|
+
|
|
+struct kobj_attribute {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct kobject *, struct kobj_attribute *, char *);
|
|
+ ssize_t (*store)(struct kobject *, struct kobj_attribute *, const char *, size_t);
|
|
+};
|
|
+
|
|
+typedef void compound_page_dtor(struct page *);
|
|
+
|
|
+enum vm_event_item {
|
|
+ PGPGIN = 0,
|
|
+ PGPGOUT = 1,
|
|
+ PSWPIN = 2,
|
|
+ PSWPOUT = 3,
|
|
+ PGALLOC_DMA = 4,
|
|
+ PGALLOC_DMA32 = 5,
|
|
+ PGALLOC_NORMAL = 6,
|
|
+ PGALLOC_MOVABLE = 7,
|
|
+ ALLOCSTALL_DMA = 8,
|
|
+ ALLOCSTALL_DMA32 = 9,
|
|
+ ALLOCSTALL_NORMAL = 10,
|
|
+ ALLOCSTALL_MOVABLE = 11,
|
|
+ PGSCAN_SKIP_DMA = 12,
|
|
+ PGSCAN_SKIP_DMA32 = 13,
|
|
+ PGSCAN_SKIP_NORMAL = 14,
|
|
+ PGSCAN_SKIP_MOVABLE = 15,
|
|
+ PGFREE = 16,
|
|
+ PGACTIVATE = 17,
|
|
+ PGDEACTIVATE = 18,
|
|
+ PGLAZYFREE = 19,
|
|
+ PGFAULT = 20,
|
|
+ PGMAJFAULT = 21,
|
|
+ PGLAZYFREED = 22,
|
|
+ PGREFILL = 23,
|
|
+ PGSTEAL_KSWAPD = 24,
|
|
+ PGSTEAL_DIRECT = 25,
|
|
+ PGSCAN_KSWAPD = 26,
|
|
+ PGSCAN_DIRECT = 27,
|
|
+ PGSCAN_DIRECT_THROTTLE = 28,
|
|
+ PGSCAN_ZONE_RECLAIM_FAILED = 29,
|
|
+ PGINODESTEAL = 30,
|
|
+ SLABS_SCANNED = 31,
|
|
+ KSWAPD_INODESTEAL = 32,
|
|
+ KSWAPD_LOW_WMARK_HIT_QUICKLY = 33,
|
|
+ KSWAPD_HIGH_WMARK_HIT_QUICKLY = 34,
|
|
+ PAGEOUTRUN = 35,
|
|
+ PGROTATED = 36,
|
|
+ DROP_PAGECACHE = 37,
|
|
+ DROP_SLAB = 38,
|
|
+ OOM_KILL = 39,
|
|
+ NUMA_PTE_UPDATES = 40,
|
|
+ NUMA_HUGE_PTE_UPDATES = 41,
|
|
+ NUMA_HINT_FAULTS = 42,
|
|
+ NUMA_HINT_FAULTS_LOCAL = 43,
|
|
+ NUMA_PAGE_MIGRATE = 44,
|
|
+ PGMIGRATE_SUCCESS = 45,
|
|
+ PGMIGRATE_FAIL = 46,
|
|
+ COMPACTMIGRATE_SCANNED = 47,
|
|
+ COMPACTFREE_SCANNED = 48,
|
|
+ COMPACTISOLATED = 49,
|
|
+ COMPACTSTALL = 50,
|
|
+ COMPACTFAIL = 51,
|
|
+ COMPACTSUCCESS = 52,
|
|
+ KCOMPACTD_WAKE = 53,
|
|
+ KCOMPACTD_MIGRATE_SCANNED = 54,
|
|
+ KCOMPACTD_FREE_SCANNED = 55,
|
|
+ HTLB_BUDDY_PGALLOC = 56,
|
|
+ HTLB_BUDDY_PGALLOC_FAIL = 57,
|
|
+ UNEVICTABLE_PGCULLED = 58,
|
|
+ UNEVICTABLE_PGSCANNED = 59,
|
|
+ UNEVICTABLE_PGRESCUED = 60,
|
|
+ UNEVICTABLE_PGMLOCKED = 61,
|
|
+ UNEVICTABLE_PGMUNLOCKED = 62,
|
|
+ UNEVICTABLE_PGCLEARED = 63,
|
|
+ UNEVICTABLE_PGSTRANDED = 64,
|
|
+ THP_FAULT_ALLOC = 65,
|
|
+ THP_FAULT_FALLBACK = 66,
|
|
+ THP_COLLAPSE_ALLOC = 67,
|
|
+ THP_COLLAPSE_ALLOC_FAILED = 68,
|
|
+ THP_FILE_ALLOC = 69,
|
|
+ THP_FILE_MAPPED = 70,
|
|
+ THP_SPLIT_PAGE = 71,
|
|
+ THP_SPLIT_PAGE_FAILED = 72,
|
|
+ THP_DEFERRED_SPLIT_PAGE = 73,
|
|
+ THP_SPLIT_PMD = 74,
|
|
+ THP_SPLIT_PUD = 75,
|
|
+ THP_ZERO_PAGE_ALLOC = 76,
|
|
+ THP_ZERO_PAGE_ALLOC_FAILED = 77,
|
|
+ THP_SWPOUT = 78,
|
|
+ THP_SWPOUT_FALLBACK = 79,
|
|
+ BALLOON_INFLATE = 80,
|
|
+ BALLOON_DEFLATE = 81,
|
|
+ BALLOON_MIGRATE = 82,
|
|
+ SWAP_RA = 83,
|
|
+ SWAP_RA_HIT = 84,
|
|
+ NR_VM_EVENT_ITEMS = 85,
|
|
+};
|
|
+
|
|
+struct vm_event_state {
|
|
+ long unsigned int event[85];
|
|
+};
|
|
+
|
|
+enum memblock_flags {
|
|
+ MEMBLOCK_NONE = 0,
|
|
+ MEMBLOCK_HOTPLUG = 1,
|
|
+ MEMBLOCK_MIRROR = 2,
|
|
+ MEMBLOCK_NOMAP = 4,
|
|
+};
|
|
+
|
|
+struct memblock_region {
|
|
+ phys_addr_t base;
|
|
+ phys_addr_t size;
|
|
+ enum memblock_flags flags;
|
|
+ int nid;
|
|
+};
|
|
+
|
|
+struct memblock_type {
|
|
+ long unsigned int cnt;
|
|
+ long unsigned int max;
|
|
+ phys_addr_t total_size;
|
|
+ struct memblock_region *regions;
|
|
+ char *name;
|
|
+};
|
|
+
|
|
+struct memblock {
|
|
+ bool bottom_up;
|
|
+ phys_addr_t current_limit;
|
|
+ struct memblock_type memory;
|
|
+ struct memblock_type reserved;
|
|
+};
|
|
+
|
|
+struct debug_store {
|
|
+ u64 bts_buffer_base;
|
|
+ u64 bts_index;
|
|
+ u64 bts_absolute_maximum;
|
|
+ u64 bts_interrupt_threshold;
|
|
+ u64 pebs_buffer_base;
|
|
+ u64 pebs_index;
|
|
+ u64 pebs_absolute_maximum;
|
|
+ u64 pebs_interrupt_threshold;
|
|
+ u64 pebs_event_reset[12];
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct debug_store_buffers {
|
|
+ char bts_buffer[65536];
|
|
+ char pebs_buffer[65536];
|
|
+};
|
|
+
|
|
+struct cpu_entry_area {
|
|
+ char gdt[4096];
|
|
+ struct entry_stack_page entry_stack_page;
|
|
+ struct tss_struct tss;
|
|
+ char entry_trampoline[4096];
|
|
+ char exception_stacks[20480];
|
|
+ struct debug_store cpu_debug_store;
|
|
+ struct debug_store_buffers cpu_debug_buffers;
|
|
+};
|
|
+
|
|
+struct gdt_page {
|
|
+ struct desc_struct gdt[16];
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct tlb_context {
|
|
+ u64 ctx_id;
|
|
+ u64 tlb_gen;
|
|
+};
|
|
+
|
|
+struct tlb_state {
|
|
+ struct mm_struct *loaded_mm;
|
|
+ union {
|
|
+ struct mm_struct *last_user_mm;
|
|
+ long unsigned int last_user_mm_ibpb;
|
|
+ };
|
|
+ u16 loaded_mm_asid;
|
|
+ u16 next_asid;
|
|
+ bool is_lazy;
|
|
+ bool invalidate_other;
|
|
+ short unsigned int user_pcid_flush_mask;
|
|
+ long unsigned int cr4;
|
|
+ struct tlb_context ctxs[6];
|
|
+};
|
|
+
|
|
+struct exception_table_entry {
|
|
+ int insn;
|
|
+ int fixup;
|
|
+ int handler;
|
|
+};
|
|
+
|
|
+enum e820_type {
|
|
+ E820_TYPE_RAM = 1,
|
|
+ E820_TYPE_RESERVED = 2,
|
|
+ E820_TYPE_ACPI = 3,
|
|
+ E820_TYPE_NVS = 4,
|
|
+ E820_TYPE_UNUSABLE = 5,
|
|
+ E820_TYPE_PMEM = 7,
|
|
+ E820_TYPE_PRAM = 12,
|
|
+ E820_TYPE_RESERVED_KERN = 128,
|
|
+};
|
|
+
|
|
+struct e820_entry {
|
|
+ u64 addr;
|
|
+ u64 size;
|
|
+ enum e820_type type;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct e820_table {
|
|
+ __u32 nr_entries;
|
|
+ struct e820_entry entries[3200];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct boot_params_to_save {
|
|
+ unsigned int start;
|
|
+ unsigned int len;
|
|
+};
|
|
+
|
|
+struct idr {
|
|
+ struct radix_tree_root idr_rt;
|
|
+ unsigned int idr_base;
|
|
+ unsigned int idr_next;
|
|
+};
|
|
+
|
|
+struct ida_bitmap {
|
|
+ long unsigned int bitmap[16];
|
|
+};
|
|
+
|
|
+struct kernfs_root;
|
|
+
|
|
+struct kernfs_elem_dir {
|
|
+ long unsigned int subdirs;
|
|
+ struct rb_root children;
|
|
+ struct kernfs_root *root;
|
|
+};
|
|
+
|
|
+struct kernfs_syscall_ops;
|
|
+
|
|
+struct kernfs_root {
|
|
+ struct kernfs_node *kn;
|
|
+ unsigned int flags;
|
|
+ struct idr ino_idr;
|
|
+ u32 last_ino;
|
|
+ u32 next_generation;
|
|
+ struct kernfs_syscall_ops *syscall_ops;
|
|
+ struct list_head supers;
|
|
+ wait_queue_head_t deactivate_waitq;
|
|
+};
|
|
+
|
|
+struct kernfs_elem_symlink {
|
|
+ struct kernfs_node *target_kn;
|
|
+};
|
|
+
|
|
+struct kernfs_ops;
|
|
+
|
|
+struct kernfs_open_node;
|
|
+
|
|
+struct kernfs_elem_attr {
|
|
+ const struct kernfs_ops *ops;
|
|
+ struct kernfs_open_node *open;
|
|
+ loff_t size;
|
|
+ struct kernfs_node *notify_next;
|
|
+};
|
|
+
|
|
+union kernfs_node_id {
|
|
+ struct {
|
|
+ u32 ino;
|
|
+ u32 generation;
|
|
+ };
|
|
+ u64 id;
|
|
+};
|
|
+
|
|
+struct kernfs_iattrs;
|
|
+
|
|
+struct kernfs_node {
|
|
+ atomic_t count;
|
|
+ atomic_t active;
|
|
+ struct kernfs_node *parent;
|
|
+ const char *name;
|
|
+ struct rb_node rb;
|
|
+ const void *ns;
|
|
+ unsigned int hash;
|
|
+ union {
|
|
+ struct kernfs_elem_dir dir;
|
|
+ struct kernfs_elem_symlink symlink;
|
|
+ struct kernfs_elem_attr attr;
|
|
+ };
|
|
+ void *priv;
|
|
+ union kernfs_node_id id;
|
|
+ short unsigned int flags;
|
|
+ umode_t mode;
|
|
+ struct kernfs_iattrs *iattr;
|
|
+};
|
|
+
|
|
+struct kernfs_open_file;
|
|
+
|
|
+struct kernfs_ops {
|
|
+ int (*open)(struct kernfs_open_file *);
|
|
+ void (*release)(struct kernfs_open_file *);
|
|
+ int (*seq_show)(struct seq_file *, void *);
|
|
+ void * (*seq_start)(struct seq_file *, loff_t *);
|
|
+ void * (*seq_next)(struct seq_file *, void *, loff_t *);
|
|
+ void (*seq_stop)(struct seq_file *, void *);
|
|
+ ssize_t (*read)(struct kernfs_open_file *, char *, size_t, loff_t);
|
|
+ size_t atomic_write_len;
|
|
+ bool prealloc;
|
|
+ ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t);
|
|
+ int (*mmap)(struct kernfs_open_file *, struct vm_area_struct *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct kernfs_syscall_ops {
|
|
+ int (*remount_fs)(struct kernfs_root *, int *, char *);
|
|
+ int (*show_options)(struct seq_file *, struct kernfs_root *);
|
|
+ int (*mkdir)(struct kernfs_node *, const char *, umode_t);
|
|
+ int (*rmdir)(struct kernfs_node *);
|
|
+ int (*rename)(struct kernfs_node *, struct kernfs_node *, const char *);
|
|
+ int (*show_path)(struct seq_file *, struct kernfs_node *, struct kernfs_root *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct kernfs_open_file {
|
|
+ struct kernfs_node *kn;
|
|
+ struct file *file;
|
|
+ struct seq_file *seq_file;
|
|
+ void *priv;
|
|
+ struct mutex mutex;
|
|
+ struct mutex prealloc_mutex;
|
|
+ int event;
|
|
+ struct list_head list;
|
|
+ char *prealloc_buf;
|
|
+ size_t atomic_write_len;
|
|
+ bool mmapped: 1;
|
|
+ bool released: 1;
|
|
+ const struct vm_operations_struct *vm_ops;
|
|
+};
|
|
+
|
|
+enum kobj_ns_type {
|
|
+ KOBJ_NS_TYPE_NONE = 0,
|
|
+ KOBJ_NS_TYPE_NET = 1,
|
|
+ KOBJ_NS_TYPES = 2,
|
|
+};
|
|
+
|
|
+struct sock;
|
|
+
|
|
+struct kobj_ns_type_operations {
|
|
+ enum kobj_ns_type type;
|
|
+ bool (*current_may_mount)();
|
|
+ void * (*grab_current_ns)();
|
|
+ const void * (*netlink_ns)(struct sock *);
|
|
+ const void * (*initial_ns)();
|
|
+ void (*drop_ns)(void *);
|
|
+};
|
|
+
|
|
+struct bin_attribute;
|
|
+
|
|
+struct attribute_group {
|
|
+ const char *name;
|
|
+ umode_t (*is_visible)(struct kobject *, struct attribute *, int);
|
|
+ umode_t (*is_bin_visible)(struct kobject *, struct bin_attribute *, int);
|
|
+ struct attribute **attrs;
|
|
+ struct bin_attribute **bin_attrs;
|
|
+};
|
|
+
|
|
+struct bin_attribute {
|
|
+ struct attribute attr;
|
|
+ size_t size;
|
|
+ void *private;
|
|
+ ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t);
|
|
+ ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t);
|
|
+ int (*mmap)(struct file *, struct kobject *, struct bin_attribute *, struct vm_area_struct *);
|
|
+};
|
|
+
|
|
+struct sysfs_ops {
|
|
+ ssize_t (*show)(struct kobject *, struct attribute *, char *);
|
|
+ ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct kset_uevent_ops;
|
|
+
|
|
+struct kset {
|
|
+ struct list_head list;
|
|
+ spinlock_t list_lock;
|
|
+ struct kobject kobj;
|
|
+ const struct kset_uevent_ops *uevent_ops;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct kobj_type {
|
|
+ void (*release)(struct kobject *);
|
|
+ const struct sysfs_ops *sysfs_ops;
|
|
+ struct attribute **default_attrs;
|
|
+ const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject *);
|
|
+ const void * (*namespace)(struct kobject *);
|
|
+ void (*get_ownership)(struct kobject *, kuid_t *, kgid_t *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct kobj_uevent_env {
|
|
+ char *argv[3];
|
|
+ char *envp[32];
|
|
+ int envp_idx;
|
|
+ char buf[2048];
|
|
+ int buflen;
|
|
+};
|
|
+
|
|
+struct kset_uevent_ops {
|
|
+ int (* const filter)(struct kset *, struct kobject *);
|
|
+ const char * (* const name)(struct kset *, struct kobject *);
|
|
+ int (* const uevent)(struct kset *, struct kobject *, struct kobj_uevent_env *);
|
|
+};
|
|
+
|
|
+struct dev_pm_ops {
|
|
+ int (*prepare)(struct device *);
|
|
+ void (*complete)(struct device *);
|
|
+ int (*suspend)(struct device *);
|
|
+ int (*resume)(struct device *);
|
|
+ int (*freeze)(struct device *);
|
|
+ int (*thaw)(struct device *);
|
|
+ int (*poweroff)(struct device *);
|
|
+ int (*restore)(struct device *);
|
|
+ int (*suspend_late)(struct device *);
|
|
+ int (*resume_early)(struct device *);
|
|
+ int (*freeze_late)(struct device *);
|
|
+ int (*thaw_early)(struct device *);
|
|
+ int (*poweroff_late)(struct device *);
|
|
+ int (*restore_early)(struct device *);
|
|
+ int (*suspend_noirq)(struct device *);
|
|
+ int (*resume_noirq)(struct device *);
|
|
+ int (*freeze_noirq)(struct device *);
|
|
+ int (*thaw_noirq)(struct device *);
|
|
+ int (*poweroff_noirq)(struct device *);
|
|
+ int (*restore_noirq)(struct device *);
|
|
+ int (*runtime_suspend)(struct device *);
|
|
+ int (*runtime_resume)(struct device *);
|
|
+ int (*runtime_idle)(struct device *);
|
|
+};
|
|
+
|
|
+struct pm_domain_data;
|
|
+
|
|
+struct pm_subsys_data {
|
|
+ spinlock_t lock;
|
|
+ unsigned int refcount;
|
|
+ struct list_head clock_list;
|
|
+ struct pm_domain_data *domain_data;
|
|
+};
|
|
+
|
|
+struct wakeup_source {
|
|
+ const char *name;
|
|
+ struct list_head entry;
|
|
+ spinlock_t lock;
|
|
+ struct wake_irq *wakeirq;
|
|
+ struct timer_list timer;
|
|
+ long unsigned int timer_expires;
|
|
+ ktime_t total_time;
|
|
+ ktime_t max_time;
|
|
+ ktime_t last_time;
|
|
+ ktime_t start_prevent_time;
|
|
+ ktime_t prevent_sleep_time;
|
|
+ long unsigned int event_count;
|
|
+ long unsigned int active_count;
|
|
+ long unsigned int relax_count;
|
|
+ long unsigned int expire_count;
|
|
+ long unsigned int wakeup_count;
|
|
+ bool active: 1;
|
|
+ bool autosleep_enabled: 1;
|
|
+};
|
|
+
|
|
+struct dev_pm_domain {
|
|
+ struct dev_pm_ops ops;
|
|
+ void (*detach)(struct device *, bool);
|
|
+ int (*activate)(struct device *);
|
|
+ void (*sync)(struct device *);
|
|
+ void (*dismiss)(struct device *);
|
|
+};
|
|
+
|
|
+struct ratelimit_state {
|
|
+ raw_spinlock_t lock;
|
|
+ int interval;
|
|
+ int burst;
|
|
+ int printed;
|
|
+ int missed;
|
|
+ long unsigned int begin;
|
|
+ long unsigned int flags;
|
|
+};
|
|
+
|
|
+typedef u64 dma_addr_t;
|
|
+
|
|
+enum dma_data_direction {
|
|
+ DMA_BIDIRECTIONAL = 0,
|
|
+ DMA_TO_DEVICE = 1,
|
|
+ DMA_FROM_DEVICE = 2,
|
|
+ DMA_NONE = 3,
|
|
+};
|
|
+
|
|
+struct sg_table;
|
|
+
|
|
+struct scatterlist;
|
|
+
|
|
+struct dma_map_ops {
|
|
+ void * (*alloc)(struct device *, size_t, dma_addr_t *, gfp_t, long unsigned int);
|
|
+ void (*free)(struct device *, size_t, void *, dma_addr_t, long unsigned int);
|
|
+ int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, long unsigned int);
|
|
+ int (*get_sgtable)(struct device *, struct sg_table *, void *, dma_addr_t, size_t, long unsigned int);
|
|
+ dma_addr_t (*map_page)(struct device *, struct page *, long unsigned int, size_t, enum dma_data_direction, long unsigned int);
|
|
+ void (*unmap_page)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int);
|
|
+ int (*map_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int);
|
|
+ void (*unmap_sg)(struct device *, struct scatterlist *, int, enum dma_data_direction, long unsigned int);
|
|
+ dma_addr_t (*map_resource)(struct device *, phys_addr_t, size_t, enum dma_data_direction, long unsigned int);
|
|
+ void (*unmap_resource)(struct device *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int);
|
|
+ void (*sync_single_for_cpu)(struct device *, dma_addr_t, size_t, enum dma_data_direction);
|
|
+ void (*sync_single_for_device)(struct device *, dma_addr_t, size_t, enum dma_data_direction);
|
|
+ void (*sync_sg_for_cpu)(struct device *, struct scatterlist *, int, enum dma_data_direction);
|
|
+ void (*sync_sg_for_device)(struct device *, struct scatterlist *, int, enum dma_data_direction);
|
|
+ void (*cache_sync)(struct device *, void *, size_t, enum dma_data_direction);
|
|
+ int (*mapping_error)(struct device *, dma_addr_t);
|
|
+ int (*dma_supported)(struct device *, u64);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+};
|
|
+
|
|
+struct iommu_ops;
|
|
+
|
|
+struct subsys_private;
|
|
+
|
|
+struct bus_type {
|
|
+ const char *name;
|
|
+ const char *dev_name;
|
|
+ struct device *dev_root;
|
|
+ const struct attribute_group **bus_groups;
|
|
+ const struct attribute_group **dev_groups;
|
|
+ const struct attribute_group **drv_groups;
|
|
+ int (*match)(struct device *, struct device_driver *);
|
|
+ int (*uevent)(struct device *, struct kobj_uevent_env *);
|
|
+ int (*probe)(struct device *);
|
|
+ int (*remove)(struct device *);
|
|
+ void (*shutdown)(struct device *);
|
|
+ int (*online)(struct device *);
|
|
+ int (*offline)(struct device *);
|
|
+ int (*suspend)(struct device *, pm_message_t);
|
|
+ int (*resume)(struct device *);
|
|
+ int (*num_vf)(struct device *);
|
|
+ int (*dma_configure)(struct device *);
|
|
+ const struct dev_pm_ops *pm;
|
|
+ const struct iommu_ops *iommu_ops;
|
|
+ struct subsys_private *p;
|
|
+ struct lock_class_key lock_key;
|
|
+ bool need_parent_lock;
|
|
+};
|
|
+
|
|
+enum probe_type {
|
|
+ PROBE_DEFAULT_STRATEGY = 0,
|
|
+ PROBE_PREFER_ASYNCHRONOUS = 1,
|
|
+ PROBE_FORCE_SYNCHRONOUS = 2,
|
|
+};
|
|
+
|
|
+struct of_device_id;
|
|
+
|
|
+struct acpi_device_id;
|
|
+
|
|
+struct driver_private;
|
|
+
|
|
+struct device_driver {
|
|
+ const char *name;
|
|
+ struct bus_type *bus;
|
|
+ struct module *owner;
|
|
+ const char *mod_name;
|
|
+ bool suppress_bind_attrs;
|
|
+ enum probe_type probe_type;
|
|
+ const struct of_device_id *of_match_table;
|
|
+ const struct acpi_device_id *acpi_match_table;
|
|
+ int (*probe)(struct device *);
|
|
+ int (*remove)(struct device *);
|
|
+ void (*shutdown)(struct device *);
|
|
+ int (*suspend)(struct device *, pm_message_t);
|
|
+ int (*resume)(struct device *);
|
|
+ const struct attribute_group **groups;
|
|
+ const struct dev_pm_ops *pm;
|
|
+ void (*coredump)(struct device *);
|
|
+ struct driver_private *p;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+enum iommu_cap {
|
|
+ IOMMU_CAP_CACHE_COHERENCY = 0,
|
|
+ IOMMU_CAP_INTR_REMAP = 1,
|
|
+ IOMMU_CAP_NOEXEC = 2,
|
|
+};
|
|
+
|
|
+enum iommu_attr {
|
|
+ DOMAIN_ATTR_GEOMETRY = 0,
|
|
+ DOMAIN_ATTR_PAGING = 1,
|
|
+ DOMAIN_ATTR_WINDOWS = 2,
|
|
+ DOMAIN_ATTR_FSL_PAMU_STASH = 3,
|
|
+ DOMAIN_ATTR_FSL_PAMU_ENABLE = 4,
|
|
+ DOMAIN_ATTR_FSL_PAMUV1 = 5,
|
|
+ DOMAIN_ATTR_NESTING = 6,
|
|
+ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE = 7,
|
|
+ DOMAIN_ATTR_MAX = 8,
|
|
+};
|
|
+
|
|
+struct iommu_domain;
|
|
+
|
|
+struct iommu_sva_param;
|
|
+
|
|
+struct io_mm;
|
|
+
|
|
+struct iommu_resv_region;
|
|
+
|
|
+struct of_phandle_args;
|
|
+
|
|
+struct pasid_table_config;
|
|
+
|
|
+struct tlb_invalidate_info;
|
|
+
|
|
+struct page_response_msg;
|
|
+
|
|
+struct iommu_ops {
|
|
+ bool (*capable)(enum iommu_cap);
|
|
+ struct iommu_domain * (*domain_alloc)(unsigned int);
|
|
+ void (*domain_free)(struct iommu_domain *);
|
|
+ int (*attach_dev)(struct iommu_domain *, struct device *);
|
|
+ void (*detach_dev)(struct iommu_domain *, struct device *);
|
|
+ int (*sva_device_init)(struct device *, struct iommu_sva_param *);
|
|
+ void (*sva_device_shutdown)(struct device *, struct iommu_sva_param *);
|
|
+ struct io_mm * (*mm_alloc)(struct iommu_domain *, struct mm_struct *, long unsigned int);
|
|
+ void (*mm_free)(struct io_mm *);
|
|
+ int (*mm_attach)(struct iommu_domain *, struct device *, struct io_mm *, bool);
|
|
+ void (*mm_detach)(struct iommu_domain *, struct device *, struct io_mm *, bool);
|
|
+ void (*mm_invalidate)(struct iommu_domain *, struct device *, struct io_mm *, long unsigned int, size_t);
|
|
+ int (*map)(struct iommu_domain *, long unsigned int, phys_addr_t, size_t, int);
|
|
+ size_t (*unmap)(struct iommu_domain *, long unsigned int, size_t);
|
|
+ void (*flush_iotlb_all)(struct iommu_domain *);
|
|
+ void (*iotlb_range_add)(struct iommu_domain *, long unsigned int, size_t);
|
|
+ void (*iotlb_sync)(struct iommu_domain *);
|
|
+ phys_addr_t (*iova_to_phys)(struct iommu_domain *, dma_addr_t);
|
|
+ int (*add_device)(struct device *);
|
|
+ void (*remove_device)(struct device *);
|
|
+ struct iommu_group * (*device_group)(struct device *);
|
|
+ int (*domain_get_attr)(struct iommu_domain *, enum iommu_attr, void *);
|
|
+ int (*domain_set_attr)(struct iommu_domain *, enum iommu_attr, void *);
|
|
+ void (*get_resv_regions)(struct device *, struct list_head *);
|
|
+ void (*put_resv_regions)(struct device *, struct list_head *);
|
|
+ void (*apply_resv_region)(struct device *, struct iommu_domain *, struct iommu_resv_region *);
|
|
+ int (*domain_window_enable)(struct iommu_domain *, u32, phys_addr_t, u64, int);
|
|
+ void (*domain_window_disable)(struct iommu_domain *, u32);
|
|
+ int (*domain_set_windows)(struct iommu_domain *, u32);
|
|
+ u32 (*domain_get_windows)(struct iommu_domain *);
|
|
+ int (*of_xlate)(struct device *, struct of_phandle_args *);
|
|
+ bool (*is_attach_deferred)(struct iommu_domain *, struct device *);
|
|
+ int (*bind_pasid_table)(struct iommu_domain *, struct device *, struct pasid_table_config *);
|
|
+ void (*unbind_pasid_table)(struct iommu_domain *, struct device *);
|
|
+ int (*sva_invalidate)(struct iommu_domain *, struct device *, struct tlb_invalidate_info *);
|
|
+ int (*page_response)(struct device *, struct page_response_msg *);
|
|
+ long unsigned int pgsize_bitmap;
|
|
+};
|
|
+
|
|
+struct device_type {
|
|
+ const char *name;
|
|
+ const struct attribute_group **groups;
|
|
+ int (*uevent)(struct device *, struct kobj_uevent_env *);
|
|
+ char * (*devnode)(struct device *, umode_t *, kuid_t *, kgid_t *);
|
|
+ void (*release)(struct device *);
|
|
+ const struct dev_pm_ops *pm;
|
|
+};
|
|
+
|
|
+struct of_device_id {
|
|
+ char name[32];
|
|
+ char type[32];
|
|
+ char compatible[128];
|
|
+ const void *data;
|
|
+};
|
|
+
|
|
+typedef long unsigned int kernel_ulong_t;
|
|
+
|
|
+struct acpi_device_id {
|
|
+ __u8 id[9];
|
|
+ kernel_ulong_t driver_data;
|
|
+ __u32 cls;
|
|
+ __u32 cls_msk;
|
|
+};
|
|
+
|
|
+struct class {
|
|
+ const char *name;
|
|
+ struct module *owner;
|
|
+ const struct attribute_group **class_groups;
|
|
+ const struct attribute_group **dev_groups;
|
|
+ struct kobject *dev_kobj;
|
|
+ int (*dev_uevent)(struct device *, struct kobj_uevent_env *);
|
|
+ char * (*devnode)(struct device *, umode_t *);
|
|
+ void (*class_release)(struct class *);
|
|
+ void (*dev_release)(struct device *);
|
|
+ int (*shutdown_pre)(struct device *);
|
|
+ const struct kobj_ns_type_operations *ns_type;
|
|
+ const void * (*namespace)(struct device *);
|
|
+ void (*get_ownership)(struct device *, kuid_t *, kgid_t *);
|
|
+ const struct dev_pm_ops *pm;
|
|
+ struct subsys_private *p;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct device_dma_parameters {
|
|
+ unsigned int max_segment_size;
|
|
+ long unsigned int segment_boundary_mask;
|
|
+};
|
|
+
|
|
+struct node {
|
|
+ struct device dev;
|
|
+ struct list_head access_list;
|
|
+ struct work_struct node_work;
|
|
+ struct list_head cache_attrs;
|
|
+ struct device *cache_dev;
|
|
+};
|
|
+
|
|
+enum cpuhp_smt_control {
|
|
+ CPU_SMT_ENABLED = 0,
|
|
+ CPU_SMT_DISABLED = 1,
|
|
+ CPU_SMT_FORCE_DISABLED = 2,
|
|
+ CPU_SMT_NOT_SUPPORTED = 3,
|
|
+};
|
|
+
|
|
+struct cpu_signature {
|
|
+ unsigned int sig;
|
|
+ unsigned int pf;
|
|
+ unsigned int rev;
|
|
+};
|
|
+
|
|
+struct ucode_cpu_info {
|
|
+ struct cpu_signature cpu_sig;
|
|
+ int valid;
|
|
+ void *mc;
|
|
+};
|
|
+
|
|
+typedef long unsigned int pto_T__;
|
|
+
|
|
+struct kobject___2;
|
|
+
|
|
+struct kobj_attribute___2;
|
|
+
|
|
+struct atomic_notifier_head___2;
|
|
+
|
|
+typedef s32 int32_t;
|
|
+
|
|
+struct kernel_symbol {
|
|
+ int value_offset;
|
|
+ int name_offset;
|
|
+};
|
|
+
|
|
+typedef int (*initcall_t)();
|
|
+
|
|
+struct obs_kernel_param {
|
|
+ const char *str;
|
|
+ int (*setup_func)(char *);
|
|
+ int early;
|
|
+};
|
|
+
|
|
+struct _ddebug {
|
|
+ const char *modname;
|
|
+ const char *function;
|
|
+ const char *filename;
|
|
+ const char *format;
|
|
+ unsigned int lineno: 18;
|
|
+ unsigned int flags: 8;
|
|
+ union {
|
|
+ struct static_key_true dd_key_true;
|
|
+ struct static_key_false dd_key_false;
|
|
+ } key;
|
|
+};
|
|
+
|
|
+enum ftrace_dump_mode {
|
|
+ DUMP_NONE = 0,
|
|
+ DUMP_ALL = 1,
|
|
+ DUMP_ORIG = 2,
|
|
+};
|
|
+
|
|
+struct bug_entry {
|
|
+ int bug_addr_disp;
|
|
+ int file_disp;
|
|
+ short unsigned int line;
|
|
+ short unsigned int flags;
|
|
+};
|
|
+
|
|
+typedef s32 compat_time_t;
|
|
+
|
|
+struct compat_timespec {
|
|
+ compat_time_t tv_sec;
|
|
+ s32 tv_nsec;
|
|
+};
|
|
+
|
|
+struct pollfd {
|
|
+ int fd;
|
|
+ short int events;
|
|
+ short int revents;
|
|
+};
|
|
+
|
|
+typedef const int tracepoint_ptr_t;
|
|
+
|
|
+struct bpf_raw_event_map {
|
|
+ struct tracepoint *tp;
|
|
+ void *bpf_func;
|
|
+ u32 num_args;
|
|
+ u32 writable_size;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct orc_entry {
|
|
+ s16 sp_offset;
|
|
+ s16 bp_offset;
|
|
+ unsigned int sp_reg: 4;
|
|
+ unsigned int bp_reg: 4;
|
|
+ unsigned int type: 2;
|
|
+ unsigned int end: 1;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct seq_operations___2 {
|
|
+ void * (*start)(struct seq_file *, loff_t *);
|
|
+ void (*stop)(struct seq_file *, void *);
|
|
+ void * (*next)(struct seq_file *, void *, loff_t *);
|
|
+ int (*show)(struct seq_file *, void *);
|
|
+};
|
|
+
|
|
+enum perf_event_state {
|
|
+ PERF_EVENT_STATE_DEAD = -4,
|
|
+ PERF_EVENT_STATE_EXIT = -3,
|
|
+ PERF_EVENT_STATE_ERROR = -2,
|
|
+ PERF_EVENT_STATE_OFF = -1,
|
|
+ PERF_EVENT_STATE_INACTIVE = 0,
|
|
+ PERF_EVENT_STATE_ACTIVE = 1,
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ atomic_long_t a;
|
|
+} local_t;
|
|
+
|
|
+typedef struct {
|
|
+ local_t a;
|
|
+} local64_t;
|
|
+
|
|
+struct perf_event_attr {
|
|
+ __u32 type;
|
|
+ __u32 size;
|
|
+ __u64 config;
|
|
+ union {
|
|
+ __u64 sample_period;
|
|
+ __u64 sample_freq;
|
|
+ };
|
|
+ __u64 sample_type;
|
|
+ __u64 read_format;
|
|
+ __u64 disabled: 1;
|
|
+ __u64 inherit: 1;
|
|
+ __u64 pinned: 1;
|
|
+ __u64 exclusive: 1;
|
|
+ __u64 exclude_user: 1;
|
|
+ __u64 exclude_kernel: 1;
|
|
+ __u64 exclude_hv: 1;
|
|
+ __u64 exclude_idle: 1;
|
|
+ __u64 mmap: 1;
|
|
+ __u64 comm: 1;
|
|
+ __u64 freq: 1;
|
|
+ __u64 inherit_stat: 1;
|
|
+ __u64 enable_on_exec: 1;
|
|
+ __u64 task: 1;
|
|
+ __u64 watermark: 1;
|
|
+ __u64 precise_ip: 2;
|
|
+ __u64 mmap_data: 1;
|
|
+ __u64 sample_id_all: 1;
|
|
+ __u64 exclude_host: 1;
|
|
+ __u64 exclude_guest: 1;
|
|
+ __u64 exclude_callchain_kernel: 1;
|
|
+ __u64 exclude_callchain_user: 1;
|
|
+ __u64 mmap2: 1;
|
|
+ __u64 comm_exec: 1;
|
|
+ __u64 use_clockid: 1;
|
|
+ __u64 context_switch: 1;
|
|
+ __u64 write_backward: 1;
|
|
+ __u64 namespaces: 1;
|
|
+ __u64 __reserved_1: 35;
|
|
+ union {
|
|
+ __u32 wakeup_events;
|
|
+ __u32 wakeup_watermark;
|
|
+ };
|
|
+ __u32 bp_type;
|
|
+ union {
|
|
+ __u64 bp_addr;
|
|
+ __u64 kprobe_func;
|
|
+ __u64 uprobe_path;
|
|
+ __u64 config1;
|
|
+ };
|
|
+ union {
|
|
+ __u64 bp_len;
|
|
+ __u64 kprobe_addr;
|
|
+ __u64 probe_offset;
|
|
+ __u64 config2;
|
|
+ };
|
|
+ __u64 branch_sample_type;
|
|
+ __u64 sample_regs_user;
|
|
+ __u32 sample_stack_user;
|
|
+ __s32 clockid;
|
|
+ __u64 sample_regs_intr;
|
|
+ __u32 aux_watermark;
|
|
+ __u16 sample_max_stack;
|
|
+ __u16 __reserved_2;
|
|
+};
|
|
+
|
|
+struct hw_perf_event_extra {
|
|
+ u64 config;
|
|
+ unsigned int reg;
|
|
+ int alloc;
|
|
+ int idx;
|
|
+};
|
|
+
|
|
+struct arch_hw_breakpoint {
|
|
+ long unsigned int address;
|
|
+ long unsigned int mask;
|
|
+ u8 len;
|
|
+ u8 type;
|
|
+};
|
|
+
|
|
+struct hw_perf_event {
|
|
+ union {
|
|
+ struct {
|
|
+ u64 config;
|
|
+ u64 last_tag;
|
|
+ long unsigned int config_base;
|
|
+ long unsigned int event_base;
|
|
+ int event_base_rdpmc;
|
|
+ int idx;
|
|
+ int last_cpu;
|
|
+ int flags;
|
|
+ struct hw_perf_event_extra extra_reg;
|
|
+ struct hw_perf_event_extra branch_reg;
|
|
+ };
|
|
+ struct {
|
|
+ struct hrtimer hrtimer;
|
|
+ };
|
|
+ struct {
|
|
+ struct list_head tp_list;
|
|
+ };
|
|
+ struct {
|
|
+ u64 pwr_acc;
|
|
+ u64 ptsc;
|
|
+ };
|
|
+ struct {
|
|
+ struct arch_hw_breakpoint info;
|
|
+ struct list_head bp_list;
|
|
+ };
|
|
+ struct {
|
|
+ u8 iommu_bank;
|
|
+ u8 iommu_cntr;
|
|
+ u16 padding;
|
|
+ u64 conf;
|
|
+ u64 conf1;
|
|
+ };
|
|
+ };
|
|
+ struct task_struct *target;
|
|
+ void *addr_filters;
|
|
+ long unsigned int addr_filters_gen;
|
|
+ int state;
|
|
+ local64_t prev_count;
|
|
+ u64 sample_period;
|
|
+ union {
|
|
+ struct {
|
|
+ u64 last_period;
|
|
+ local64_t period_left;
|
|
+ };
|
|
+ struct {
|
|
+ u64 saved_metric;
|
|
+ u64 saved_slots;
|
|
+ };
|
|
+ };
|
|
+ u64 interrupts_seq;
|
|
+ u64 interrupts;
|
|
+ u64 freq_time_stamp;
|
|
+ u64 freq_count_stamp;
|
|
+};
|
|
+
|
|
+struct irq_work {
|
|
+ long unsigned int flags;
|
|
+ struct llist_node llnode;
|
|
+ void (*func)(struct irq_work *);
|
|
+};
|
|
+
|
|
+struct perf_addr_filters_head {
|
|
+ struct list_head list;
|
|
+ raw_spinlock_t lock;
|
|
+ unsigned int nr_file_filters;
|
|
+};
|
|
+
|
|
+struct perf_sample_data;
|
|
+
|
|
+typedef void (*perf_overflow_handler_t)(struct perf_event *, struct perf_sample_data *, struct pt_regs *);
|
|
+
|
|
+struct ftrace_ops;
|
|
+
|
|
+typedef void (*ftrace_func_t)(long unsigned int, long unsigned int, struct ftrace_ops *, struct pt_regs *);
|
|
+
|
|
+struct ftrace_hash;
|
|
+
|
|
+struct ftrace_ops_hash {
|
|
+ struct ftrace_hash *notrace_hash;
|
|
+ struct ftrace_hash *filter_hash;
|
|
+ struct mutex regex_lock;
|
|
+};
|
|
+
|
|
+struct ftrace_ops {
|
|
+ ftrace_func_t func;
|
|
+ struct ftrace_ops *next;
|
|
+ long unsigned int flags;
|
|
+ void *private;
|
|
+ ftrace_func_t saved_func;
|
|
+ struct ftrace_ops_hash local_hash;
|
|
+ struct ftrace_ops_hash *func_hash;
|
|
+ struct ftrace_ops_hash old_hash;
|
|
+ long unsigned int trampoline;
|
|
+ long unsigned int trampoline_size;
|
|
+};
|
|
+
|
|
+struct pmu;
|
|
+
|
|
+struct ring_buffer;
|
|
+
|
|
+struct perf_addr_filter_range;
|
|
+
|
|
+struct bpf_prog;
|
|
+
|
|
+struct trace_event_call;
|
|
+
|
|
+struct event_filter;
|
|
+
|
|
+struct perf_cgroup;
|
|
+
|
|
+struct perf_event {
|
|
+ struct list_head event_entry;
|
|
+ struct list_head sibling_list;
|
|
+ struct list_head active_list;
|
|
+ struct rb_node group_node;
|
|
+ u64 group_index;
|
|
+ struct list_head migrate_entry;
|
|
+ struct hlist_node hlist_entry;
|
|
+ struct list_head active_entry;
|
|
+ int nr_siblings;
|
|
+ int event_caps;
|
|
+ int group_caps;
|
|
+ struct perf_event *group_leader;
|
|
+ struct pmu *pmu;
|
|
+ void *pmu_private;
|
|
+ enum perf_event_state state;
|
|
+ unsigned int attach_state;
|
|
+ local64_t count;
|
|
+ atomic64_t child_count;
|
|
+ u64 total_time_enabled;
|
|
+ u64 total_time_running;
|
|
+ u64 tstamp;
|
|
+ u64 shadow_ctx_time;
|
|
+ struct perf_event_attr attr;
|
|
+ u16 header_size;
|
|
+ u16 id_header_size;
|
|
+ u16 read_size;
|
|
+ struct hw_perf_event hw;
|
|
+ struct perf_event_context *ctx;
|
|
+ atomic_long_t refcount;
|
|
+ atomic64_t child_total_time_enabled;
|
|
+ atomic64_t child_total_time_running;
|
|
+ struct mutex child_mutex;
|
|
+ struct list_head child_list;
|
|
+ struct perf_event *parent;
|
|
+ int oncpu;
|
|
+ int cpu;
|
|
+ struct list_head owner_entry;
|
|
+ struct task_struct *owner;
|
|
+ struct mutex mmap_mutex;
|
|
+ atomic_t mmap_count;
|
|
+ struct ring_buffer *rb;
|
|
+ struct list_head rb_entry;
|
|
+ long unsigned int rcu_batches;
|
|
+ int rcu_pending;
|
|
+ wait_queue_head_t waitq;
|
|
+ struct fasync_struct *fasync;
|
|
+ int pending_wakeup;
|
|
+ int pending_kill;
|
|
+ int pending_disable;
|
|
+ struct irq_work pending;
|
|
+ atomic_t event_limit;
|
|
+ struct perf_addr_filters_head addr_filters;
|
|
+ struct perf_addr_filter_range *addr_filter_ranges;
|
|
+ long unsigned int addr_filters_gen;
|
|
+ void (*destroy)(struct perf_event *);
|
|
+ struct callback_head callback_head;
|
|
+ struct pid_namespace *ns;
|
|
+ u64 id;
|
|
+ u64 (*clock)();
|
|
+ perf_overflow_handler_t overflow_handler;
|
|
+ void *overflow_handler_context;
|
|
+ perf_overflow_handler_t orig_overflow_handler;
|
|
+ struct bpf_prog *prog;
|
|
+ struct trace_event_call *tp_event;
|
|
+ struct event_filter *filter;
|
|
+ struct ftrace_ops ftrace_ops;
|
|
+ struct perf_cgroup *cgrp;
|
|
+ struct list_head sb_list;
|
|
+};
|
|
+
|
|
+struct lockdep_map {};
|
|
+
|
|
+struct uid_gid_extent {
|
|
+ u32 first;
|
|
+ u32 lower_first;
|
|
+ u32 count;
|
|
+};
|
|
+
|
|
+struct uid_gid_map {
|
|
+ u32 nr_extents;
|
|
+ union {
|
|
+ struct uid_gid_extent extent[5];
|
|
+ struct {
|
|
+ struct uid_gid_extent *forward;
|
|
+ struct uid_gid_extent *reverse;
|
|
+ };
|
|
+ };
|
|
+};
|
|
+
|
|
+struct proc_ns_operations;
|
|
+
|
|
+struct ns_common {
|
|
+ atomic_long_t stashed;
|
|
+ const struct proc_ns_operations *ops;
|
|
+ unsigned int inum;
|
|
+};
|
|
+
|
|
+struct ctl_table;
|
|
+
|
|
+struct ctl_table_root;
|
|
+
|
|
+struct ctl_table_set;
|
|
+
|
|
+struct ctl_dir;
|
|
+
|
|
+struct ctl_node;
|
|
+
|
|
+struct ctl_table_header {
|
|
+ union {
|
|
+ struct {
|
|
+ struct ctl_table *ctl_table;
|
|
+ int used;
|
|
+ int count;
|
|
+ int nreg;
|
|
+ };
|
|
+ struct callback_head rcu;
|
|
+ };
|
|
+ struct completion *unregistering;
|
|
+ struct ctl_table *ctl_table_arg;
|
|
+ struct ctl_table_root *root;
|
|
+ struct ctl_table_set *set;
|
|
+ struct ctl_dir *parent;
|
|
+ struct ctl_node *node;
|
|
+ struct hlist_head inodes;
|
|
+};
|
|
+
|
|
+struct ctl_dir {
|
|
+ struct ctl_table_header header;
|
|
+ struct rb_root root;
|
|
+};
|
|
+
|
|
+struct ctl_table_set {
|
|
+ int (*is_seen)(struct ctl_table_set *);
|
|
+ struct ctl_dir dir;
|
|
+};
|
|
+
|
|
+struct ucounts;
|
|
+
|
|
+struct user_namespace {
|
|
+ struct uid_gid_map uid_map;
|
|
+ struct uid_gid_map gid_map;
|
|
+ struct uid_gid_map projid_map;
|
|
+ atomic_t count;
|
|
+ struct user_namespace *parent;
|
|
+ int level;
|
|
+ kuid_t owner;
|
|
+ kgid_t group;
|
|
+ struct ns_common ns;
|
|
+ long unsigned int flags;
|
|
+ struct key *persistent_keyring_register;
|
|
+ struct rw_semaphore persistent_keyring_register_sem;
|
|
+ struct work_struct work;
|
|
+ struct ctl_table_set set;
|
|
+ struct ctl_table_header *sysctls;
|
|
+ struct ucounts *ucounts;
|
|
+ int ucount_max[9];
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+enum node_states {
|
|
+ N_POSSIBLE = 0,
|
|
+ N_ONLINE = 1,
|
|
+ N_NORMAL_MEMORY = 2,
|
|
+ N_HIGH_MEMORY = 2,
|
|
+ N_MEMORY = 3,
|
|
+ N_CPU = 4,
|
|
+ NR_NODE_STATES = 5,
|
|
+};
|
|
+
|
|
+enum lru_list {
|
|
+ LRU_INACTIVE_ANON = 0,
|
|
+ LRU_ACTIVE_ANON = 1,
|
|
+ LRU_INACTIVE_FILE = 2,
|
|
+ LRU_ACTIVE_FILE = 3,
|
|
+ LRU_UNEVICTABLE = 4,
|
|
+ NR_LRU_LISTS = 5,
|
|
+};
|
|
+
|
|
+struct delayed_work {
|
|
+ struct work_struct work;
|
|
+ struct timer_list timer;
|
|
+ struct workqueue_struct *wq;
|
|
+ int cpu;
|
|
+ long unsigned int data;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct rcu_work {
|
|
+ struct work_struct work;
|
|
+ struct callback_head rcu;
|
|
+ struct workqueue_struct *wq;
|
|
+};
|
|
+
|
|
+struct rcu_segcblist {
|
|
+ struct callback_head *head;
|
|
+ struct callback_head **tails[4];
|
|
+ long unsigned int gp_seq[4];
|
|
+ long int len;
|
|
+ long int len_lazy;
|
|
+};
|
|
+
|
|
+struct srcu_node;
|
|
+
|
|
+struct srcu_struct;
|
|
+
|
|
+struct srcu_data {
|
|
+ long unsigned int srcu_lock_count[2];
|
|
+ long unsigned int srcu_unlock_count[2];
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ spinlock_t lock;
|
|
+ struct rcu_segcblist srcu_cblist;
|
|
+ long unsigned int srcu_gp_seq_needed;
|
|
+ long unsigned int srcu_gp_seq_needed_exp;
|
|
+ bool srcu_cblist_invoking;
|
|
+ struct delayed_work work;
|
|
+ struct callback_head srcu_barrier_head;
|
|
+ struct srcu_node *mynode;
|
|
+ long unsigned int grpmask;
|
|
+ int cpu;
|
|
+ struct srcu_struct *sp;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct srcu_node {
|
|
+ spinlock_t lock;
|
|
+ long unsigned int srcu_have_cbs[4];
|
|
+ long unsigned int srcu_data_have_cbs[4];
|
|
+ long unsigned int srcu_gp_seq_needed_exp;
|
|
+ struct srcu_node *srcu_parent;
|
|
+ int grplo;
|
|
+ int grphi;
|
|
+};
|
|
+
|
|
+struct srcu_struct {
|
|
+ struct srcu_node node[521];
|
|
+ struct srcu_node *level[4];
|
|
+ struct mutex srcu_cb_mutex;
|
|
+ spinlock_t lock;
|
|
+ struct mutex srcu_gp_mutex;
|
|
+ unsigned int srcu_idx;
|
|
+ long unsigned int srcu_gp_seq;
|
|
+ long unsigned int srcu_gp_seq_needed;
|
|
+ long unsigned int srcu_gp_seq_needed_exp;
|
|
+ long unsigned int srcu_last_gp_end;
|
|
+ struct srcu_data *sda;
|
|
+ long unsigned int srcu_barrier_seq;
|
|
+ struct mutex srcu_barrier_mutex;
|
|
+ struct completion srcu_barrier_completion;
|
|
+ atomic_t srcu_barrier_cpu_cnt;
|
|
+ struct delayed_work work;
|
|
+};
|
|
+
|
|
+typedef void (*smp_call_func_t)(void *);
|
|
+
|
|
+struct __call_single_data {
|
|
+ struct llist_node llist;
|
|
+ smp_call_func_t func;
|
|
+ void *info;
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+typedef int proc_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
|
|
+
|
|
+struct ctl_table_poll;
|
|
+
|
|
+struct ctl_table {
|
|
+ const char *procname;
|
|
+ void *data;
|
|
+ int maxlen;
|
|
+ umode_t mode;
|
|
+ struct ctl_table *child;
|
|
+ proc_handler *proc_handler;
|
|
+ struct ctl_table_poll *poll;
|
|
+ void *extra1;
|
|
+ void *extra2;
|
|
+};
|
|
+
|
|
+struct ctl_table_poll {
|
|
+ atomic_t event;
|
|
+ wait_queue_head_t wait;
|
|
+};
|
|
+
|
|
+struct ctl_node {
|
|
+ struct rb_node node;
|
|
+ struct ctl_table_header *header;
|
|
+};
|
|
+
|
|
+struct ctl_table_root {
|
|
+ struct ctl_table_set default_set;
|
|
+ struct ctl_table_set * (*lookup)(struct ctl_table_root *);
|
|
+ void (*set_ownership)(struct ctl_table_header *, struct ctl_table *, kuid_t *, kgid_t *);
|
|
+ int (*permissions)(struct ctl_table_header *, struct ctl_table *);
|
|
+};
|
|
+
|
|
+enum umh_disable_depth {
|
|
+ UMH_ENABLED = 0,
|
|
+ UMH_FREEZING = 1,
|
|
+ UMH_DISABLED = 2,
|
|
+};
|
|
+
|
|
+struct cgroup;
|
|
+
|
|
+struct cgroup_subsys;
|
|
+
|
|
+struct cgroup_subsys_state {
|
|
+ struct cgroup *cgroup;
|
|
+ struct cgroup_subsys *ss;
|
|
+ struct percpu_ref refcnt;
|
|
+ struct list_head sibling;
|
|
+ struct list_head children;
|
|
+ struct list_head rstat_css_node;
|
|
+ int id;
|
|
+ unsigned int flags;
|
|
+ u64 serial_nr;
|
|
+ atomic_t online_cnt;
|
|
+ struct work_struct destroy_work;
|
|
+ struct rcu_work destroy_rwork;
|
|
+ struct cgroup_subsys_state *parent;
|
|
+};
|
|
+
|
|
+struct mem_cgroup_id {
|
|
+ int id;
|
|
+ atomic_t ref;
|
|
+};
|
|
+
|
|
+struct page_counter {
|
|
+ atomic_long_t usage;
|
|
+ long unsigned int min;
|
|
+ long unsigned int low;
|
|
+ long unsigned int max;
|
|
+ struct page_counter *parent;
|
|
+ long unsigned int emin;
|
|
+ atomic_long_t min_usage;
|
|
+ atomic_long_t children_min_usage;
|
|
+ long unsigned int elow;
|
|
+ atomic_long_t low_usage;
|
|
+ atomic_long_t children_low_usage;
|
|
+ long unsigned int watermark;
|
|
+ long unsigned int failcnt;
|
|
+};
|
|
+
|
|
+struct vmpressure {
|
|
+ long unsigned int scanned;
|
|
+ long unsigned int reclaimed;
|
|
+ long unsigned int tree_scanned;
|
|
+ long unsigned int tree_reclaimed;
|
|
+ struct spinlock sr_lock;
|
|
+ struct list_head events;
|
|
+ struct mutex events_lock;
|
|
+ struct work_struct work;
|
|
+};
|
|
+
|
|
+struct cgroup_file {
|
|
+ struct kernfs_node *kn;
|
|
+ long unsigned int notified_at;
|
|
+ struct timer_list notify_timer;
|
|
+};
|
|
+
|
|
+struct mem_cgroup_threshold_ary;
|
|
+
|
|
+struct mem_cgroup_thresholds {
|
|
+ struct mem_cgroup_threshold_ary *primary;
|
|
+ struct mem_cgroup_threshold_ary *spare;
|
|
+};
|
|
+
|
|
+struct memcg_padding {
|
|
+ char x[0];
|
|
+};
|
|
+
|
|
+enum memcg_kmem_state {
|
|
+ KMEM_NONE = 0,
|
|
+ KMEM_ALLOCATED = 1,
|
|
+ KMEM_ONLINE = 2,
|
|
+};
|
|
+
|
|
+struct fprop_global {
|
|
+ struct percpu_counter events;
|
|
+ unsigned int period;
|
|
+ seqcount_t sequence;
|
|
+};
|
|
+
|
|
+struct wb_domain {
|
|
+ spinlock_t lock;
|
|
+ struct fprop_global completions;
|
|
+ struct timer_list period_timer;
|
|
+ long unsigned int period_time;
|
|
+ long unsigned int dirty_limit_tstamp;
|
|
+ long unsigned int dirty_limit;
|
|
+};
|
|
+
|
|
+struct mem_cgroup_stat_cpu;
|
|
+
|
|
+struct mem_cgroup_per_node;
|
|
+
|
|
+struct mem_cgroup {
|
|
+ struct cgroup_subsys_state css;
|
|
+ struct mem_cgroup_id id;
|
|
+ struct page_counter memory;
|
|
+ struct page_counter swap;
|
|
+ struct page_counter memsw;
|
|
+ struct page_counter kmem;
|
|
+ struct page_counter tcpmem;
|
|
+ long unsigned int high;
|
|
+ struct work_struct high_work;
|
|
+ long unsigned int soft_limit;
|
|
+ struct vmpressure vmpressure;
|
|
+ bool use_hierarchy;
|
|
+ bool oom_group;
|
|
+ bool oom_lock;
|
|
+ int under_oom;
|
|
+ int swappiness;
|
|
+ int oom_kill_disable;
|
|
+ struct cgroup_file events_file;
|
|
+ struct cgroup_file swap_events_file;
|
|
+ struct mutex thresholds_lock;
|
|
+ struct mem_cgroup_thresholds thresholds;
|
|
+ struct mem_cgroup_thresholds memsw_thresholds;
|
|
+ struct list_head oom_notify;
|
|
+ long unsigned int move_charge_at_immigrate;
|
|
+ spinlock_t move_lock;
|
|
+ long unsigned int move_lock_flags;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct memcg_padding _pad1_;
|
|
+ atomic_t moving_account;
|
|
+ struct task_struct *move_lock_task;
|
|
+ struct mem_cgroup_stat_cpu *stat_cpu;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct memcg_padding _pad2_;
|
|
+ atomic_long_t stat[34];
|
|
+ atomic_long_t events[85];
|
|
+ atomic_long_t memory_events[7];
|
|
+ long unsigned int socket_pressure;
|
|
+ bool tcpmem_active;
|
|
+ int tcpmem_pressure;
|
|
+ int kmemcg_id;
|
|
+ enum memcg_kmem_state kmem_state;
|
|
+ struct list_head kmem_caches;
|
|
+ int last_scanned_node;
|
|
+ nodemask_t scan_nodes;
|
|
+ atomic_t numainfo_events;
|
|
+ atomic_t numainfo_updating;
|
|
+ struct list_head cgwb_list;
|
|
+ struct wb_domain cgwb_domain;
|
|
+ struct list_head event_list;
|
|
+ spinlock_t event_list_lock;
|
|
+ struct mem_cgroup_per_node *nodeinfo[0];
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct anon_vma {
|
|
+ struct anon_vma *root;
|
|
+ struct rw_semaphore rwsem;
|
|
+ atomic_t refcount;
|
|
+ unsigned int degree;
|
|
+ struct anon_vma *parent;
|
|
+ struct rb_root_cached rb_root;
|
|
+};
|
|
+
|
|
+struct mempolicy {
|
|
+ atomic_t refcnt;
|
|
+ short unsigned int mode;
|
|
+ short unsigned int flags;
|
|
+ union {
|
|
+ short int preferred_node;
|
|
+ nodemask_t nodes;
|
|
+ } v;
|
|
+ union {
|
|
+ nodemask_t cpuset_mems_allowed;
|
|
+ nodemask_t user_nodemask;
|
|
+ } w;
|
|
+};
|
|
+
|
|
+struct linux_binprm;
|
|
+
|
|
+struct coredump_params;
|
|
+
|
|
+struct linux_binfmt {
|
|
+ struct list_head lh;
|
|
+ struct module *module;
|
|
+ int (*load_binary)(struct linux_binprm *);
|
|
+ int (*load_shlib)(struct file *);
|
|
+ int (*core_dump)(struct coredump_params *);
|
|
+ long unsigned int min_coredump;
|
|
+};
|
|
+
|
|
+struct va_alignment {
|
|
+ int flags;
|
|
+ long unsigned int mask;
|
|
+ long unsigned int bits;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+typedef __u64 Elf64_Addr;
|
|
+
|
|
+typedef __u16 Elf64_Half;
|
|
+
|
|
+typedef __u64 Elf64_Off;
|
|
+
|
|
+typedef __u32 Elf64_Word;
|
|
+
|
|
+typedef __u64 Elf64_Xword;
|
|
+
|
|
+typedef __s64 Elf64_Sxword;
|
|
+
|
|
+typedef struct {
|
|
+ Elf64_Sxword d_tag;
|
|
+ union {
|
|
+ Elf64_Xword d_val;
|
|
+ Elf64_Addr d_ptr;
|
|
+ } d_un;
|
|
+} Elf64_Dyn;
|
|
+
|
|
+struct elf64_sym {
|
|
+ Elf64_Word st_name;
|
|
+ unsigned char st_info;
|
|
+ unsigned char st_other;
|
|
+ Elf64_Half st_shndx;
|
|
+ Elf64_Addr st_value;
|
|
+ Elf64_Xword st_size;
|
|
+};
|
|
+
|
|
+typedef struct elf64_sym Elf64_Sym;
|
|
+
|
|
+struct elf64_hdr {
|
|
+ unsigned char e_ident[16];
|
|
+ Elf64_Half e_type;
|
|
+ Elf64_Half e_machine;
|
|
+ Elf64_Word e_version;
|
|
+ Elf64_Addr e_entry;
|
|
+ Elf64_Off e_phoff;
|
|
+ Elf64_Off e_shoff;
|
|
+ Elf64_Word e_flags;
|
|
+ Elf64_Half e_ehsize;
|
|
+ Elf64_Half e_phentsize;
|
|
+ Elf64_Half e_phnum;
|
|
+ Elf64_Half e_shentsize;
|
|
+ Elf64_Half e_shnum;
|
|
+ Elf64_Half e_shstrndx;
|
|
+};
|
|
+
|
|
+typedef struct elf64_hdr Elf64_Ehdr;
|
|
+
|
|
+struct elf64_shdr {
|
|
+ Elf64_Word sh_name;
|
|
+ Elf64_Word sh_type;
|
|
+ Elf64_Xword sh_flags;
|
|
+ Elf64_Addr sh_addr;
|
|
+ Elf64_Off sh_offset;
|
|
+ Elf64_Xword sh_size;
|
|
+ Elf64_Word sh_link;
|
|
+ Elf64_Word sh_info;
|
|
+ Elf64_Xword sh_addralign;
|
|
+ Elf64_Xword sh_entsize;
|
|
+};
|
|
+
|
|
+typedef struct elf64_shdr Elf64_Shdr;
|
|
+
|
|
+struct seq_file {
|
|
+ char *buf;
|
|
+ size_t size;
|
|
+ size_t from;
|
|
+ size_t count;
|
|
+ size_t pad_until;
|
|
+ loff_t index;
|
|
+ loff_t read_pos;
|
|
+ u64 version;
|
|
+ struct mutex lock;
|
|
+ const struct seq_operations___2 *op;
|
|
+ int poll_event;
|
|
+ const struct file *file;
|
|
+ void *private;
|
|
+};
|
|
+
|
|
+struct kernel_param;
|
|
+
|
|
+struct kernel_param_ops {
|
|
+ unsigned int flags;
|
|
+ int (*set)(const char *, const struct kernel_param *);
|
|
+ int (*get)(char *, const struct kernel_param *);
|
|
+ void (*free)(void *);
|
|
+};
|
|
+
|
|
+struct kparam_string;
|
|
+
|
|
+struct kparam_array;
|
|
+
|
|
+struct kernel_param {
|
|
+ const char *name;
|
|
+ struct module *mod;
|
|
+ const struct kernel_param_ops *ops;
|
|
+ const u16 perm;
|
|
+ s8 level;
|
|
+ u8 flags;
|
|
+ union {
|
|
+ void *arg;
|
|
+ const struct kparam_string *str;
|
|
+ const struct kparam_array *arr;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct kparam_string {
|
|
+ unsigned int maxlen;
|
|
+ char *string;
|
|
+};
|
|
+
|
|
+struct kparam_array {
|
|
+ unsigned int max;
|
|
+ unsigned int elemsize;
|
|
+ unsigned int *num;
|
|
+ const struct kernel_param_ops *ops;
|
|
+ void *elem;
|
|
+};
|
|
+
|
|
+enum module_state {
|
|
+ MODULE_STATE_LIVE = 0,
|
|
+ MODULE_STATE_COMING = 1,
|
|
+ MODULE_STATE_GOING = 2,
|
|
+ MODULE_STATE_UNFORMED = 3,
|
|
+};
|
|
+
|
|
+struct module_param_attrs;
|
|
+
|
|
+struct module_kobject {
|
|
+ struct kobject kobj;
|
|
+ struct module *mod;
|
|
+ struct kobject *drivers_dir;
|
|
+ struct module_param_attrs *mp;
|
|
+ struct completion *kobj_completion;
|
|
+};
|
|
+
|
|
+struct latch_tree_node {
|
|
+ struct rb_node node[2];
|
|
+};
|
|
+
|
|
+struct mod_tree_node {
|
|
+ struct module *mod;
|
|
+ struct latch_tree_node node;
|
|
+};
|
|
+
|
|
+struct module_layout {
|
|
+ void *base;
|
|
+ unsigned int size;
|
|
+ unsigned int text_size;
|
|
+ unsigned int ro_size;
|
|
+ unsigned int ro_after_init_size;
|
|
+ struct mod_tree_node mtn;
|
|
+};
|
|
+
|
|
+struct mod_arch_specific {
|
|
+ unsigned int num_orcs;
|
|
+ int *orc_unwind_ip;
|
|
+ struct orc_entry *orc_unwind;
|
|
+};
|
|
+
|
|
+struct mod_kallsyms {
|
|
+ Elf64_Sym *symtab;
|
|
+ unsigned int num_symtab;
|
|
+ char *strtab;
|
|
+};
|
|
+
|
|
+enum MODULE_KLP_REL_STATE {
|
|
+ MODULE_KLP_REL_NONE = 0,
|
|
+ MODULE_KLP_REL_UNDO = 1,
|
|
+ MODULE_KLP_REL_DONE = 2,
|
|
+};
|
|
+
|
|
+struct module_attribute;
|
|
+
|
|
+struct module_sect_attrs;
|
|
+
|
|
+struct module_notes_attrs;
|
|
+
|
|
+struct trace_eval_map;
|
|
+
|
|
+struct klp_modinfo;
|
|
+
|
|
+struct error_injection_entry;
|
|
+
|
|
+struct module {
|
|
+ enum module_state state;
|
|
+ struct list_head list;
|
|
+ char name[56];
|
|
+ struct module_kobject mkobj;
|
|
+ struct module_attribute *modinfo_attrs;
|
|
+ const char *version;
|
|
+ const char *srcversion;
|
|
+ struct kobject *holders_dir;
|
|
+ const struct kernel_symbol *syms;
|
|
+ const s32 *crcs;
|
|
+ unsigned int num_syms;
|
|
+ struct mutex param_lock;
|
|
+ struct kernel_param *kp;
|
|
+ unsigned int num_kp;
|
|
+ unsigned int num_gpl_syms;
|
|
+ const struct kernel_symbol *gpl_syms;
|
|
+ const s32 *gpl_crcs;
|
|
+ bool sig_ok;
|
|
+ bool async_probe_requested;
|
|
+ const struct kernel_symbol *gpl_future_syms;
|
|
+ const s32 *gpl_future_crcs;
|
|
+ unsigned int num_gpl_future_syms;
|
|
+ unsigned int num_exentries;
|
|
+ struct exception_table_entry *extable;
|
|
+ int (*init)();
|
|
+ long: 64;
|
|
+ struct module_layout core_layout;
|
|
+ struct module_layout init_layout;
|
|
+ struct mod_arch_specific arch;
|
|
+ long unsigned int taints;
|
|
+ unsigned int num_bugs;
|
|
+ struct list_head bug_list;
|
|
+ struct bug_entry *bug_table;
|
|
+ struct mod_kallsyms *kallsyms;
|
|
+ struct mod_kallsyms core_kallsyms;
|
|
+ struct module_sect_attrs *sect_attrs;
|
|
+ struct module_notes_attrs *notes_attrs;
|
|
+ char *args;
|
|
+ void *percpu;
|
|
+ unsigned int percpu_size;
|
|
+ unsigned int num_tracepoints;
|
|
+ tracepoint_ptr_t *tracepoints_ptrs;
|
|
+ unsigned int num_bpf_raw_events;
|
|
+ struct bpf_raw_event_map *bpf_raw_events;
|
|
+ struct jump_entry *jump_entries;
|
|
+ unsigned int num_jump_entries;
|
|
+ unsigned int num_trace_bprintk_fmt;
|
|
+ const char **trace_bprintk_fmt_start;
|
|
+ struct trace_event_call **trace_events;
|
|
+ unsigned int num_trace_events;
|
|
+ struct trace_eval_map **trace_evals;
|
|
+ unsigned int num_trace_evals;
|
|
+ unsigned int num_ftrace_callsites;
|
|
+ long unsigned int *ftrace_callsites;
|
|
+ bool klp;
|
|
+ bool klp_alive;
|
|
+ struct klp_modinfo *klp_info;
|
|
+ struct list_head source_list;
|
|
+ struct list_head target_list;
|
|
+ void (*exit)();
|
|
+ atomic_t refcnt;
|
|
+ struct error_injection_entry *ei_funcs;
|
|
+ unsigned int num_ei_funcs;
|
|
+ union {
|
|
+ enum MODULE_KLP_REL_STATE klp_rel_state;
|
|
+ long int klp_rel_state_KABI;
|
|
+ };
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct error_injection_entry {
|
|
+ long unsigned int addr;
|
|
+ int etype;
|
|
+};
|
|
+
|
|
+struct module_attribute {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *);
|
|
+ ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t);
|
|
+ void (*setup)(struct module *, const char *);
|
|
+ int (*test)(struct module *);
|
|
+ void (*free)(struct module *);
|
|
+};
|
|
+
|
|
+struct klp_modinfo {
|
|
+ Elf64_Ehdr hdr;
|
|
+ Elf64_Shdr *sechdrs;
|
|
+ char *secstrings;
|
|
+ unsigned int symndx;
|
|
+};
|
|
+
|
|
+struct trace_event_functions;
|
|
+
|
|
+struct trace_event {
|
|
+ struct hlist_node node;
|
|
+ struct list_head list;
|
|
+ int type;
|
|
+ struct trace_event_functions *funcs;
|
|
+};
|
|
+
|
|
+struct trace_event_class;
|
|
+
|
|
+struct bpf_prog_array;
|
|
+
|
|
+struct trace_event_call {
|
|
+ struct list_head list;
|
|
+ struct trace_event_class *class;
|
|
+ union {
|
|
+ char *name;
|
|
+ struct tracepoint *tp;
|
|
+ };
|
|
+ struct trace_event event;
|
|
+ char *print_fmt;
|
|
+ struct event_filter *filter;
|
|
+ void *mod;
|
|
+ void *data;
|
|
+ int flags;
|
|
+ int perf_refcount;
|
|
+ struct hlist_head *perf_events;
|
|
+ struct bpf_prog_array *prog_array;
|
|
+ int (*perf_perm)(struct trace_event_call *, struct perf_event *);
|
|
+};
|
|
+
|
|
+struct trace_eval_map {
|
|
+ const char *system;
|
|
+ const char *eval_string;
|
|
+ long unsigned int eval_value;
|
|
+};
|
|
+
|
|
+struct fs_pin;
|
|
+
|
|
+struct pid_namespace {
|
|
+ struct kref kref;
|
|
+ struct idr idr;
|
|
+ struct callback_head rcu;
|
|
+ unsigned int pid_allocated;
|
|
+ struct task_struct *child_reaper;
|
|
+ struct kmem_cache *pid_cachep;
|
|
+ unsigned int level;
|
|
+ struct pid_namespace *parent;
|
|
+ struct vfsmount *proc_mnt;
|
|
+ struct dentry *proc_self;
|
|
+ struct dentry *proc_thread_self;
|
|
+ struct fs_pin *bacct;
|
|
+ struct user_namespace *user_ns;
|
|
+ struct ucounts *ucounts;
|
|
+ struct work_struct proc_work;
|
|
+ kgid_t pid_gid;
|
|
+ int hide_pid;
|
|
+ int pid_max;
|
|
+ int reboot;
|
|
+ struct ns_common ns;
|
|
+};
|
|
+
|
|
+struct rlimit {
|
|
+ __kernel_ulong_t rlim_cur;
|
|
+ __kernel_ulong_t rlim_max;
|
|
+};
|
|
+
|
|
+typedef void __signalfn_t(int);
|
|
+
|
|
+typedef __signalfn_t *__sighandler_t;
|
|
+
|
|
+typedef void __restorefn_t();
|
|
+
|
|
+typedef __restorefn_t *__sigrestore_t;
|
|
+
|
|
+struct user_struct {
|
|
+ refcount_t __count;
|
|
+ atomic_t processes;
|
|
+ atomic_t sigpending;
|
|
+ atomic_t fanotify_listeners;
|
|
+ atomic_long_t epoll_watches;
|
|
+ long unsigned int mq_bytes;
|
|
+ long unsigned int locked_shm;
|
|
+ long unsigned int unix_inflight;
|
|
+ atomic_long_t pipe_bufs;
|
|
+ struct key *uid_keyring;
|
|
+ struct key *session_keyring;
|
|
+ struct hlist_node uidhash_node;
|
|
+ kuid_t uid;
|
|
+ atomic_long_t locked_vm;
|
|
+ struct ratelimit_state ratelimit;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct sigaction {
|
|
+ __sighandler_t sa_handler;
|
|
+ long unsigned int sa_flags;
|
|
+ __sigrestore_t sa_restorer;
|
|
+ sigset_t sa_mask;
|
|
+};
|
|
+
|
|
+struct k_sigaction {
|
|
+ struct sigaction sa;
|
|
+};
|
|
+
|
|
+struct uts_namespace;
|
|
+
|
|
+struct ipc_namespace;
|
|
+
|
|
+struct mnt_namespace;
|
|
+
|
|
+struct net;
|
|
+
|
|
+struct cgroup_namespace;
|
|
+
|
|
+struct nsproxy {
|
|
+ atomic_t count;
|
|
+ struct uts_namespace *uts_ns;
|
|
+ struct ipc_namespace *ipc_ns;
|
|
+ struct mnt_namespace *mnt_ns;
|
|
+ struct pid_namespace *pid_ns_for_children;
|
|
+ struct net *net_ns;
|
|
+ struct cgroup_namespace *cgroup_ns;
|
|
+};
|
|
+
|
|
+struct cpu_itimer {
|
|
+ u64 expires;
|
|
+ u64 incr;
|
|
+};
|
|
+
|
|
+struct task_cputime_atomic {
|
|
+ atomic64_t utime;
|
|
+ atomic64_t stime;
|
|
+ atomic64_t sum_exec_runtime;
|
|
+};
|
|
+
|
|
+struct thread_group_cputimer {
|
|
+ struct task_cputime_atomic cputime_atomic;
|
|
+ bool running;
|
|
+ bool checking_timer;
|
|
+};
|
|
+
|
|
+struct pacct_struct {
|
|
+ int ac_flag;
|
|
+ long int ac_exitcode;
|
|
+ long unsigned int ac_mem;
|
|
+ u64 ac_utime;
|
|
+ u64 ac_stime;
|
|
+ long unsigned int ac_minflt;
|
|
+ long unsigned int ac_majflt;
|
|
+};
|
|
+
|
|
+struct tty_struct;
|
|
+
|
|
+struct autogroup;
|
|
+
|
|
+struct taskstats;
|
|
+
|
|
+struct tty_audit_buf;
|
|
+
|
|
+struct signal_struct {
|
|
+ atomic_t sigcnt;
|
|
+ atomic_t live;
|
|
+ int nr_threads;
|
|
+ struct list_head thread_head;
|
|
+ wait_queue_head_t wait_chldexit;
|
|
+ struct task_struct *curr_target;
|
|
+ struct sigpending shared_pending;
|
|
+ struct hlist_head multiprocess;
|
|
+ int group_exit_code;
|
|
+ int notify_count;
|
|
+ struct task_struct *group_exit_task;
|
|
+ int group_stop_count;
|
|
+ unsigned int flags;
|
|
+ unsigned int is_child_subreaper: 1;
|
|
+ unsigned int has_child_subreaper: 1;
|
|
+ int posix_timer_id;
|
|
+ struct list_head posix_timers;
|
|
+ struct hrtimer real_timer;
|
|
+ ktime_t it_real_incr;
|
|
+ struct cpu_itimer it[2];
|
|
+ struct thread_group_cputimer cputimer;
|
|
+ struct task_cputime cputime_expires;
|
|
+ struct list_head cpu_timers[3];
|
|
+ struct pid *pids[4];
|
|
+ atomic_t tick_dep_mask;
|
|
+ struct pid *tty_old_pgrp;
|
|
+ int leader;
|
|
+ struct tty_struct *tty;
|
|
+ struct autogroup *autogroup;
|
|
+ seqlock_t stats_lock;
|
|
+ u64 utime;
|
|
+ u64 stime;
|
|
+ u64 cutime;
|
|
+ u64 cstime;
|
|
+ u64 gtime;
|
|
+ u64 cgtime;
|
|
+ struct prev_cputime prev_cputime;
|
|
+ long unsigned int nvcsw;
|
|
+ long unsigned int nivcsw;
|
|
+ long unsigned int cnvcsw;
|
|
+ long unsigned int cnivcsw;
|
|
+ long unsigned int min_flt;
|
|
+ long unsigned int maj_flt;
|
|
+ long unsigned int cmin_flt;
|
|
+ long unsigned int cmaj_flt;
|
|
+ long unsigned int inblock;
|
|
+ long unsigned int oublock;
|
|
+ long unsigned int cinblock;
|
|
+ long unsigned int coublock;
|
|
+ long unsigned int maxrss;
|
|
+ long unsigned int cmaxrss;
|
|
+ struct task_io_accounting ioac;
|
|
+ long long unsigned int sum_sched_runtime;
|
|
+ struct rlimit rlim[16];
|
|
+ struct pacct_struct pacct;
|
|
+ struct taskstats *stats;
|
|
+ unsigned int audit_tty;
|
|
+ struct tty_audit_buf *tty_audit_buf;
|
|
+ bool oom_flag_origin;
|
|
+ short int oom_score_adj;
|
|
+ short int oom_score_adj_min;
|
|
+ struct mm_struct *oom_mm;
|
|
+ struct mutex cred_guard_mutex;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct sighand_struct {
|
|
+ atomic_t count;
|
|
+ struct k_sigaction action[64];
|
|
+ spinlock_t siglock;
|
|
+ wait_queue_head_t signalfd_wqh;
|
|
+};
|
|
+
|
|
+struct bio;
|
|
+
|
|
+struct bio_list {
|
|
+ struct bio *head;
|
|
+ struct bio *tail;
|
|
+};
|
|
+
|
|
+struct blk_plug {
|
|
+ struct list_head list;
|
|
+ struct list_head mq_list;
|
|
+ struct list_head cb_list;
|
|
+};
|
|
+
|
|
+struct reclaim_state {
|
|
+ long unsigned int reclaimed_slab;
|
|
+};
|
|
+
|
|
+typedef int congested_fn(void *, int);
|
|
+
|
|
+struct fprop_local_percpu {
|
|
+ struct percpu_counter events;
|
|
+ unsigned int period;
|
|
+ raw_spinlock_t lock;
|
|
+};
|
|
+
|
|
+enum wb_reason {
|
|
+ WB_REASON_BACKGROUND = 0,
|
|
+ WB_REASON_VMSCAN = 1,
|
|
+ WB_REASON_SYNC = 2,
|
|
+ WB_REASON_PERIODIC = 3,
|
|
+ WB_REASON_LAPTOP_TIMER = 4,
|
|
+ WB_REASON_FREE_MORE_MEM = 5,
|
|
+ WB_REASON_FS_FREE_SPACE = 6,
|
|
+ WB_REASON_FORKER_THREAD = 7,
|
|
+ WB_REASON_MAX = 8,
|
|
+};
|
|
+
|
|
+struct bdi_writeback_congested;
|
|
+
|
|
+struct bdi_writeback {
|
|
+ struct backing_dev_info *bdi;
|
|
+ long unsigned int state;
|
|
+ long unsigned int last_old_flush;
|
|
+ struct list_head b_dirty;
|
|
+ struct list_head b_io;
|
|
+ struct list_head b_more_io;
|
|
+ struct list_head b_dirty_time;
|
|
+ spinlock_t list_lock;
|
|
+ struct percpu_counter stat[4];
|
|
+ struct bdi_writeback_congested *congested;
|
|
+ long unsigned int bw_time_stamp;
|
|
+ long unsigned int dirtied_stamp;
|
|
+ long unsigned int written_stamp;
|
|
+ long unsigned int write_bandwidth;
|
|
+ long unsigned int avg_write_bandwidth;
|
|
+ long unsigned int dirty_ratelimit;
|
|
+ long unsigned int balanced_dirty_ratelimit;
|
|
+ struct fprop_local_percpu completions;
|
|
+ int dirty_exceeded;
|
|
+ enum wb_reason start_all_reason;
|
|
+ spinlock_t work_lock;
|
|
+ struct list_head work_list;
|
|
+ struct delayed_work dwork;
|
|
+ long unsigned int dirty_sleep;
|
|
+ struct list_head bdi_node;
|
|
+ struct percpu_ref refcnt;
|
|
+ struct fprop_local_percpu memcg_completions;
|
|
+ struct cgroup_subsys_state *memcg_css;
|
|
+ struct cgroup_subsys_state *blkcg_css;
|
|
+ struct list_head memcg_node;
|
|
+ struct list_head blkcg_node;
|
|
+ union {
|
|
+ struct work_struct release_work;
|
|
+ struct callback_head rcu;
|
|
+ };
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct rcu_device;
|
|
+
|
|
+struct backing_dev_info {
|
|
+ struct list_head bdi_list;
|
|
+ long unsigned int ra_pages;
|
|
+ long unsigned int io_pages;
|
|
+ congested_fn *congested_fn;
|
|
+ void *congested_data;
|
|
+ const char *name;
|
|
+ struct kref refcnt;
|
|
+ unsigned int capabilities;
|
|
+ unsigned int min_ratio;
|
|
+ unsigned int max_ratio;
|
|
+ unsigned int max_prop_frac;
|
|
+ atomic_long_t tot_write_bandwidth;
|
|
+ struct bdi_writeback wb;
|
|
+ struct list_head wb_list;
|
|
+ struct radix_tree_root cgwb_tree;
|
|
+ struct rb_root cgwb_congested_tree;
|
|
+ struct mutex cgwb_release_mutex;
|
|
+ struct rw_semaphore wb_switch_rwsem;
|
|
+ wait_queue_head_t wb_waitq;
|
|
+ union {
|
|
+ struct rcu_device *rcu_dev;
|
|
+ struct device *dev;
|
|
+ };
|
|
+ struct device *owner;
|
|
+ struct timer_list laptop_mode_wb_timer;
|
|
+ struct dentry *debug_dir;
|
|
+ struct dentry *debug_stats;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct css_set {
|
|
+ struct cgroup_subsys_state *subsys[14];
|
|
+ refcount_t refcount;
|
|
+ struct css_set *dom_cset;
|
|
+ struct cgroup *dfl_cgrp;
|
|
+ int nr_tasks;
|
|
+ struct list_head tasks;
|
|
+ struct list_head mg_tasks;
|
|
+ struct list_head dying_tasks;
|
|
+ struct list_head task_iters;
|
|
+ struct list_head e_cset_node[14];
|
|
+ struct list_head threaded_csets;
|
|
+ struct list_head threaded_csets_node;
|
|
+ struct hlist_node hlist;
|
|
+ struct list_head cgrp_links;
|
|
+ struct list_head mg_preload_node;
|
|
+ struct list_head mg_node;
|
|
+ struct cgroup *mg_src_cgrp;
|
|
+ struct cgroup *mg_dst_cgrp;
|
|
+ struct css_set *mg_dst_cset;
|
|
+ bool dead;
|
|
+ struct callback_head callback_head;
|
|
+};
|
|
+
|
|
+typedef u32 compat_uptr_t;
|
|
+
|
|
+struct compat_robust_list {
|
|
+ compat_uptr_t next;
|
|
+};
|
|
+
|
|
+typedef s32 compat_long_t;
|
|
+
|
|
+struct compat_robust_list_head {
|
|
+ struct compat_robust_list list;
|
|
+ compat_long_t futex_offset;
|
|
+ compat_uptr_t list_op_pending;
|
|
+};
|
|
+
|
|
+struct perf_event_groups {
|
|
+ struct rb_root tree;
|
|
+ u64 index;
|
|
+};
|
|
+
|
|
+struct perf_event_context {
|
|
+ struct pmu *pmu;
|
|
+ raw_spinlock_t lock;
|
|
+ struct mutex mutex;
|
|
+ struct list_head active_ctx_list;
|
|
+ struct perf_event_groups pinned_groups;
|
|
+ struct perf_event_groups flexible_groups;
|
|
+ struct list_head event_list;
|
|
+ struct list_head pinned_active;
|
|
+ struct list_head flexible_active;
|
|
+ int nr_events;
|
|
+ int nr_active;
|
|
+ int is_active;
|
|
+ int nr_stat;
|
|
+ int nr_freq;
|
|
+ int rotate_disable;
|
|
+ atomic_t refcount;
|
|
+ struct task_struct *task;
|
|
+ u64 time;
|
|
+ u64 timestamp;
|
|
+ struct perf_event_context *parent_ctx;
|
|
+ u64 parent_gen;
|
|
+ u64 generation;
|
|
+ int pin_count;
|
|
+ int nr_cgroups;
|
|
+ void *task_ctx_data;
|
|
+ struct callback_head callback_head;
|
|
+};
|
|
+
|
|
+struct task_delay_info {
|
|
+ raw_spinlock_t lock;
|
|
+ unsigned int flags;
|
|
+ u64 blkio_start;
|
|
+ u64 blkio_delay;
|
|
+ u64 swapin_delay;
|
|
+ u32 blkio_count;
|
|
+ u32 swapin_count;
|
|
+ u64 freepages_start;
|
|
+ u64 freepages_delay;
|
|
+ u32 freepages_count;
|
|
+};
|
|
+
|
|
+struct ftrace_ret_stack {
|
|
+ long unsigned int ret;
|
|
+ long unsigned int func;
|
|
+ long long unsigned int calltime;
|
|
+ long long unsigned int subtime;
|
|
+ long unsigned int *retp;
|
|
+};
|
|
+
|
|
+struct blkcg_gq;
|
|
+
|
|
+struct mempool_s;
|
|
+
|
|
+typedef struct mempool_s mempool_t;
|
|
+
|
|
+struct request_list {
|
|
+ struct request_queue *q;
|
|
+ struct blkcg_gq *blkg;
|
|
+ int count[2];
|
|
+ int starved[2];
|
|
+ mempool_t *rq_pool;
|
|
+ wait_queue_head_t wait[2];
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+typedef void request_fn_proc(struct request_queue *);
|
|
+
|
|
+typedef unsigned int blk_qc_t;
|
|
+
|
|
+typedef blk_qc_t make_request_fn(struct request_queue *, struct bio *);
|
|
+
|
|
+typedef bool poll_q_fn(struct request_queue *, blk_qc_t);
|
|
+
|
|
+struct request;
|
|
+
|
|
+typedef int prep_rq_fn(struct request_queue *, struct request *);
|
|
+
|
|
+typedef void unprep_rq_fn(struct request_queue *, struct request *);
|
|
+
|
|
+typedef void softirq_done_fn(struct request *);
|
|
+
|
|
+enum blk_eh_timer_return {
|
|
+ BLK_EH_DONE = 0,
|
|
+ BLK_EH_RESET_TIMER = 1,
|
|
+};
|
|
+
|
|
+typedef enum blk_eh_timer_return rq_timed_out_fn(struct request *);
|
|
+
|
|
+typedef int dma_drain_needed_fn(struct request *);
|
|
+
|
|
+typedef int lld_busy_fn(struct request_queue *);
|
|
+
|
|
+typedef int init_rq_fn(struct request_queue *, struct request *, gfp_t);
|
|
+
|
|
+typedef void exit_rq_fn(struct request_queue *, struct request *);
|
|
+
|
|
+struct blk_integrity_profile;
|
|
+
|
|
+struct blk_integrity {
|
|
+ const struct blk_integrity_profile *profile;
|
|
+ unsigned char flags;
|
|
+ unsigned char tuple_size;
|
|
+ unsigned char interval_exp;
|
|
+ unsigned char tag_size;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct blk_rq_stat {
|
|
+ u64 mean;
|
|
+ u64 min;
|
|
+ u64 max;
|
|
+ u32 nr_samples;
|
|
+ u64 batch;
|
|
+};
|
|
+
|
|
+enum blk_zoned_model {
|
|
+ BLK_ZONED_NONE = 0,
|
|
+ BLK_ZONED_HA = 1,
|
|
+ BLK_ZONED_HM = 2,
|
|
+};
|
|
+
|
|
+struct queue_limits {
|
|
+ long unsigned int bounce_pfn;
|
|
+ long unsigned int seg_boundary_mask;
|
|
+ long unsigned int virt_boundary_mask;
|
|
+ unsigned int max_hw_sectors;
|
|
+ unsigned int max_dev_sectors;
|
|
+ unsigned int chunk_sectors;
|
|
+ unsigned int max_sectors;
|
|
+ unsigned int max_segment_size;
|
|
+ unsigned int physical_block_size;
|
|
+ unsigned int alignment_offset;
|
|
+ unsigned int io_min;
|
|
+ unsigned int io_opt;
|
|
+ unsigned int max_discard_sectors;
|
|
+ unsigned int max_hw_discard_sectors;
|
|
+ unsigned int max_write_same_sectors;
|
|
+ unsigned int max_write_zeroes_sectors;
|
|
+ unsigned int discard_granularity;
|
|
+ unsigned int discard_alignment;
|
|
+ short unsigned int logical_block_size;
|
|
+ short unsigned int max_segments;
|
|
+ short unsigned int max_integrity_segments;
|
|
+ short unsigned int max_discard_segments;
|
|
+ unsigned char misaligned;
|
|
+ unsigned char discard_misaligned;
|
|
+ unsigned char cluster;
|
|
+ unsigned char raid_partial_stripes_expensive;
|
|
+ enum blk_zoned_model zoned;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+};
|
|
+
|
|
+struct bsg_job;
|
|
+
|
|
+typedef int bsg_job_fn(struct bsg_job *);
|
|
+
|
|
+struct bsg_ops;
|
|
+
|
|
+struct bsg_class_device {
|
|
+ struct device *class_dev;
|
|
+ int minor;
|
|
+ struct request_queue *queue;
|
|
+ const struct bsg_ops *ops;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+typedef void *mempool_alloc_t(gfp_t, void *);
|
|
+
|
|
+typedef void mempool_free_t(void *, void *);
|
|
+
|
|
+struct mempool_s {
|
|
+ spinlock_t lock;
|
|
+ int min_nr;
|
|
+ int curr_nr;
|
|
+ void **elements;
|
|
+ void *pool_data;
|
|
+ mempool_alloc_t *alloc;
|
|
+ mempool_free_t *free;
|
|
+ wait_queue_head_t wait;
|
|
+};
|
|
+
|
|
+struct bio_set {
|
|
+ struct kmem_cache *bio_slab;
|
|
+ unsigned int front_pad;
|
|
+ mempool_t bio_pool;
|
|
+ mempool_t bvec_pool;
|
|
+ mempool_t bio_integrity_pool;
|
|
+ mempool_t bvec_integrity_pool;
|
|
+ spinlock_t rescue_lock;
|
|
+ struct bio_list rescue_list;
|
|
+ struct work_struct rescue_work;
|
|
+ struct workqueue_struct *rescue_workqueue;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct elevator_queue;
|
|
+
|
|
+struct blk_queue_stats;
|
|
+
|
|
+struct rq_qos;
|
|
+
|
|
+struct blk_mq_ops;
|
|
+
|
|
+struct blk_mq_ctx;
|
|
+
|
|
+struct blk_mq_hw_ctx;
|
|
+
|
|
+struct blk_queue_tag;
|
|
+
|
|
+struct blk_stat_callback;
|
|
+
|
|
+struct blk_trace;
|
|
+
|
|
+struct blk_flush_queue;
|
|
+
|
|
+struct throtl_data;
|
|
+
|
|
+struct blk_mq_tag_set;
|
|
+
|
|
+struct request_queue {
|
|
+ struct list_head queue_head;
|
|
+ struct request *last_merge;
|
|
+ struct elevator_queue *elevator;
|
|
+ int nr_rqs[2];
|
|
+ int nr_rqs_elvpriv;
|
|
+ struct blk_queue_stats *stats;
|
|
+ struct rq_qos *rq_qos;
|
|
+ struct request_list root_rl;
|
|
+ request_fn_proc *request_fn;
|
|
+ make_request_fn *make_request_fn;
|
|
+ poll_q_fn *poll_fn;
|
|
+ prep_rq_fn *prep_rq_fn;
|
|
+ unprep_rq_fn *unprep_rq_fn;
|
|
+ softirq_done_fn *softirq_done_fn;
|
|
+ rq_timed_out_fn *rq_timed_out_fn;
|
|
+ dma_drain_needed_fn *dma_drain_needed;
|
|
+ lld_busy_fn *lld_busy_fn;
|
|
+ init_rq_fn *init_rq_fn;
|
|
+ exit_rq_fn *exit_rq_fn;
|
|
+ void (*initialize_rq_fn)(struct request *);
|
|
+ const struct blk_mq_ops *mq_ops;
|
|
+ unsigned int *mq_map;
|
|
+ struct blk_mq_ctx *queue_ctx;
|
|
+ unsigned int nr_queues;
|
|
+ unsigned int queue_depth;
|
|
+ struct blk_mq_hw_ctx **queue_hw_ctx;
|
|
+ unsigned int nr_hw_queues;
|
|
+ sector_t end_sector;
|
|
+ struct request *boundary_rq;
|
|
+ struct delayed_work delay_work;
|
|
+ struct backing_dev_info *backing_dev_info;
|
|
+ void *queuedata;
|
|
+ long unsigned int queue_flags;
|
|
+ atomic_t pm_only;
|
|
+ int id;
|
|
+ gfp_t bounce_gfp;
|
|
+ spinlock_t __queue_lock;
|
|
+ spinlock_t *queue_lock;
|
|
+ struct kobject kobj;
|
|
+ struct kobject *mq_kobj;
|
|
+ struct blk_integrity integrity;
|
|
+ struct device *dev;
|
|
+ int rpm_status;
|
|
+ unsigned int nr_pending;
|
|
+ long unsigned int nr_requests;
|
|
+ unsigned int nr_congestion_on;
|
|
+ unsigned int nr_congestion_off;
|
|
+ unsigned int nr_batching;
|
|
+ unsigned int dma_drain_size;
|
|
+ void *dma_drain_buffer;
|
|
+ unsigned int dma_pad_mask;
|
|
+ unsigned int dma_alignment;
|
|
+ struct blk_queue_tag *queue_tags;
|
|
+ unsigned int nr_sorted;
|
|
+ unsigned int in_flight[2];
|
|
+ unsigned int request_fn_active;
|
|
+ unsigned int rq_timeout;
|
|
+ int poll_nsec;
|
|
+ struct blk_stat_callback *poll_cb;
|
|
+ struct blk_rq_stat poll_stat[16];
|
|
+ struct timer_list timeout;
|
|
+ struct work_struct timeout_work;
|
|
+ struct list_head timeout_list;
|
|
+ struct list_head icq_list;
|
|
+ long unsigned int blkcg_pols[1];
|
|
+ struct blkcg_gq *root_blkg;
|
|
+ struct list_head blkg_list;
|
|
+ struct queue_limits limits;
|
|
+ unsigned int sg_timeout;
|
|
+ unsigned int sg_reserved_size;
|
|
+ int node;
|
|
+ struct blk_trace *blk_trace;
|
|
+ struct mutex blk_trace_mutex;
|
|
+ struct blk_flush_queue *fq;
|
|
+ struct list_head requeue_list;
|
|
+ spinlock_t requeue_lock;
|
|
+ struct delayed_work requeue_work;
|
|
+ struct mutex sysfs_lock;
|
|
+ struct list_head unused_hctx_list;
|
|
+ spinlock_t unused_hctx_lock;
|
|
+ int bypass_depth;
|
|
+ atomic_t mq_freeze_depth;
|
|
+ bsg_job_fn *bsg_job_fn;
|
|
+ struct bsg_class_device bsg_dev;
|
|
+ struct throtl_data *td;
|
|
+ struct callback_head callback_head;
|
|
+ wait_queue_head_t mq_freeze_wq;
|
|
+ struct percpu_ref q_usage_counter;
|
|
+ struct list_head all_q_node;
|
|
+ struct blk_mq_tag_set *tag_set;
|
|
+ struct list_head tag_set_list;
|
|
+ struct bio_set bio_split;
|
|
+ struct dentry *debugfs_dir;
|
|
+ struct dentry *sched_debugfs_dir;
|
|
+ bool mq_sysfs_init_done;
|
|
+ size_t cmd_size;
|
|
+ void *rq_alloc_data;
|
|
+ struct work_struct release_work;
|
|
+ u64 write_hints[5];
|
|
+};
|
|
+
|
|
+union thread_union {
|
|
+ struct task_struct task;
|
|
+ long unsigned int stack[2048];
|
|
+};
|
|
+
|
|
+enum writeback_sync_modes {
|
|
+ WB_SYNC_NONE = 0,
|
|
+ WB_SYNC_ALL = 1,
|
|
+};
|
|
+
|
|
+struct writeback_control {
|
|
+ long int nr_to_write;
|
|
+ long int pages_skipped;
|
|
+ loff_t range_start;
|
|
+ loff_t range_end;
|
|
+ enum writeback_sync_modes sync_mode;
|
|
+ unsigned int for_kupdate: 1;
|
|
+ unsigned int for_background: 1;
|
|
+ unsigned int tagged_writepages: 1;
|
|
+ unsigned int for_reclaim: 1;
|
|
+ unsigned int range_cyclic: 1;
|
|
+ unsigned int for_sync: 1;
|
|
+ struct bdi_writeback *wb;
|
|
+ struct inode *inode;
|
|
+ int wb_id;
|
|
+ int wb_lcand_id;
|
|
+ int wb_tcand_id;
|
|
+ size_t wb_bytes;
|
|
+ size_t wb_lcand_bytes;
|
|
+ size_t wb_tcand_bytes;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct iovec;
|
|
+
|
|
+struct kvec;
|
|
+
|
|
+struct bio_vec;
|
|
+
|
|
+struct iov_iter {
|
|
+ int type;
|
|
+ size_t iov_offset;
|
|
+ size_t count;
|
|
+ union {
|
|
+ const struct iovec *iov;
|
|
+ const struct kvec *kvec;
|
|
+ const struct bio_vec *bvec;
|
|
+ struct pipe_inode_info *pipe;
|
|
+ };
|
|
+ union {
|
|
+ long unsigned int nr_segs;
|
|
+ struct {
|
|
+ int idx;
|
|
+ int start_idx;
|
|
+ };
|
|
+ };
|
|
+};
|
|
+
|
|
+struct swap_cluster_info {
|
|
+ spinlock_t lock;
|
|
+ unsigned int data: 24;
|
|
+ unsigned int flags: 8;
|
|
+};
|
|
+
|
|
+struct swap_cluster_list {
|
|
+ struct swap_cluster_info head;
|
|
+ struct swap_cluster_info tail;
|
|
+};
|
|
+
|
|
+struct swap_extent {
|
|
+ struct list_head list;
|
|
+ long unsigned int start_page;
|
|
+ long unsigned int nr_pages;
|
|
+ sector_t start_block;
|
|
+};
|
|
+
|
|
+struct percpu_cluster;
|
|
+
|
|
+struct swap_info_struct {
|
|
+ long unsigned int flags;
|
|
+ short int prio;
|
|
+ struct plist_node list;
|
|
+ signed char type;
|
|
+ unsigned int max;
|
|
+ unsigned char *swap_map;
|
|
+ struct swap_cluster_info *cluster_info;
|
|
+ struct swap_cluster_list free_clusters;
|
|
+ unsigned int lowest_bit;
|
|
+ unsigned int highest_bit;
|
|
+ unsigned int pages;
|
|
+ unsigned int inuse_pages;
|
|
+ unsigned int cluster_next;
|
|
+ unsigned int cluster_nr;
|
|
+ struct percpu_cluster *percpu_cluster;
|
|
+ struct swap_extent *curr_swap_extent;
|
|
+ struct swap_extent first_swap_extent;
|
|
+ struct block_device *bdev;
|
|
+ struct file *swap_file;
|
|
+ unsigned int old_block_size;
|
|
+ long unsigned int *frontswap_map;
|
|
+ atomic_t frontswap_pages;
|
|
+ spinlock_t lock;
|
|
+ spinlock_t cont_lock;
|
|
+ struct work_struct discard_work;
|
|
+ struct swap_cluster_list discard_clusters;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ struct plist_node avail_lists[0];
|
|
+};
|
|
+
|
|
+struct partition_meta_info;
|
|
+
|
|
+struct disk_stats;
|
|
+
|
|
+struct hd_struct {
|
|
+ sector_t start_sect;
|
|
+ sector_t nr_sects;
|
|
+ seqcount_t nr_sects_seq;
|
|
+ sector_t alignment_offset;
|
|
+ unsigned int discard_alignment;
|
|
+ struct device __dev;
|
|
+ struct kobject *holder_dir;
|
|
+ int policy;
|
|
+ int partno;
|
|
+ struct partition_meta_info *info;
|
|
+ long unsigned int stamp;
|
|
+ atomic_t in_flight[2];
|
|
+ struct disk_stats *dkstats;
|
|
+ struct percpu_ref ref;
|
|
+ struct gendisk *disk;
|
|
+ struct rcu_work rcu_work;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct disk_part_tbl;
|
|
+
|
|
+struct block_device_operations;
|
|
+
|
|
+struct timer_rand_state;
|
|
+
|
|
+struct disk_events;
|
|
+
|
|
+struct badblocks;
|
|
+
|
|
+struct gendisk {
+ int major;
+ int first_minor;
+ int minors;
+ char disk_name[32];
+ char * (*devnode)(struct gendisk *, umode_t *);
+ unsigned int events;
+ unsigned int async_events;
+ struct disk_part_tbl *part_tbl;
+ struct hd_struct part0;
+ const struct block_device_operations *fops;
+ struct request_queue *queue;
+ void *private_data;
+ int flags;
+ struct rw_semaphore lookup_sem;
+ struct kobject *slave_dir;
+ struct timer_rand_state *random;
+ atomic_t sync_io;
+ struct disk_events *ev;
+ struct kobject integrity_kobj;
+ int node_id;
+ struct badblocks *bb;
+ struct lockdep_map lockdep_map;
+ long unsigned int *user_ro_bitmap;
+ long unsigned int kabi_reserved2;
+ long unsigned int kabi_reserved3;
+ long unsigned int kabi_reserved4;
+};
+
+struct cdev {
+ struct kobject kobj;
+ struct module *owner;
+ const struct file_operations *ops;
+ struct list_head list;
+ dev_t dev;
+ unsigned int count;
+};
+
+typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);
+
+struct poll_table_struct {
+ poll_queue_proc _qproc;
+ __poll_t _key;
+};
+
+typedef u8 blk_status_t;
+
+struct bvec_iter {
+ sector_t bi_sector;
+ unsigned int bi_size;
+ unsigned int bi_idx;
+ unsigned int bi_done;
+ unsigned int bi_bvec_done;
+};
+
+typedef void bio_end_io_t(struct bio *);
+
+struct bio_issue {
+ u64 value;
+};
+
+struct bio_vec {
+ struct page *bv_page;
+ unsigned int bv_len;
+ unsigned int bv_offset;
+};
+
+struct bio_integrity_payload;
+
+struct bio {
+ struct bio *bi_next;
+ struct gendisk *bi_disk;
+ unsigned int bi_opf;
+ short unsigned int bi_flags;
+ short unsigned int bi_ioprio;
+ short unsigned int bi_write_hint;
+ blk_status_t bi_status;
+ u8 bi_partno;
+ unsigned int bi_phys_segments;
+ unsigned int bi_seg_front_size;
+ unsigned int bi_seg_back_size;
+ struct bvec_iter bi_iter;
+ atomic_t __bi_remaining;
+ bio_end_io_t *bi_end_io;
+ void *bi_private;
+ struct io_context *bi_ioc;
+ struct cgroup_subsys_state *bi_css;
+ struct blkcg_gq *bi_blkg;
+ struct bio_issue bi_issue;
+ union {
+ struct bio_integrity_payload *bi_integrity;
+ };
+ short unsigned int bi_vcnt;
+ short unsigned int bi_max_vecs;
+ atomic_t __bi_cnt;
+ struct bio_vec *bi_io_vec;
+ struct bio_set *bi_pool;
+ long unsigned int kabi_reserved1;
+ long unsigned int kabi_reserved2;
+ long unsigned int kabi_reserved3;
+ struct bio_vec bi_inline_vecs[0];
+};
+
+struct linux_binprm {
|
|
+ char buf[256];
|
|
+ struct vm_area_struct *vma;
|
|
+ long unsigned int vma_pages;
|
|
+ struct mm_struct *mm;
|
|
+ long unsigned int p;
|
|
+ unsigned int called_set_creds: 1;
|
|
+ unsigned int cap_elevated: 1;
|
|
+ unsigned int secureexec: 1;
|
|
+ unsigned int recursion_depth;
|
|
+ struct file *file;
|
|
+ struct cred *cred;
|
|
+ int unsafe;
|
|
+ unsigned int per_clear;
|
|
+ int argc;
|
|
+ int envc;
|
|
+ const char *filename;
|
|
+ const char *interp;
|
|
+ unsigned int interp_flags;
|
|
+ unsigned int interp_data;
|
|
+ long unsigned int loader;
|
|
+ long unsigned int exec;
|
|
+ struct rlimit rlim_stack;
|
|
+};
|
|
+
|
|
+struct coredump_params {
|
|
+ const siginfo_t *siginfo;
|
|
+ struct pt_regs *regs;
|
|
+ struct file *file;
|
|
+ long unsigned int limit;
|
|
+ long unsigned int mm_flags;
|
|
+ loff_t written;
|
|
+ loff_t pos;
|
|
+};
|
|
+
|
|
+struct assoc_array_ptr;
|
|
+
|
|
+struct assoc_array {
|
|
+ struct assoc_array_ptr *root;
|
|
+ long unsigned int nr_leaves_on_tree;
|
|
+};
|
|
+
|
|
+typedef int32_t key_serial_t;
|
|
+
|
|
+typedef uint32_t key_perm_t;
|
|
+
|
|
+struct key_type;
|
|
+
|
|
+struct keyring_index_key {
|
|
+ struct key_type *type;
|
|
+ const char *description;
|
|
+ size_t desc_len;
|
|
+};
|
|
+
|
|
+typedef int (*request_key_actor_t)(struct key *, void *);
|
|
+
|
|
+struct key_preparsed_payload;
|
|
+
|
|
+struct key_match_data;
|
|
+
|
|
+struct key_restriction;
|
|
+
|
|
+struct key_type {
|
|
+ const char *name;
|
|
+ size_t def_datalen;
|
|
+ int (*vet_description)(const char *);
|
|
+ int (*preparse)(struct key_preparsed_payload *);
|
|
+ void (*free_preparse)(struct key_preparsed_payload *);
|
|
+ int (*instantiate)(struct key *, struct key_preparsed_payload *);
|
|
+ int (*update)(struct key *, struct key_preparsed_payload *);
|
|
+ int (*match_preparse)(struct key_match_data *);
|
|
+ void (*match_free)(struct key_match_data *);
|
|
+ void (*revoke)(struct key *);
|
|
+ void (*destroy)(struct key *);
|
|
+ void (*describe)(const struct key *, struct seq_file *);
|
|
+ long int (*read)(const struct key *, char *, size_t);
|
|
+ request_key_actor_t request_key;
|
|
+ struct key_restriction * (*lookup_restriction)(const char *);
|
|
+ struct list_head link;
|
|
+ struct lock_class_key lock_class;
|
|
+};
|
|
+
|
|
+union key_payload {
|
|
+ void *rcu_data0;
|
|
+ void *data[4];
|
|
+};
|
|
+
|
|
+typedef int (*key_restrict_link_func_t)(struct key *, const struct key_type *, const union key_payload *, struct key *);
|
|
+
|
|
+struct key_user;
|
|
+
|
|
+struct key {
|
|
+ refcount_t usage;
|
|
+ key_serial_t serial;
|
|
+ union {
|
|
+ struct list_head graveyard_link;
|
|
+ struct rb_node serial_node;
|
|
+ };
|
|
+ struct rw_semaphore sem;
|
|
+ struct key_user *user;
|
|
+ void *security;
|
|
+ union {
|
|
+ time64_t expiry;
|
|
+ time64_t revoked_at;
|
|
+ };
|
|
+ time64_t last_used_at;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ key_perm_t perm;
|
|
+ short unsigned int quotalen;
|
|
+ short unsigned int datalen;
|
|
+ short int state;
|
|
+ long unsigned int flags;
|
|
+ union {
|
|
+ struct keyring_index_key index_key;
|
|
+ struct {
|
|
+ struct key_type *type;
|
|
+ char *description;
|
|
+ };
|
|
+ };
|
|
+ union {
|
|
+ union key_payload payload;
|
|
+ struct {
|
|
+ struct list_head name_link;
|
|
+ struct assoc_array keys;
|
|
+ };
|
|
+ };
|
|
+ struct key_restriction *restrict_link;
|
|
+};
|
|
+
|
|
+struct key_restriction {
|
|
+ key_restrict_link_func_t check;
|
|
+ struct key *key;
|
|
+ struct key_type *keytype;
|
|
+};
|
|
+
|
|
+struct stack_trace {
|
|
+ unsigned int nr_entries;
|
|
+ unsigned int max_entries;
|
|
+ long unsigned int *entries;
|
|
+ int skip;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct group_info {
|
|
+ atomic_t usage;
|
|
+ int ngroups;
|
|
+ kgid_t gid[0];
|
|
+};
|
|
+
|
|
+struct ring_buffer_event {
|
|
+ u32 type_len: 5;
|
|
+ u32 time_delta: 27;
|
|
+ u32 array[0];
|
|
+};
|
|
+
|
|
+struct seq_buf {
|
|
+ char *buffer;
|
|
+ size_t size;
|
|
+ size_t len;
|
|
+ loff_t readpos;
|
|
+};
|
|
+
|
|
+struct trace_seq {
|
|
+ unsigned char buffer[4096];
|
|
+ struct seq_buf seq;
|
|
+ int full;
|
|
+};
|
|
+
|
|
+enum ctx_state {
|
|
+ CONTEXT_DISABLED = -1,
|
|
+ CONTEXT_KERNEL = 0,
|
|
+ CONTEXT_USER = 1,
|
|
+ CONTEXT_GUEST = 2,
|
|
+};
|
|
+
|
|
+struct context_tracking {
|
|
+ bool active;
|
|
+ int recursion;
|
|
+ enum ctx_state state;
|
|
+};
|
|
+
|
|
+union perf_mem_data_src {
|
|
+ __u64 val;
|
|
+ struct {
|
|
+ __u64 mem_op: 5;
|
|
+ __u64 mem_lvl: 14;
|
|
+ __u64 mem_snoop: 5;
|
|
+ __u64 mem_lock: 2;
|
|
+ __u64 mem_dtlb: 7;
|
|
+ __u64 mem_lvl_num: 4;
|
|
+ __u64 mem_remote: 1;
|
|
+ __u64 mem_snoopx: 2;
|
|
+ __u64 mem_rsvd: 24;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct perf_branch_entry {
|
|
+ __u64 from;
|
|
+ __u64 to;
|
|
+ __u64 mispred: 1;
|
|
+ __u64 predicted: 1;
|
|
+ __u64 in_tx: 1;
|
|
+ __u64 abort: 1;
|
|
+ __u64 cycles: 16;
|
|
+ __u64 type: 4;
|
|
+ __u64 reserved: 40;
|
|
+};
|
|
+
|
|
+struct taskstats {
|
|
+ __u16 version;
|
|
+ __u32 ac_exitcode;
|
|
+ __u8 ac_flag;
|
|
+ __u8 ac_nice;
|
|
+ __u64 cpu_count;
|
|
+ __u64 cpu_delay_total;
|
|
+ __u64 blkio_count;
|
|
+ __u64 blkio_delay_total;
|
|
+ __u64 swapin_count;
|
|
+ __u64 swapin_delay_total;
|
|
+ __u64 cpu_run_real_total;
|
|
+ __u64 cpu_run_virtual_total;
|
|
+ char ac_comm[32];
|
|
+ __u8 ac_sched;
|
|
+ __u8 ac_pad[3];
|
|
+ int: 32;
|
|
+ __u32 ac_uid;
|
|
+ __u32 ac_gid;
|
|
+ __u32 ac_pid;
|
|
+ __u32 ac_ppid;
|
|
+ __u32 ac_btime;
|
|
+ __u64 ac_etime;
|
|
+ __u64 ac_utime;
|
|
+ __u64 ac_stime;
|
|
+ __u64 ac_minflt;
|
|
+ __u64 ac_majflt;
|
|
+ __u64 coremem;
|
|
+ __u64 virtmem;
|
|
+ __u64 hiwater_rss;
|
|
+ __u64 hiwater_vm;
|
|
+ __u64 read_char;
|
|
+ __u64 write_char;
|
|
+ __u64 read_syscalls;
|
|
+ __u64 write_syscalls;
|
|
+ __u64 read_bytes;
|
|
+ __u64 write_bytes;
|
|
+ __u64 cancelled_write_bytes;
|
|
+ __u64 nvcsw;
|
|
+ __u64 nivcsw;
|
|
+ __u64 ac_utimescaled;
|
|
+ __u64 ac_stimescaled;
|
|
+ __u64 cpu_scaled_run_real_total;
|
|
+ __u64 freepages_count;
|
|
+ __u64 freepages_delay_total;
|
|
+};
|
|
+
|
|
+struct new_utsname {
|
|
+ char sysname[65];
|
|
+ char nodename[65];
|
|
+ char release[65];
|
|
+ char version[65];
|
|
+ char machine[65];
|
|
+ char domainname[65];
|
|
+};
|
|
+
|
|
+struct uts_namespace {
|
|
+ struct kref kref;
|
|
+ struct new_utsname name;
|
|
+ struct user_namespace *user_ns;
|
|
+ struct ucounts *ucounts;
|
|
+ struct ns_common ns;
|
|
+};
|
|
+
|
|
+struct cgroup_namespace {
|
|
+ refcount_t count;
|
|
+ struct ns_common ns;
|
|
+ struct user_namespace *user_ns;
|
|
+ struct ucounts *ucounts;
|
|
+ struct css_set *root_cset;
|
|
+};
|
|
+
|
|
+struct proc_ns_operations {
|
|
+ const char *name;
|
|
+ const char *real_ns_name;
|
|
+ int type;
|
|
+ struct ns_common * (*get)(struct task_struct *);
|
|
+ void (*put)(struct ns_common *);
|
|
+ int (*install)(struct nsproxy *, struct ns_common *);
|
|
+ struct user_namespace * (*owner)(struct ns_common *);
|
|
+ struct ns_common * (*get_parent)(struct ns_common *);
|
|
+};
|
|
+
|
|
+struct ucounts {
|
|
+ struct hlist_node node;
|
|
+ struct user_namespace *ns;
|
|
+ kuid_t uid;
|
|
+ int count;
|
|
+ atomic_t ucount[9];
|
|
+};
|
|
+
|
|
+struct perf_guest_info_callbacks {
|
|
+ int (*is_in_guest)();
|
|
+ int (*is_user_mode)();
|
|
+ long unsigned int (*get_guest_ip)();
|
|
+};
|
|
+
|
|
+struct perf_cpu_context;
|
|
+
|
|
+struct pmu {
|
|
+ struct list_head entry;
|
|
+ struct module *module;
|
|
+ struct device *dev;
|
|
+ const struct attribute_group **attr_groups;
|
|
+ const char *name;
|
|
+ int type;
|
|
+ int capabilities;
|
|
+ int *pmu_disable_count;
|
|
+ struct perf_cpu_context *pmu_cpu_context;
|
|
+ atomic_t exclusive_cnt;
|
|
+ int task_ctx_nr;
|
|
+ int hrtimer_interval_ms;
|
|
+ unsigned int nr_addr_filters;
|
|
+ void (*pmu_enable)(struct pmu *);
|
|
+ void (*pmu_disable)(struct pmu *);
|
|
+ int (*event_init)(struct perf_event *);
|
|
+ void (*event_mapped)(struct perf_event *, struct mm_struct *);
|
|
+ void (*event_unmapped)(struct perf_event *, struct mm_struct *);
|
|
+ int (*add)(struct perf_event *, int);
|
|
+ void (*del)(struct perf_event *, int);
|
|
+ void (*start)(struct perf_event *, int);
|
|
+ void (*stop)(struct perf_event *, int);
|
|
+ void (*read)(struct perf_event *);
|
|
+ void (*start_txn)(struct pmu *, unsigned int);
|
|
+ int (*commit_txn)(struct pmu *);
|
|
+ void (*cancel_txn)(struct pmu *);
|
|
+ int (*event_idx)(struct perf_event *);
|
|
+ void (*sched_task)(struct perf_event_context *, bool);
|
|
+ size_t task_ctx_size;
|
|
+ void * (*setup_aux)(struct perf_event *, void **, int, bool);
|
|
+ void (*free_aux)(void *);
|
|
+ int (*addr_filters_validate)(struct list_head *);
|
|
+ void (*addr_filters_sync)(struct perf_event *);
|
|
+ int (*filter_match)(struct perf_event *);
|
|
+ int (*check_period)(struct perf_event *, u64);
|
|
+};
|
|
+
|
|
+struct iovec {
|
|
+ void *iov_base;
|
|
+ __kernel_size_t iov_len;
|
|
+};
|
|
+
|
|
+struct kvec {
|
|
+ void *iov_base;
|
|
+ size_t iov_len;
|
|
+};
|
|
+
|
|
+enum ftrace_tracing_type_t {
|
|
+ FTRACE_TYPE_ENTER = 0,
|
|
+ FTRACE_TYPE_RETURN = 1,
|
|
+};
|
|
+
|
|
+enum ftrace_bug_type {
|
|
+ FTRACE_BUG_UNKNOWN = 0,
|
|
+ FTRACE_BUG_INIT = 1,
|
|
+ FTRACE_BUG_NOP = 2,
|
|
+ FTRACE_BUG_CALL = 3,
|
|
+ FTRACE_BUG_UPDATE = 4,
|
|
+};
|
|
+
|
|
+struct ftrace_graph_ent {
|
|
+ long unsigned int func;
|
|
+ int depth;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct ftrace_graph_ret {
|
|
+ long unsigned int func;
|
|
+ long unsigned int overrun;
|
|
+ long long unsigned int calltime;
|
|
+ long long unsigned int rettime;
|
|
+ int depth;
|
|
+} __attribute__((packed));
|
|
+
|
|
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *);
|
|
+
|
|
+typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *);
|
|
+
|
|
+typedef u32 phandle;
|
|
+
|
|
+struct property;
|
|
+
|
|
+struct device_node {
|
|
+ const char *name;
|
|
+ const char *type;
|
|
+ phandle phandle;
|
|
+ const char *full_name;
|
|
+ struct fwnode_handle fwnode;
|
|
+ struct property *properties;
|
|
+ struct property *deadprops;
|
|
+ struct device_node *parent;
|
|
+ struct device_node *child;
|
|
+ struct device_node *sibling;
|
|
+ long unsigned int _flags;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+struct rcu_device {
|
|
+ struct device dev;
|
|
+ struct callback_head callback_head;
|
|
+};
|
|
+
|
|
+enum cpuhp_state {
|
|
+ CPUHP_INVALID = -1,
|
|
+ CPUHP_OFFLINE = 0,
|
|
+ CPUHP_CREATE_THREADS = 1,
|
|
+ CPUHP_PERF_PREPARE = 2,
|
|
+ CPUHP_PERF_X86_PREPARE = 3,
|
|
+ CPUHP_PERF_X86_AMD_UNCORE_PREP = 4,
|
|
+ CPUHP_PERF_POWER = 5,
|
|
+ CPUHP_PERF_SUPERH = 6,
|
|
+ CPUHP_X86_HPET_DEAD = 7,
|
|
+ CPUHP_X86_APB_DEAD = 8,
|
|
+ CPUHP_X86_MCE_DEAD = 9,
|
|
+ CPUHP_VIRT_NET_DEAD = 10,
|
|
+ CPUHP_SLUB_DEAD = 11,
|
|
+ CPUHP_MM_WRITEBACK_DEAD = 12,
|
|
+ CPUHP_MM_VMSTAT_DEAD = 13,
|
|
+ CPUHP_SOFTIRQ_DEAD = 14,
|
|
+ CPUHP_NET_MVNETA_DEAD = 15,
|
|
+ CPUHP_CPUIDLE_DEAD = 16,
|
|
+ CPUHP_ARM64_FPSIMD_DEAD = 17,
|
|
+ CPUHP_ARM_OMAP_WAKE_DEAD = 18,
|
|
+ CPUHP_IRQ_POLL_DEAD = 19,
|
|
+ CPUHP_BLOCK_SOFTIRQ_DEAD = 20,
|
|
+ CPUHP_ACPI_CPUDRV_DEAD = 21,
|
|
+ CPUHP_S390_PFAULT_DEAD = 22,
|
|
+ CPUHP_BLK_MQ_DEAD = 23,
|
|
+ CPUHP_FS_BUFF_DEAD = 24,
|
|
+ CPUHP_PRINTK_DEAD = 25,
|
|
+ CPUHP_MM_MEMCQ_DEAD = 26,
|
|
+ CPUHP_PERCPU_CNT_DEAD = 27,
|
|
+ CPUHP_RADIX_DEAD = 28,
|
|
+ CPUHP_PAGE_ALLOC_DEAD = 29,
|
|
+ CPUHP_NET_DEV_DEAD = 30,
|
|
+ CPUHP_PCI_XGENE_DEAD = 31,
|
|
+ CPUHP_IOMMU_INTEL_DEAD = 32,
|
|
+ CPUHP_LUSTRE_CFS_DEAD = 33,
|
|
+ CPUHP_AP_ARM_CACHE_B15_RAC_DEAD = 34,
|
|
+ CPUHP_WORKQUEUE_PREP = 35,
|
|
+ CPUHP_POWER_NUMA_PREPARE = 36,
|
|
+ CPUHP_HRTIMERS_PREPARE = 37,
|
|
+ CPUHP_PROFILE_PREPARE = 38,
|
|
+ CPUHP_X2APIC_PREPARE = 39,
|
|
+ CPUHP_SMPCFD_PREPARE = 40,
|
|
+ CPUHP_RELAY_PREPARE = 41,
|
|
+ CPUHP_SLAB_PREPARE = 42,
|
|
+ CPUHP_MD_RAID5_PREPARE = 43,
|
|
+ CPUHP_RCUTREE_PREP = 44,
|
|
+ CPUHP_CPUIDLE_COUPLED_PREPARE = 45,
|
|
+ CPUHP_POWERPC_PMAC_PREPARE = 46,
|
|
+ CPUHP_POWERPC_MMU_CTX_PREPARE = 47,
|
|
+ CPUHP_XEN_PREPARE = 48,
|
|
+ CPUHP_XEN_EVTCHN_PREPARE = 49,
|
|
+ CPUHP_ARM_SHMOBILE_SCU_PREPARE = 50,
|
|
+ CPUHP_SH_SH3X_PREPARE = 51,
|
|
+ CPUHP_NET_FLOW_PREPARE = 52,
|
|
+ CPUHP_TOPOLOGY_PREPARE = 53,
|
|
+ CPUHP_NET_IUCV_PREPARE = 54,
|
|
+ CPUHP_ARM_BL_PREPARE = 55,
|
|
+ CPUHP_TRACE_RB_PREPARE = 56,
|
|
+ CPUHP_MM_ZS_PREPARE = 57,
|
|
+ CPUHP_MM_ZSWP_MEM_PREPARE = 58,
|
|
+ CPUHP_MM_ZSWP_POOL_PREPARE = 59,
|
|
+ CPUHP_KVM_PPC_BOOK3S_PREPARE = 60,
|
|
+ CPUHP_ZCOMP_PREPARE = 61,
|
|
+ CPUHP_TIMERS_PREPARE = 62,
|
|
+ CPUHP_MIPS_SOC_PREPARE = 63,
|
|
+ CPUHP_BP_PREPARE_DYN = 64,
|
|
+ CPUHP_BP_PREPARE_DYN_END = 84,
|
|
+ CPUHP_BRINGUP_CPU = 85,
|
|
+ CPUHP_AP_IDLE_DEAD = 86,
|
|
+ CPUHP_AP_OFFLINE = 87,
|
|
+ CPUHP_AP_SCHED_STARTING = 88,
|
|
+ CPUHP_AP_RCUTREE_DYING = 89,
|
|
+ CPUHP_AP_IRQ_GIC_STARTING = 90,
|
|
+ CPUHP_AP_IRQ_HIP04_STARTING = 91,
|
|
+ CPUHP_AP_IRQ_ARMADA_XP_STARTING = 92,
|
|
+ CPUHP_AP_IRQ_BCM2836_STARTING = 93,
|
|
+ CPUHP_AP_IRQ_MIPS_GIC_STARTING = 94,
|
|
+ CPUHP_AP_ARM_MVEBU_COHERENCY = 95,
|
|
+ CPUHP_AP_MICROCODE_LOADER = 96,
|
|
+ CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING = 97,
|
|
+ CPUHP_AP_PERF_X86_STARTING = 98,
|
|
+ CPUHP_AP_PERF_X86_AMD_IBS_STARTING = 99,
|
|
+ CPUHP_AP_PERF_X86_CQM_STARTING = 100,
|
|
+ CPUHP_AP_PERF_X86_CSTATE_STARTING = 101,
|
|
+ CPUHP_AP_PERF_XTENSA_STARTING = 102,
|
|
+ CPUHP_AP_MIPS_OP_LOONGSON3_STARTING = 103,
|
|
+ CPUHP_AP_ARM_SDEI_STARTING = 104,
|
|
+ CPUHP_AP_ARM_VFP_STARTING = 105,
|
|
+ CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING = 106,
|
|
+ CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING = 107,
|
|
+ CPUHP_AP_PERF_ARM_ACPI_STARTING = 108,
|
|
+ CPUHP_AP_PERF_ARM_STARTING = 109,
|
|
+ CPUHP_AP_ARM_L2X0_STARTING = 110,
|
|
+ CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING = 111,
|
|
+ CPUHP_AP_ARM_ARCH_TIMER_STARTING = 112,
|
|
+ CPUHP_AP_ARM_GLOBAL_TIMER_STARTING = 113,
|
|
+ CPUHP_AP_JCORE_TIMER_STARTING = 114,
|
|
+ CPUHP_AP_ARM_TWD_STARTING = 115,
|
|
+ CPUHP_AP_QCOM_TIMER_STARTING = 116,
|
|
+ CPUHP_AP_ARMADA_TIMER_STARTING = 117,
|
|
+ CPUHP_AP_MARCO_TIMER_STARTING = 118,
|
|
+ CPUHP_AP_MIPS_GIC_TIMER_STARTING = 119,
|
|
+ CPUHP_AP_ARC_TIMER_STARTING = 120,
|
|
+ CPUHP_AP_RISCV_TIMER_STARTING = 121,
|
|
+ CPUHP_AP_KVM_STARTING = 122,
|
|
+ CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING = 123,
|
|
+ CPUHP_AP_KVM_ARM_VGIC_STARTING = 124,
|
|
+ CPUHP_AP_KVM_ARM_TIMER_STARTING = 125,
|
|
+ CPUHP_AP_DUMMY_TIMER_STARTING = 126,
|
|
+ CPUHP_AP_ARM_XEN_STARTING = 127,
|
|
+ CPUHP_AP_ARM_CORESIGHT_STARTING = 128,
|
|
+ CPUHP_AP_ARM64_ISNDEP_STARTING = 129,
|
|
+ CPUHP_AP_SMPCFD_DYING = 130,
|
|
+ CPUHP_AP_X86_TBOOT_DYING = 131,
|
|
+ CPUHP_AP_ARM_CACHE_B15_RAC_DYING = 132,
|
|
+ CPUHP_AP_ONLINE = 133,
|
|
+ CPUHP_TEARDOWN_CPU = 134,
|
|
+ CPUHP_AP_ONLINE_IDLE = 135,
|
|
+ CPUHP_AP_SMPBOOT_THREADS = 136,
|
|
+ CPUHP_AP_X86_VDSO_VMA_ONLINE = 137,
|
|
+ CPUHP_AP_IRQ_AFFINITY_ONLINE = 138,
|
|
+ CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS = 139,
|
|
+ CPUHP_AP_PERF_ONLINE = 140,
|
|
+ CPUHP_AP_PERF_X86_ONLINE = 141,
|
|
+ CPUHP_AP_PERF_X86_UNCORE_ONLINE = 142,
|
|
+ CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE = 143,
|
|
+ CPUHP_AP_PERF_X86_AMD_POWER_ONLINE = 144,
|
|
+ CPUHP_AP_PERF_X86_RAPL_ONLINE = 145,
|
|
+ CPUHP_AP_PERF_X86_CQM_ONLINE = 146,
|
|
+ CPUHP_AP_PERF_X86_CSTATE_ONLINE = 147,
|
|
+ CPUHP_AP_PERF_S390_CF_ONLINE = 148,
|
|
+ CPUHP_AP_PERF_S390_SF_ONLINE = 149,
|
|
+ CPUHP_AP_PERF_ARM_CCI_ONLINE = 150,
|
|
+ CPUHP_AP_PERF_ARM_CCN_ONLINE = 151,
|
|
+ CPUHP_AP_PERF_ARM_HISI_DDRC_ONLINE = 152,
|
|
+ CPUHP_AP_PERF_ARM_HISI_HHA_ONLINE = 153,
|
|
+ CPUHP_AP_PERF_ARM_HISI_L3_ONLINE = 154,
|
|
+ CPUHP_AP_PERF_ARM_L2X0_ONLINE = 155,
|
|
+ CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE = 156,
|
|
+ CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE = 157,
|
|
+ CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE = 158,
|
|
+ CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE = 159,
|
|
+ CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE = 160,
|
|
+ CPUHP_AP_WATCHDOG_ONLINE = 161,
|
|
+ CPUHP_AP_WORKQUEUE_ONLINE = 162,
|
|
+ CPUHP_AP_RCUTREE_ONLINE = 163,
|
|
+ CPUHP_AP_BASE_CACHEINFO_ONLINE = 164,
|
|
+ CPUHP_AP_ONLINE_DYN = 165,
|
|
+ CPUHP_AP_ONLINE_DYN_END = 195,
|
|
+ CPUHP_AP_X86_HPET_ONLINE = 196,
|
|
+ CPUHP_AP_X86_KVM_CLK_ONLINE = 197,
|
|
+ CPUHP_AP_ACTIVE = 198,
|
|
+ CPUHP_ONLINE = 199,
|
|
+};
|
|
+
|
|
+struct perf_regs {
|
|
+ __u64 abi;
|
|
+ struct pt_regs *regs;
|
|
+};
|
|
+
|
|
+struct kernel_cpustat {
|
|
+ u64 cpustat[12];
|
|
+};
|
|
+
|
|
+struct kernel_stat {
|
|
+ long unsigned int irqs_sum;
|
|
+ unsigned int softirqs[10];
|
|
+};
|
|
+
|
|
+struct u64_stats_sync {};
|
|
+
|
|
+struct cgroup_bpf {
|
|
+ struct bpf_prog_array *effective[21];
|
|
+ struct list_head progs[21];
|
|
+ u32 flags[21];
|
|
+ struct bpf_prog_array *inactive;
|
|
+};
|
|
+
|
|
+struct cgroup_base_stat {
|
|
+ struct task_cputime cputime;
|
|
+};
|
|
+
|
|
+struct cgroup_root;
|
|
+
|
|
+struct cgroup_rstat_cpu;
|
|
+
|
|
+struct cgroup {
|
|
+ struct cgroup_subsys_state self;
|
|
+ long unsigned int flags;
|
|
+ int id;
|
|
+ int level;
|
|
+ int max_depth;
|
|
+ int nr_descendants;
|
|
+ int nr_dying_descendants;
|
|
+ int max_descendants;
|
|
+ int nr_populated_csets;
|
|
+ int nr_populated_domain_children;
|
|
+ int nr_populated_threaded_children;
|
|
+ int nr_threaded_children;
|
|
+ struct kernfs_node *kn;
|
|
+ struct cgroup_file procs_file;
|
|
+ struct cgroup_file events_file;
|
|
+ u16 subtree_control;
|
|
+ u16 subtree_ss_mask;
|
|
+ u16 old_subtree_control;
|
|
+ u16 old_subtree_ss_mask;
|
|
+ struct cgroup_subsys_state *subsys[14];
|
|
+ struct cgroup_root *root;
|
|
+ struct list_head cset_links;
|
|
+ struct list_head e_csets[14];
|
|
+ struct cgroup *dom_cgrp;
|
|
+ struct cgroup *old_dom_cgrp;
|
|
+ struct cgroup_rstat_cpu *rstat_cpu;
|
|
+ struct list_head rstat_css_list;
|
|
+ struct cgroup_base_stat pending_bstat;
|
|
+ struct cgroup_base_stat bstat;
|
|
+ struct prev_cputime prev_cputime;
|
|
+ struct list_head pidlists;
|
|
+ struct mutex pidlist_mutex;
|
|
+ wait_queue_head_t offline_waitq;
|
|
+ struct work_struct release_agent_work;
|
|
+ struct cgroup_bpf bpf;
|
|
+ atomic_t congestion_count;
|
|
+ int ancestor_ids[0];
|
|
+};
|
|
+
|
|
+struct cgroup_taskset;
|
|
+
|
|
+struct cftype;
|
|
+
|
|
+struct cgroup_subsys {
|
|
+ struct cgroup_subsys_state * (*css_alloc)(struct cgroup_subsys_state *);
|
|
+ int (*css_online)(struct cgroup_subsys_state *);
|
|
+ void (*css_offline)(struct cgroup_subsys_state *);
|
|
+ void (*css_released)(struct cgroup_subsys_state *);
|
|
+ void (*css_free)(struct cgroup_subsys_state *);
|
|
+ void (*css_reset)(struct cgroup_subsys_state *);
|
|
+ void (*css_rstat_flush)(struct cgroup_subsys_state *, int);
|
|
+ int (*css_extra_stat_show)(struct seq_file *, struct cgroup_subsys_state *);
|
|
+ int (*can_attach)(struct cgroup_taskset *);
|
|
+ void (*cancel_attach)(struct cgroup_taskset *);
|
|
+ void (*attach)(struct cgroup_taskset *);
|
|
+ void (*post_attach)();
|
|
+ int (*can_fork)(struct task_struct *);
|
|
+ void (*cancel_fork)(struct task_struct *);
|
|
+ void (*fork)(struct task_struct *);
|
|
+ void (*exit)(struct task_struct *);
|
|
+ void (*release)(struct task_struct *);
|
|
+ void (*bind)(struct cgroup_subsys_state *);
|
|
+ bool early_init: 1;
|
|
+ bool implicit_on_dfl: 1;
|
|
+ bool threaded: 1;
|
|
+ bool broken_hierarchy: 1;
|
|
+ bool warned_broken_hierarchy: 1;
|
|
+ int id;
|
|
+ const char *name;
|
|
+ const char *legacy_name;
|
|
+ struct cgroup_root *root;
|
|
+ struct idr css_idr;
|
|
+ struct list_head cfts;
|
|
+ struct cftype *dfl_cftypes;
|
|
+ struct cftype *legacy_cftypes;
|
|
+ unsigned int depends_on;
|
|
+};
|
|
+
|
|
+struct cgroup_rstat_cpu {
|
|
+ struct u64_stats_sync bsync;
|
|
+ struct cgroup_base_stat bstat;
|
|
+ struct cgroup_base_stat last_bstat;
|
|
+ struct cgroup *updated_children;
|
|
+ struct cgroup *updated_next;
|
|
+};
|
|
+
|
|
+struct cgroup_root {
|
|
+ struct kernfs_root *kf_root;
|
|
+ unsigned int subsys_mask;
|
|
+ int hierarchy_id;
|
|
+ struct cgroup cgrp;
|
|
+ int cgrp_ancestor_id_storage;
|
|
+ atomic_t nr_cgrps;
|
|
+ struct list_head root_list;
|
|
+ unsigned int flags;
|
|
+ struct idr cgroup_idr;
|
|
+ char release_agent_path[4096];
|
|
+ char name[64];
|
|
+};
|
|
+
|
|
+struct cftype {
|
|
+ char name[64];
|
|
+ long unsigned int private;
|
|
+ size_t max_write_len;
|
|
+ unsigned int flags;
|
|
+ unsigned int file_offset;
|
|
+ struct cgroup_subsys *ss;
|
|
+ struct list_head node;
|
|
+ struct kernfs_ops *kf_ops;
|
|
+ int (*open)(struct kernfs_open_file *);
|
|
+ void (*release)(struct kernfs_open_file *);
|
|
+ u64 (*read_u64)(struct cgroup_subsys_state *, struct cftype *);
|
|
+ s64 (*read_s64)(struct cgroup_subsys_state *, struct cftype *);
|
|
+ int (*seq_show)(struct seq_file *, void *);
|
|
+ void * (*seq_start)(struct seq_file *, loff_t *);
|
|
+ void * (*seq_next)(struct seq_file *, void *, loff_t *);
|
|
+ void (*seq_stop)(struct seq_file *, void *);
|
|
+ int (*write_u64)(struct cgroup_subsys_state *, struct cftype *, u64);
|
|
+ int (*write_s64)(struct cgroup_subsys_state *, struct cftype *, s64);
|
|
+ ssize_t (*write)(struct kernfs_open_file *, char *, size_t, loff_t);
|
|
+};
|
|
+
|
|
+struct perf_callchain_entry {
|
|
+ __u64 nr;
|
|
+ __u64 ip[0];
|
|
+};
|
|
+
|
|
+typedef long unsigned int (*perf_copy_f)(void *, const void *, long unsigned int, long unsigned int);
|
|
+
|
|
+struct perf_raw_frag {
|
|
+ union {
|
|
+ struct perf_raw_frag *next;
|
|
+ long unsigned int pad;
|
|
+ };
|
|
+ perf_copy_f copy;
|
|
+ void *data;
|
|
+ u32 size;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct perf_raw_record {
|
|
+ struct perf_raw_frag frag;
|
|
+ u32 size;
|
|
+};
|
|
+
|
|
+struct perf_branch_stack {
|
|
+ __u64 nr;
|
|
+ struct perf_branch_entry entries[0];
|
|
+};
|
|
+
|
|
+struct perf_cpu_context {
|
|
+ struct perf_event_context ctx;
|
|
+ struct perf_event_context *task_ctx;
|
|
+ int active_oncpu;
|
|
+ int exclusive;
|
|
+ raw_spinlock_t hrtimer_lock;
|
|
+ struct hrtimer hrtimer;
|
|
+ ktime_t hrtimer_interval;
|
|
+ unsigned int hrtimer_active;
|
|
+ struct perf_cgroup *cgrp;
|
|
+ struct list_head cgrp_cpuctx_entry;
|
|
+ struct list_head sched_cb_entry;
|
|
+ int sched_cb_usage;
|
|
+ int online;
|
|
+};
|
|
+
|
|
+struct perf_addr_filter_range {
|
|
+ long unsigned int start;
|
|
+ long unsigned int size;
|
|
+};
|
|
+
|
|
+struct perf_sample_data {
|
|
+ u64 addr;
|
|
+ struct perf_raw_record *raw;
|
|
+ struct perf_branch_stack *br_stack;
|
|
+ u64 period;
|
|
+ u64 weight;
|
|
+ u64 txn;
|
|
+ union perf_mem_data_src data_src;
|
|
+ u64 type;
|
|
+ u64 ip;
|
|
+ struct {
|
|
+ u32 pid;
|
|
+ u32 tid;
|
|
+ } tid_entry;
|
|
+ u64 time;
|
|
+ u64 id;
|
|
+ u64 stream_id;
|
|
+ struct {
|
|
+ u32 cpu;
|
|
+ u32 reserved;
|
|
+ } cpu_entry;
|
|
+ struct perf_callchain_entry *callchain;
|
|
+ struct perf_regs regs_user;
|
|
+ struct pt_regs regs_user_copy;
|
|
+ struct perf_regs regs_intr;
|
|
+ u64 stack_user_size;
|
|
+ u64 phys_addr;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct perf_cgroup_info;
|
|
+
|
|
+struct perf_cgroup {
|
|
+ struct cgroup_subsys_state css;
|
|
+ struct perf_cgroup_info *info;
|
|
+};
|
|
+
|
|
+struct perf_cgroup_info {
|
|
+ u64 time;
|
|
+ u64 timestamp;
|
|
+};
|
|
+
|
|
+struct trace_entry {
|
|
+ short unsigned int type;
|
|
+ unsigned char flags;
|
|
+ unsigned char preempt_count;
|
|
+ int pid;
|
|
+};
|
|
+
|
|
+struct trace_array;
|
|
+
|
|
+struct tracer;
|
|
+
|
|
+struct trace_buffer;
|
|
+
|
|
+struct ring_buffer_iter;
|
|
+
|
|
+struct trace_iterator {
|
|
+ struct trace_array *tr;
|
|
+ struct tracer *trace;
|
|
+ struct trace_buffer *trace_buffer;
|
|
+ void *private;
|
|
+ int cpu_file;
|
|
+ struct mutex mutex;
|
|
+ struct ring_buffer_iter **buffer_iter;
|
|
+ long unsigned int iter_flags;
|
|
+ struct trace_seq tmp_seq;
|
|
+ cpumask_var_t started;
|
|
+ bool snapshot;
|
|
+ struct trace_seq seq;
|
|
+ struct trace_entry *ent;
|
|
+ long unsigned int lost_events;
|
|
+ int leftover;
|
|
+ int ent_size;
|
|
+ int cpu;
|
|
+ u64 ts;
|
|
+ loff_t pos;
|
|
+ long int idx;
|
|
+};
|
|
+
|
|
+enum print_line_t {
|
|
+ TRACE_TYPE_PARTIAL_LINE = 0,
|
|
+ TRACE_TYPE_HANDLED = 1,
|
|
+ TRACE_TYPE_UNHANDLED = 2,
|
|
+ TRACE_TYPE_NO_CONSUME = 3,
|
|
+};
|
|
+
|
|
+typedef enum print_line_t (*trace_print_func)(struct trace_iterator *, int, struct trace_event *);
|
|
+
|
|
+struct trace_event_functions {
|
|
+ trace_print_func trace;
|
|
+ trace_print_func raw;
|
|
+ trace_print_func hex;
|
|
+ trace_print_func binary;
|
|
+};
|
|
+
|
|
+enum trace_reg {
|
|
+ TRACE_REG_REGISTER = 0,
|
|
+ TRACE_REG_UNREGISTER = 1,
|
|
+ TRACE_REG_PERF_REGISTER = 2,
|
|
+ TRACE_REG_PERF_UNREGISTER = 3,
|
|
+ TRACE_REG_PERF_OPEN = 4,
|
|
+ TRACE_REG_PERF_CLOSE = 5,
|
|
+ TRACE_REG_PERF_ADD = 6,
|
|
+ TRACE_REG_PERF_DEL = 7,
|
|
+};
|
|
+
|
|
+struct trace_event_class {
|
|
+ const char *system;
|
|
+ void *probe;
|
|
+ void *perf_probe;
|
|
+ int (*reg)(struct trace_event_call *, enum trace_reg, void *);
|
|
+ int (*define_fields)(struct trace_event_call *);
|
|
+ struct list_head * (*get_fields)(struct trace_event_call *);
|
|
+ struct list_head fields;
|
|
+ int (*raw_init)(struct trace_event_call *);
|
|
+};
|
|
+
|
|
+struct trace_event_file;
|
|
+
|
|
+struct trace_event_buffer {
|
|
+ struct ring_buffer *buffer;
|
|
+ struct ring_buffer_event *event;
|
|
+ struct trace_event_file *trace_file;
|
|
+ void *entry;
|
|
+ long unsigned int flags;
|
|
+ int pc;
|
|
+};
|
|
+
|
|
+struct trace_subsystem_dir;
|
|
+
|
|
+struct trace_event_file {
|
|
+ struct list_head list;
|
|
+ struct trace_event_call *event_call;
|
|
+ struct event_filter *filter;
|
|
+ struct dentry *dir;
|
|
+ struct trace_array *tr;
|
|
+ struct trace_subsystem_dir *system;
|
|
+ struct list_head triggers;
|
|
+ long unsigned int flags;
|
|
+ atomic_t sm_ref;
|
|
+ atomic_t tm_ref;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TRACE_EVENT_FL_FILTERED = 1,
|
|
+ TRACE_EVENT_FL_CAP_ANY = 2,
|
|
+ TRACE_EVENT_FL_NO_SET_FILTER = 4,
|
|
+ TRACE_EVENT_FL_IGNORE_ENABLE = 8,
|
|
+ TRACE_EVENT_FL_TRACEPOINT = 16,
|
|
+ TRACE_EVENT_FL_KPROBE = 32,
|
|
+ TRACE_EVENT_FL_UPROBE = 64,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ EVENT_FILE_FL_ENABLED = 1,
|
|
+ EVENT_FILE_FL_RECORDED_CMD = 2,
|
|
+ EVENT_FILE_FL_RECORDED_TGID = 4,
|
|
+ EVENT_FILE_FL_FILTERED = 8,
|
|
+ EVENT_FILE_FL_NO_SET_FILTER = 16,
|
|
+ EVENT_FILE_FL_SOFT_MODE = 32,
|
|
+ EVENT_FILE_FL_SOFT_DISABLED = 64,
|
|
+ EVENT_FILE_FL_TRIGGER_MODE = 128,
|
|
+ EVENT_FILE_FL_TRIGGER_COND = 256,
|
|
+ EVENT_FILE_FL_PID_FILTER = 512,
|
|
+ EVENT_FILE_FL_WAS_ENABLED = 1024,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ FILTER_OTHER = 0,
|
|
+ FILTER_STATIC_STRING = 1,
|
|
+ FILTER_DYN_STRING = 2,
|
|
+ FILTER_PTR_STRING = 3,
|
|
+ FILTER_TRACE_FN = 4,
|
|
+ FILTER_COMM = 5,
|
|
+ FILTER_CPU = 6,
|
|
+};
|
|
+
|
|
+struct acpi_table_header {
|
|
+ char signature[4];
|
|
+ u32 length;
|
|
+ u8 revision;
|
|
+ u8 checksum;
|
|
+ char oem_id[6];
|
|
+ char oem_table_id[8];
|
|
+ u32 oem_revision;
|
|
+ char asl_compiler_id[4];
|
|
+ u32 asl_compiler_revision;
|
|
+};
|
|
+
|
|
+struct acpi_generic_address {
|
|
+ u8 space_id;
|
|
+ u8 bit_width;
|
|
+ u8 bit_offset;
|
|
+ u8 access_width;
|
|
+ u64 address;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_table_fadt {
|
|
+ struct acpi_table_header header;
|
|
+ u32 facs;
|
|
+ u32 dsdt;
|
|
+ u8 model;
|
|
+ u8 preferred_profile;
|
|
+ u16 sci_interrupt;
|
|
+ u32 smi_command;
|
|
+ u8 acpi_enable;
|
|
+ u8 acpi_disable;
|
|
+ u8 s4_bios_request;
|
|
+ u8 pstate_control;
|
|
+ u32 pm1a_event_block;
|
|
+ u32 pm1b_event_block;
|
|
+ u32 pm1a_control_block;
|
|
+ u32 pm1b_control_block;
|
|
+ u32 pm2_control_block;
|
|
+ u32 pm_timer_block;
|
|
+ u32 gpe0_block;
|
|
+ u32 gpe1_block;
|
|
+ u8 pm1_event_length;
|
|
+ u8 pm1_control_length;
|
|
+ u8 pm2_control_length;
|
|
+ u8 pm_timer_length;
|
|
+ u8 gpe0_block_length;
|
|
+ u8 gpe1_block_length;
|
|
+ u8 gpe1_base;
|
|
+ u8 cst_control;
|
|
+ u16 c2_latency;
|
|
+ u16 c3_latency;
|
|
+ u16 flush_size;
|
|
+ u16 flush_stride;
|
|
+ u8 duty_offset;
|
|
+ u8 duty_width;
|
|
+ u8 day_alarm;
|
|
+ u8 month_alarm;
|
|
+ u8 century;
|
|
+ u16 boot_flags;
|
|
+ u8 reserved;
|
|
+ u32 flags;
|
|
+ struct acpi_generic_address reset_register;
|
|
+ u8 reset_value;
|
|
+ u16 arm_boot_flags;
|
|
+ u8 minor_revision;
|
|
+ u64 Xfacs;
|
|
+ u64 Xdsdt;
|
|
+ struct acpi_generic_address xpm1a_event_block;
|
|
+ struct acpi_generic_address xpm1b_event_block;
|
|
+ struct acpi_generic_address xpm1a_control_block;
|
|
+ struct acpi_generic_address xpm1b_control_block;
|
|
+ struct acpi_generic_address xpm2_control_block;
|
|
+ struct acpi_generic_address xpm_timer_block;
|
|
+ struct acpi_generic_address xgpe0_block;
|
|
+ struct acpi_generic_address xgpe1_block;
|
|
+ struct acpi_generic_address sleep_control;
|
|
+ struct acpi_generic_address sleep_status;
|
|
+ u64 hypervisor_id;
|
|
+} __attribute__((packed));
|
|
+
|
|
+enum acpi_irq_model_id {
|
|
+ ACPI_IRQ_MODEL_PIC = 0,
|
|
+ ACPI_IRQ_MODEL_IOAPIC = 1,
|
|
+ ACPI_IRQ_MODEL_IOSAPIC = 2,
|
|
+ ACPI_IRQ_MODEL_PLATFORM = 3,
|
|
+ ACPI_IRQ_MODEL_GIC = 4,
|
|
+ ACPI_IRQ_MODEL_COUNT = 5,
|
|
+};
|
|
+
|
|
+enum con_scroll {
|
|
+ SM_UP = 0,
|
|
+ SM_DOWN = 1,
|
|
+};
|
|
+
|
|
+struct vc_data;
|
|
+
|
|
+struct console_font;
|
|
+
|
|
+struct consw {
|
|
+ struct module *owner;
|
|
+ const char * (*con_startup)();
|
|
+ void (*con_init)(struct vc_data *, int);
|
|
+ void (*con_deinit)(struct vc_data *);
|
|
+ void (*con_clear)(struct vc_data *, int, int, int, int);
|
|
+ void (*con_putc)(struct vc_data *, int, int, int);
|
|
+ void (*con_putcs)(struct vc_data *, const short unsigned int *, int, int, int);
|
|
+ void (*con_cursor)(struct vc_data *, int);
|
|
+ bool (*con_scroll)(struct vc_data *, unsigned int, unsigned int, enum con_scroll, unsigned int);
|
|
+ int (*con_switch)(struct vc_data *);
|
|
+ int (*con_blank)(struct vc_data *, int, int);
|
|
+ int (*con_font_set)(struct vc_data *, struct console_font *, unsigned int);
|
|
+ int (*con_font_get)(struct vc_data *, struct console_font *);
|
|
+ int (*con_font_default)(struct vc_data *, struct console_font *, char *);
|
|
+ int (*con_font_copy)(struct vc_data *, int);
|
|
+ int (*con_resize)(struct vc_data *, unsigned int, unsigned int, unsigned int);
|
|
+ void (*con_set_palette)(struct vc_data *, const unsigned char *);
|
|
+ void (*con_scrolldelta)(struct vc_data *, int);
|
|
+ int (*con_set_origin)(struct vc_data *);
|
|
+ void (*con_save_screen)(struct vc_data *);
|
|
+ u8 (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8);
|
|
+ void (*con_invert_region)(struct vc_data *, u16 *, int);
|
|
+ u16 * (*con_screen_pos)(struct vc_data *, int);
|
|
+ long unsigned int (*con_getxy)(struct vc_data *, long unsigned int, int *, int *);
|
|
+ void (*con_flush_scrollback)(struct vc_data *);
|
|
+ int (*con_debug_enter)(struct vc_data *);
|
|
+ int (*con_debug_leave)(struct vc_data *);
|
|
+};
|
|
+
|
|
+struct tty_driver;
|
|
+
|
|
+struct console {
|
|
+ char name[16];
|
|
+ void (*write)(struct console *, const char *, unsigned int);
|
|
+ int (*read)(struct console *, char *, unsigned int);
|
|
+ struct tty_driver * (*device)(struct console *, int *);
|
|
+ void (*unblank)();
|
|
+ int (*setup)(struct console *, char *);
|
|
+ int (*match)(struct console *, char *, int, char *);
|
|
+ short int flags;
|
|
+ short int index;
|
|
+ int cflag;
|
|
+ void *data;
|
|
+ struct console *next;
|
|
+};
|
|
+
|
|
+enum wb_stat_item {
|
|
+ WB_RECLAIMABLE = 0,
|
|
+ WB_WRITEBACK = 1,
|
|
+ WB_DIRTIED = 2,
|
|
+ WB_WRITTEN = 3,
|
|
+ NR_WB_STAT_ITEMS = 4,
|
|
+};
|
|
+
|
|
+struct bdi_writeback_congested {
|
|
+ long unsigned int state;
|
|
+ refcount_t refcnt;
|
|
+ struct backing_dev_info *__bdi;
|
|
+ int blkcg_id;
|
|
+ struct rb_node rb_node;
|
|
+};
|
|
+
|
|
+struct bio_integrity_payload {
|
|
+ struct bio *bip_bio;
|
|
+ struct bvec_iter bip_iter;
|
|
+ short unsigned int bip_slab;
|
|
+ short unsigned int bip_vcnt;
|
|
+ short unsigned int bip_max_vcnt;
|
|
+ short unsigned int bip_flags;
|
|
+ struct work_struct bip_work;
|
|
+ struct bio_vec *bip_vec;
|
|
+ struct bio_vec bip_inline_vecs[0];
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+enum stat_group {
|
|
+ STAT_READ = 0,
|
|
+ STAT_WRITE = 1,
|
|
+ STAT_DISCARD = 2,
|
|
+ NR_STAT_GROUPS = 3,
|
|
+};
|
|
+
|
|
+enum cpu_idle_type {
|
|
+ CPU_IDLE = 0,
|
|
+ CPU_NOT_IDLE = 1,
|
|
+ CPU_NEWLY_IDLE = 2,
|
|
+ CPU_MAX_IDLE_TYPES = 3,
|
|
+};
|
|
+
|
|
+enum reboot_mode {
|
|
+ REBOOT_COLD = 0,
|
|
+ REBOOT_WARM = 1,
|
|
+ REBOOT_HARD = 2,
|
|
+ REBOOT_SOFT = 3,
|
|
+ REBOOT_GPIO = 4,
|
|
+};
|
|
+
|
|
+enum reboot_type {
|
|
+ BOOT_TRIPLE = 116,
|
|
+ BOOT_KBD = 107,
|
|
+ BOOT_BIOS = 98,
|
|
+ BOOT_ACPI = 97,
|
|
+ BOOT_EFI = 101,
|
|
+ BOOT_CF9_FORCE = 112,
|
|
+ BOOT_CF9_SAFE = 113,
|
|
+};
|
|
+
|
|
+typedef long unsigned int efi_status_t;
|
|
+
|
|
+typedef u8 efi_bool_t;
|
|
+
|
|
+typedef u16 efi_char16_t;
|
|
+
|
|
+typedef u64 efi_physical_addr_t;
|
|
+
|
|
+typedef void *efi_handle_t;
|
|
+
|
|
+typedef guid_t efi_guid_t;
|
|
+
|
|
+typedef struct {
|
|
+ u64 signature;
|
|
+ u32 revision;
|
|
+ u32 headersize;
|
|
+ u32 crc32;
|
|
+ u32 reserved;
|
|
+} efi_table_hdr_t;
|
|
+
|
|
+typedef struct {
|
|
+ u32 type;
|
|
+ u32 pad;
|
|
+ u64 phys_addr;
|
|
+ u64 virt_addr;
|
|
+ u64 num_pages;
|
|
+ u64 attribute;
|
|
+} efi_memory_desc_t;
|
|
+
|
|
+typedef struct {
|
|
+ efi_guid_t guid;
|
|
+ u32 headersize;
|
|
+ u32 flags;
|
|
+ u32 imagesize;
|
|
+} efi_capsule_header_t;
|
|
+
|
|
+typedef struct {
|
|
+ u16 year;
|
|
+ u8 month;
|
|
+ u8 day;
|
|
+ u8 hour;
|
|
+ u8 minute;
|
|
+ u8 second;
|
|
+ u8 pad1;
|
|
+ u32 nanosecond;
|
|
+ s16 timezone;
|
|
+ u8 daylight;
|
|
+ u8 pad2;
|
|
+} efi_time_t;
|
|
+
|
|
+typedef struct {
|
|
+ u32 resolution;
|
|
+ u32 accuracy;
|
|
+ u8 sets_to_zero;
|
|
+} efi_time_cap_t;
|
|
+
|
|
+typedef struct {
|
|
+ efi_table_hdr_t hdr;
|
|
+ void *raise_tpl;
|
|
+ void *restore_tpl;
|
|
+ efi_status_t (*allocate_pages)(int, int, long unsigned int, efi_physical_addr_t *);
|
|
+ efi_status_t (*free_pages)(efi_physical_addr_t, long unsigned int);
|
|
+ efi_status_t (*get_memory_map)(long unsigned int *, void *, long unsigned int *, long unsigned int *, u32 *);
|
|
+ efi_status_t (*allocate_pool)(int, long unsigned int, void **);
|
|
+ efi_status_t (*free_pool)(void *);
|
|
+ void *create_event;
|
|
+ void *set_timer;
|
|
+ void *wait_for_event;
|
|
+ void *signal_event;
|
|
+ void *close_event;
|
|
+ void *check_event;
|
|
+ void *install_protocol_interface;
|
|
+ void *reinstall_protocol_interface;
|
|
+ void *uninstall_protocol_interface;
|
|
+ efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **);
|
|
+ void *__reserved;
|
|
+ void *register_protocol_notify;
|
|
+ efi_status_t (*locate_handle)(int, efi_guid_t *, void *, long unsigned int *, efi_handle_t *);
|
|
+ void *locate_device_path;
|
|
+ efi_status_t (*install_configuration_table)(efi_guid_t *, void *);
|
|
+ void *load_image;
|
|
+ void *start_image;
|
|
+ void *exit;
|
|
+ void *unload_image;
|
|
+ efi_status_t (*exit_boot_services)(efi_handle_t, long unsigned int);
|
|
+ void *get_next_monotonic_count;
|
|
+ void *stall;
|
|
+ void *set_watchdog_timer;
|
|
+ void *connect_controller;
|
|
+ void *disconnect_controller;
|
|
+ void *open_protocol;
|
|
+ void *close_protocol;
|
|
+ void *open_protocol_information;
|
|
+ void *protocols_per_handle;
|
|
+ void *locate_handle_buffer;
|
|
+ efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **);
|
|
+ void *install_multiple_protocol_interfaces;
|
|
+ void *uninstall_multiple_protocol_interfaces;
|
|
+ void *calculate_crc32;
|
|
+ void *copy_mem;
|
|
+ void *set_mem;
|
|
+ void *create_event_ex;
|
|
+} efi_boot_services_t;
|
|
+
|
|
+typedef efi_status_t efi_get_time_t(efi_time_t *, efi_time_cap_t *);
|
|
+
|
|
+typedef efi_status_t efi_set_time_t(efi_time_t *);
|
|
+
|
|
+typedef efi_status_t efi_get_wakeup_time_t(efi_bool_t *, efi_bool_t *, efi_time_t *);
|
|
+
|
|
+typedef efi_status_t efi_set_wakeup_time_t(efi_bool_t, efi_time_t *);
|
|
+
|
|
+typedef efi_status_t efi_get_variable_t(efi_char16_t *, efi_guid_t *, u32 *, long unsigned int *, void *);
|
|
+
|
|
+typedef efi_status_t efi_get_next_variable_t(long unsigned int *, efi_char16_t *, efi_guid_t *);
|
|
+
|
|
+typedef efi_status_t efi_set_variable_t(efi_char16_t *, efi_guid_t *, u32, long unsigned int, void *);
|
|
+
|
|
+typedef efi_status_t efi_get_next_high_mono_count_t(u32 *);
|
|
+
|
|
+typedef void efi_reset_system_t(int, efi_status_t, long unsigned int, efi_char16_t *);
|
|
+
|
|
+typedef efi_status_t efi_set_virtual_address_map_t(long unsigned int, long unsigned int, u32, efi_memory_desc_t *);
|
|
+
|
|
+typedef efi_status_t efi_query_variable_info_t(u32, u64 *, u64 *, u64 *);
|
|
+
|
|
+typedef efi_status_t efi_update_capsule_t(efi_capsule_header_t **, long unsigned int, long unsigned int);
|
|
+
|
|
+typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **, long unsigned int, u64 *, int *);
|
|
+
|
|
+typedef struct {
|
|
+ efi_table_hdr_t hdr;
|
|
+ efi_get_time_t *get_time;
|
|
+ efi_set_time_t *set_time;
|
|
+ efi_get_wakeup_time_t *get_wakeup_time;
|
|
+ efi_set_wakeup_time_t *set_wakeup_time;
|
|
+ efi_set_virtual_address_map_t *set_virtual_address_map;
|
|
+ void *convert_pointer;
|
|
+ efi_get_variable_t *get_variable;
|
|
+ efi_get_next_variable_t *get_next_variable;
|
|
+ efi_set_variable_t *set_variable;
|
|
+ efi_get_next_high_mono_count_t *get_next_high_mono_count;
|
|
+ efi_reset_system_t *reset_system;
|
|
+ efi_update_capsule_t *update_capsule;
|
|
+ efi_query_capsule_caps_t *query_capsule_caps;
|
|
+ efi_query_variable_info_t *query_variable_info;
|
|
+} efi_runtime_services_t;
|
|
+
|
|
+typedef struct {
|
|
+ efi_table_hdr_t hdr;
|
|
+ long unsigned int fw_vendor;
|
|
+ u32 fw_revision;
|
|
+ long unsigned int con_in_handle;
|
|
+ long unsigned int con_in;
|
|
+ long unsigned int con_out_handle;
|
|
+ long unsigned int con_out;
|
|
+ long unsigned int stderr_handle;
|
|
+ long unsigned int stderr;
|
|
+ efi_runtime_services_t *runtime;
|
|
+ efi_boot_services_t *boottime;
|
|
+ long unsigned int nr_tables;
|
|
+ long unsigned int tables;
|
|
+} efi_system_table_t;
|
|
+
|
|
+struct efi_memory_map {
|
|
+ phys_addr_t phys_map;
|
|
+ void *map;
|
|
+ void *map_end;
|
|
+ int nr_map;
|
|
+ long unsigned int desc_version;
|
|
+ long unsigned int desc_size;
|
|
+ bool late;
|
|
+};
|
|
+
|
|
+struct efi {
|
|
+ efi_system_table_t *systab;
|
|
+ unsigned int runtime_version;
|
|
+ long unsigned int mps;
|
|
+ long unsigned int acpi;
|
|
+ long unsigned int acpi20;
|
|
+ long unsigned int smbios;
|
|
+ long unsigned int smbios3;
|
|
+ long unsigned int sal_systab;
|
|
+ long unsigned int boot_info;
|
|
+ long unsigned int hcdp;
|
|
+ long unsigned int uga;
|
|
+ long unsigned int uv_systab;
|
|
+ long unsigned int fw_vendor;
|
|
+ long unsigned int runtime;
|
|
+ long unsigned int config_table;
|
|
+ long unsigned int esrt;
|
|
+ long unsigned int properties_table;
|
|
+ long unsigned int mem_attr_table;
|
|
+ long unsigned int rng_seed;
|
|
+ long unsigned int tpm_log;
|
|
+ long unsigned int mem_reserve;
|
|
+ efi_get_time_t *get_time;
|
|
+ efi_set_time_t *set_time;
|
|
+ efi_get_wakeup_time_t *get_wakeup_time;
|
|
+ efi_set_wakeup_time_t *set_wakeup_time;
|
|
+ efi_get_variable_t *get_variable;
|
|
+ efi_get_next_variable_t *get_next_variable;
|
|
+ efi_set_variable_t *set_variable;
|
|
+ efi_set_variable_t *set_variable_nonblocking;
|
|
+ efi_query_variable_info_t *query_variable_info;
|
|
+ efi_query_variable_info_t *query_variable_info_nonblocking;
|
|
+ efi_update_capsule_t *update_capsule;
|
|
+ efi_query_capsule_caps_t *query_capsule_caps;
|
|
+ efi_get_next_high_mono_count_t *get_next_high_mono_count;
|
|
+ efi_reset_system_t *reset_system;
|
|
+ efi_set_virtual_address_map_t *set_virtual_address_map;
|
|
+ struct efi_memory_map memmap;
|
|
+ long unsigned int flags;
|
|
+};
|
|
+
|
|
+struct property {
|
|
+ char *name;
|
|
+ int length;
|
|
+ void *value;
|
|
+ struct property *next;
|
|
+};
|
|
+
|
|
+enum memcg_stat_item {
|
|
+ MEMCG_CACHE = 28,
|
|
+ MEMCG_RSS = 29,
|
|
+ MEMCG_RSS_HUGE = 30,
|
|
+ MEMCG_SWAP = 31,
|
|
+ MEMCG_SOCK = 32,
|
|
+ MEMCG_KERNEL_STACK_KB = 33,
|
|
+ MEMCG_NR_STAT = 34,
|
|
+};
|
|
+
|
|
+enum memcg_memory_event {
|
|
+ MEMCG_LOW = 0,
|
|
+ MEMCG_HIGH = 1,
|
|
+ MEMCG_MAX = 2,
|
|
+ MEMCG_OOM = 3,
|
|
+ MEMCG_OOM_KILL = 4,
|
|
+ MEMCG_SWAP_MAX = 5,
|
|
+ MEMCG_SWAP_FAIL = 6,
|
|
+ MEMCG_NR_MEMORY_EVENTS = 7,
|
|
+};
|
|
+
|
|
+enum mem_cgroup_events_target {
|
|
+ MEM_CGROUP_TARGET_THRESH = 0,
|
|
+ MEM_CGROUP_TARGET_SOFTLIMIT = 1,
|
|
+ MEM_CGROUP_TARGET_NUMAINFO = 2,
|
|
+ MEM_CGROUP_NTARGETS = 3,
|
|
+};
|
|
+
|
|
+struct mem_cgroup_stat_cpu {
|
|
+ long int count[34];
|
|
+ long unsigned int events[85];
|
|
+ long unsigned int nr_page_events;
|
|
+ long unsigned int targets[3];
|
|
+};
|
|
+
|
|
+struct mem_cgroup_reclaim_iter {
|
|
+ struct mem_cgroup *position;
|
|
+ unsigned int generation;
|
|
+};
|
|
+
|
|
+struct lruvec_stat {
|
|
+ long int count[28];
|
|
+};
|
|
+
|
|
+struct memcg_shrinker_map {
|
|
+ struct callback_head rcu;
|
|
+ long unsigned int map[0];
|
|
+};
|
|
+
|
|
+struct mem_cgroup_per_node {
|
|
+ struct lruvec lruvec;
|
|
+ struct lruvec_stat *lruvec_stat_cpu;
|
|
+ atomic_long_t lruvec_stat[28];
|
|
+ long unsigned int lru_zone_size[25];
|
|
+ struct mem_cgroup_reclaim_iter iter[13];
|
|
+ struct memcg_shrinker_map *shrinker_map;
|
|
+ struct rb_node tree_node;
|
|
+ long unsigned int usage_in_excess;
|
|
+ bool on_tree;
|
|
+ bool congested;
|
|
+ struct mem_cgroup *memcg;
|
|
+};
|
|
+
|
|
+struct eventfd_ctx;
|
|
+
|
|
+struct mem_cgroup_threshold {
|
|
+ struct eventfd_ctx *eventfd;
|
|
+ long unsigned int threshold;
|
|
+};
|
|
+
|
|
+struct mem_cgroup_threshold_ary {
|
|
+ int current_threshold;
|
|
+ unsigned int size;
|
|
+ struct mem_cgroup_threshold entries[0];
|
|
+};
|
|
+
|
|
+enum kgdb_bptype {
|
|
+ BP_BREAKPOINT = 0,
|
|
+ BP_HARDWARE_BREAKPOINT = 1,
|
|
+ BP_WRITE_WATCHPOINT = 2,
|
|
+ BP_READ_WATCHPOINT = 3,
|
|
+ BP_ACCESS_WATCHPOINT = 4,
|
|
+ BP_POKE_BREAKPOINT = 5,
|
|
+};
|
|
+
|
|
+struct dbg_reg_def_t {
|
|
+ char *name;
|
|
+ int size;
|
|
+ int offset;
|
|
+};
|
|
+
|
|
+struct kgdb_arch {
|
|
+ unsigned char gdb_bpt_instr[1];
|
|
+ long unsigned int flags;
|
|
+ int (*set_breakpoint)(long unsigned int, char *);
|
|
+ int (*remove_breakpoint)(long unsigned int, char *);
|
|
+ int (*set_hw_breakpoint)(long unsigned int, int, enum kgdb_bptype);
|
|
+ int (*remove_hw_breakpoint)(long unsigned int, int, enum kgdb_bptype);
|
|
+ void (*disable_hw_break)(struct pt_regs *);
|
|
+ void (*remove_all_hw_break)();
|
|
+ void (*correct_hw_break)();
|
|
+ void (*enable_nmi)(bool);
|
|
+};
|
|
+
|
|
+struct kgdb_io {
|
|
+ const char *name;
|
|
+ int (*read_char)();
|
|
+ void (*write_char)(u8);
|
|
+ void (*flush)();
|
|
+ int (*init)();
|
|
+ void (*pre_exception)();
|
|
+ void (*post_exception)();
|
|
+ int is_console;
|
|
+};
|
|
+
|
|
+struct percpu_cluster {
|
|
+ struct swap_cluster_info index;
|
|
+ unsigned int next;
|
|
+};
|
|
+
|
|
+struct disk_stats {
|
|
+ u64 nsecs[3];
|
|
+ long unsigned int sectors[3];
|
|
+ long unsigned int ios[3];
|
|
+ long unsigned int merges[3];
|
|
+ long unsigned int io_ticks;
|
|
+ long unsigned int time_in_queue;
|
|
+};
|
|
+
|
|
+struct partition_meta_info {
|
|
+ char uuid[37];
|
|
+ u8 volname[64];
|
|
+};
|
|
+
|
|
+struct disk_part_tbl {
|
|
+ struct callback_head callback_head;
|
|
+ int len;
|
|
+ struct hd_struct *last_lookup;
|
|
+ struct hd_struct *part[0];
|
|
+};
|
|
+
|
|
+struct blk_integrity_iter;
|
|
+
|
|
+typedef blk_status_t integrity_processing_fn(struct blk_integrity_iter *);
|
|
+
|
|
+struct blk_integrity_profile {
|
|
+ integrity_processing_fn *generate_fn;
|
|
+ integrity_processing_fn *verify_fn;
|
|
+ const char *name;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct hd_geometry;
|
|
+
|
|
+struct pr_ops;
|
|
+
|
|
+struct block_device_operations {
|
|
+ int (*open)(struct block_device *, fmode_t);
|
|
+ void (*release)(struct gendisk *, fmode_t);
|
|
+ int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
|
|
+ int (*ioctl)(struct block_device *, fmode_t, unsigned int, long unsigned int);
|
|
+ int (*compat_ioctl)(struct block_device *, fmode_t, unsigned int, long unsigned int);
|
|
+ unsigned int (*check_events)(struct gendisk *, unsigned int);
|
|
+ int (*media_changed)(struct gendisk *);
|
|
+ void (*unlock_native_capacity)(struct gendisk *);
|
|
+ int (*revalidate_disk)(struct gendisk *);
|
|
+ int (*getgeo)(struct block_device *, struct hd_geometry *);
|
|
+ void (*swap_slot_free_notify)(struct block_device *, long unsigned int);
|
|
+ struct module *owner;
|
|
+ const struct pr_ops *pr_ops;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct sg_io_v4 {
|
|
+ __s32 guard;
|
|
+ __u32 protocol;
|
|
+ __u32 subprotocol;
|
|
+ __u32 request_len;
|
|
+ __u64 request;
|
|
+ __u64 request_tag;
|
|
+ __u32 request_attr;
|
|
+ __u32 request_priority;
|
|
+ __u32 request_extra;
|
|
+ __u32 max_response_len;
|
|
+ __u64 response;
|
|
+ __u32 dout_iovec_count;
|
|
+ __u32 dout_xfer_len;
|
|
+ __u32 din_iovec_count;
|
|
+ __u32 din_xfer_len;
|
|
+ __u64 dout_xferp;
|
|
+ __u64 din_xferp;
|
|
+ __u32 timeout;
|
|
+ __u32 flags;
|
|
+ __u64 usr_ptr;
|
|
+ __u32 spare_in;
|
|
+ __u32 driver_status;
|
|
+ __u32 transport_status;
|
|
+ __u32 device_status;
|
|
+ __u32 retry_delay;
|
|
+ __u32 info;
|
|
+ __u32 duration;
|
|
+ __u32 response_len;
|
|
+ __s32 din_resid;
|
|
+ __s32 dout_resid;
|
|
+ __u64 generated_tag;
|
|
+ __u32 spare_out;
|
|
+ __u32 padding;
|
|
+};
|
|
+
|
|
+struct bsg_ops {
|
|
+ int (*check_proto)(struct sg_io_v4 *);
|
|
+ int (*fill_hdr)(struct request *, struct sg_io_v4 *, fmode_t);
|
|
+ int (*complete_rq)(struct request *, struct sg_io_v4 *);
|
|
+ void (*free_rq)(struct request *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+typedef __u32 req_flags_t;
|
|
+
|
|
+typedef void rq_end_io_fn(struct request *, blk_status_t);
|
|
+
|
|
+enum mq_rq_state {
|
|
+ MQ_RQ_IDLE = 0,
|
|
+ MQ_RQ_IN_FLIGHT = 1,
|
|
+ MQ_RQ_COMPLETE = 2,
|
|
+};
|
|
+
|
|
+struct request {
|
|
+ struct request_queue *q;
|
|
+ struct blk_mq_ctx *mq_ctx;
|
|
+ int cpu;
|
|
+ unsigned int cmd_flags;
|
|
+ req_flags_t rq_flags;
|
|
+ int internal_tag;
|
|
+ unsigned int __data_len;
|
|
+ int tag;
|
|
+ sector_t __sector;
|
|
+ struct bio *bio;
|
|
+ struct bio *biotail;
|
|
+ struct list_head queuelist;
|
|
+ union {
|
|
+ struct hlist_node hash;
|
|
+ struct list_head ipi_list;
|
|
+ };
|
|
+ union {
|
|
+ struct rb_node rb_node;
|
|
+ struct bio_vec special_vec;
|
|
+ void *completion_data;
|
|
+ int error_count;
|
|
+ };
|
|
+ union {
|
|
+ struct {
|
|
+ struct io_cq *icq;
|
|
+ void *priv[2];
|
|
+ } elv;
|
|
+ struct {
|
|
+ unsigned int seq;
|
|
+ struct list_head list;
|
|
+ rq_end_io_fn *saved_end_io;
|
|
+ } flush;
|
|
+ };
|
|
+ struct gendisk *rq_disk;
|
|
+ struct hd_struct *part;
|
|
+ u64 start_time_ns;
|
|
+ u64 io_start_time_ns;
|
|
+ short unsigned int wbt_flags;
|
|
+ short unsigned int nr_phys_segments;
|
|
+ short unsigned int nr_integrity_segments;
|
|
+ short unsigned int write_hint;
|
|
+ short unsigned int ioprio;
|
|
+ void *special;
|
|
+ unsigned int extra_len;
|
|
+ enum mq_rq_state state;
|
|
+ refcount_t ref;
|
|
+ unsigned int timeout;
|
|
+ long unsigned int __deadline;
|
|
+ struct list_head timeout_list;
|
|
+ union {
|
|
+ struct __call_single_data csd;
|
|
+ u64 fifo_time;
|
|
+ };
|
|
+ rq_end_io_fn *end_io;
|
|
+ void *end_io_data;
|
|
+ struct request *next_rq;
|
|
+ struct request_list *rl;
|
|
+};
|
|
+
|
|
+enum elv_merge {
|
|
+ ELEVATOR_NO_MERGE = 0,
|
|
+ ELEVATOR_FRONT_MERGE = 1,
|
|
+ ELEVATOR_BACK_MERGE = 2,
|
|
+ ELEVATOR_DISCARD_MERGE = 3,
|
|
+};
|
|
+
|
|
+typedef enum elv_merge elevator_merge_fn(struct request_queue *, struct request **, struct bio *);
|
|
+
|
|
+typedef void elevator_merge_req_fn(struct request_queue *, struct request *, struct request *);
|
|
+
|
|
+typedef void elevator_merged_fn(struct request_queue *, struct request *, enum elv_merge);
|
|
+
|
|
+typedef int elevator_allow_bio_merge_fn(struct request_queue *, struct request *, struct bio *);
|
|
+
|
|
+typedef int elevator_allow_rq_merge_fn(struct request_queue *, struct request *, struct request *);
|
|
+
|
|
+typedef void elevator_bio_merged_fn(struct request_queue *, struct request *, struct bio *);
|
|
+
|
|
+typedef int elevator_dispatch_fn(struct request_queue *, int);
|
|
+
|
|
+typedef void elevator_add_req_fn(struct request_queue *, struct request *);
|
|
+
|
|
+typedef struct request *elevator_request_list_fn(struct request_queue *, struct request *);
|
|
+
|
|
+typedef void elevator_completed_req_fn(struct request_queue *, struct request *);
|
|
+
|
|
+typedef int elevator_may_queue_fn(struct request_queue *, unsigned int);
|
|
+
|
|
+typedef void elevator_init_icq_fn(struct io_cq *);
|
|
+
|
|
+typedef void elevator_exit_icq_fn(struct io_cq *);
|
|
+
|
|
+typedef int elevator_set_req_fn(struct request_queue *, struct request *, struct bio *, gfp_t);
|
|
+
|
|
+typedef void elevator_put_req_fn(struct request *);
|
|
+
|
|
+typedef void elevator_activate_req_fn(struct request_queue *, struct request *);
|
|
+
|
|
+typedef void elevator_deactivate_req_fn(struct request_queue *, struct request *);
|
|
+
|
|
+struct elevator_type;
|
|
+
|
|
+typedef int elevator_init_fn(struct request_queue *, struct elevator_type *);
|
|
+
|
|
+typedef void elevator_exit_fn(struct elevator_queue *);
|
|
+
|
|
+typedef void elevator_registered_fn(struct request_queue *);
|
|
+
|
|
+struct elevator_ops {
|
|
+ elevator_merge_fn *elevator_merge_fn;
|
|
+ elevator_merged_fn *elevator_merged_fn;
|
|
+ elevator_merge_req_fn *elevator_merge_req_fn;
|
|
+ elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn;
|
|
+ elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn;
|
|
+ elevator_bio_merged_fn *elevator_bio_merged_fn;
|
|
+ elevator_dispatch_fn *elevator_dispatch_fn;
|
|
+ elevator_add_req_fn *elevator_add_req_fn;
|
|
+ elevator_activate_req_fn *elevator_activate_req_fn;
|
|
+ elevator_deactivate_req_fn *elevator_deactivate_req_fn;
|
|
+ elevator_completed_req_fn *elevator_completed_req_fn;
|
|
+ elevator_request_list_fn *elevator_former_req_fn;
|
|
+ elevator_request_list_fn *elevator_latter_req_fn;
|
|
+ elevator_init_icq_fn *elevator_init_icq_fn;
|
|
+ elevator_exit_icq_fn *elevator_exit_icq_fn;
|
|
+ elevator_set_req_fn *elevator_set_req_fn;
|
|
+ elevator_put_req_fn *elevator_put_req_fn;
|
|
+ elevator_may_queue_fn *elevator_may_queue_fn;
|
|
+ elevator_init_fn *elevator_init_fn;
|
|
+ elevator_exit_fn *elevator_exit_fn;
|
|
+ elevator_registered_fn *elevator_registered_fn;
|
|
+};
|
|
+
|
|
+struct blk_mq_alloc_data;
|
|
+
|
|
+struct elevator_mq_ops {
|
|
+ int (*init_sched)(struct request_queue *, struct elevator_type *);
|
|
+ void (*exit_sched)(struct elevator_queue *);
|
|
+ int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
|
|
+ void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
|
|
+ void (*depth_updated)(struct blk_mq_hw_ctx *);
|
|
+ bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
|
|
+ bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
|
|
+ int (*request_merge)(struct request_queue *, struct request **, struct bio *);
|
|
+ void (*request_merged)(struct request_queue *, struct request *, enum elv_merge);
|
|
+ void (*requests_merged)(struct request_queue *, struct request *, struct request *);
|
|
+ void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *);
|
|
+ void (*prepare_request)(struct request *, struct bio *);
|
|
+ void (*finish_request)(struct request *);
|
|
+ void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
|
|
+ struct request * (*dispatch_request)(struct blk_mq_hw_ctx *);
|
|
+ bool (*has_work)(struct blk_mq_hw_ctx *);
|
|
+ void (*completed_request)(struct request *);
|
|
+ void (*started_request)(struct request *);
|
|
+ void (*requeue_request)(struct request *);
|
|
+ struct request * (*former_request)(struct request_queue *, struct request *);
|
|
+ struct request * (*next_request)(struct request_queue *, struct request *);
|
|
+ void (*init_icq)(struct io_cq *);
|
|
+ void (*exit_icq)(struct io_cq *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+};
|
|
+
|
|
+struct elv_fs_entry;
|
|
+
|
|
+struct blk_mq_debugfs_attr;
|
|
+
|
|
+struct elevator_type {
|
|
+ struct kmem_cache *icq_cache;
|
|
+ union {
|
|
+ struct elevator_ops sq;
|
|
+ struct elevator_mq_ops mq;
|
|
+ } ops;
|
|
+ size_t icq_size;
|
|
+ size_t icq_align;
|
|
+ struct elv_fs_entry *elevator_attrs;
|
|
+ char elevator_name[16];
|
|
+ const char *elevator_alias;
|
|
+ struct module *elevator_owner;
|
|
+ bool uses_mq;
|
|
+ const struct blk_mq_debugfs_attr *queue_debugfs_attrs;
|
|
+ const struct blk_mq_debugfs_attr *hctx_debugfs_attrs;
|
|
+ char icq_cache_name[22];
|
|
+ struct list_head list;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct elevator_queue {
|
|
+ struct elevator_type *type;
|
|
+ void *elevator_data;
|
|
+ struct kobject kobj;
|
|
+ struct mutex sysfs_lock;
|
|
+ unsigned int registered: 1;
|
|
+ unsigned int uses_mq: 1;
|
|
+ struct hlist_head hash[64];
|
|
+};
|
|
+
|
|
+struct elv_fs_entry {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct elevator_queue *, char *);
|
|
+ ssize_t (*store)(struct elevator_queue *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct blk_mq_debugfs_attr {
|
|
+ const char *name;
|
|
+ umode_t mode;
|
|
+ int (*show)(void *, struct seq_file *);
|
|
+ ssize_t (*write)(void *, const char *, size_t, loff_t *);
|
|
+ const struct seq_operations___2 *seq_ops;
|
|
+};
|
|
+
|
|
+struct blk_queue_tag {
|
|
+ struct request **tag_index;
|
|
+ long unsigned int *tag_map;
|
|
+ int max_depth;
|
|
+ int real_max_depth;
|
|
+ atomic_t refcnt;
|
|
+ int alloc_policy;
|
|
+ int next_tag;
|
|
+};
|
|
+
|
|
+struct blk_mq_queue_data;
|
|
+
|
|
+typedef blk_status_t queue_rq_fn(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
|
|
+
|
|
+typedef bool get_budget_fn(struct blk_mq_hw_ctx *);
|
|
+
|
|
+typedef void put_budget_fn(struct blk_mq_hw_ctx *);
|
|
+
|
|
+typedef enum blk_eh_timer_return timeout_fn(struct request *, bool);
|
|
+
|
|
+typedef int poll_fn(struct blk_mq_hw_ctx *, unsigned int);
|
|
+
|
|
+typedef int init_hctx_fn(struct blk_mq_hw_ctx *, void *, unsigned int);
|
|
+
|
|
+typedef void exit_hctx_fn(struct blk_mq_hw_ctx *, unsigned int);
|
|
+
|
|
+typedef int init_request_fn(struct blk_mq_tag_set *, struct request *, unsigned int, unsigned int);
|
|
+
|
|
+typedef void exit_request_fn(struct blk_mq_tag_set *, struct request *, unsigned int);
|
|
+
|
|
+typedef void cleanup_rq_fn(struct request *);
|
|
+
|
|
+typedef int map_queues_fn(struct blk_mq_tag_set *);
|
|
+
|
|
+struct blk_mq_ops {
|
|
+ queue_rq_fn *queue_rq;
|
|
+ get_budget_fn *get_budget;
|
|
+ put_budget_fn *put_budget;
|
|
+ timeout_fn *timeout;
|
|
+ poll_fn *poll;
|
|
+ softirq_done_fn *complete;
|
|
+ init_hctx_fn *init_hctx;
|
|
+ exit_hctx_fn *exit_hctx;
|
|
+ init_request_fn *init_request;
|
|
+ exit_request_fn *exit_request;
|
|
+ void (*initialize_rq_fn)(struct request *);
|
|
+ cleanup_rq_fn *cleanup_rq;
|
|
+ map_queues_fn *map_queues;
|
|
+ void (*show_rq)(struct seq_file *, struct request *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+};
|
|
+
|
|
+struct blk_integrity_iter {
|
|
+ void *prot_buf;
|
|
+ void *data_buf;
|
|
+ sector_t seed;
|
|
+ unsigned int data_size;
|
|
+ short unsigned int interval;
|
|
+ const char *disk_name;
|
|
+};
|
|
+
|
|
+enum pr_type {
|
|
+ PR_WRITE_EXCLUSIVE = 1,
|
|
+ PR_EXCLUSIVE_ACCESS = 2,
|
|
+ PR_WRITE_EXCLUSIVE_REG_ONLY = 3,
|
|
+ PR_EXCLUSIVE_ACCESS_REG_ONLY = 4,
|
|
+ PR_WRITE_EXCLUSIVE_ALL_REGS = 5,
|
|
+ PR_EXCLUSIVE_ACCESS_ALL_REGS = 6,
|
|
+};
|
|
+
|
|
+struct pr_ops {
|
|
+ int (*pr_register)(struct block_device *, u64, u64, u32);
|
|
+ int (*pr_reserve)(struct block_device *, u64, enum pr_type, u32);
|
|
+ int (*pr_release)(struct block_device *, u64, enum pr_type);
|
|
+ int (*pr_preempt)(struct block_device *, u64, u64, enum pr_type, bool);
|
|
+ int (*pr_clear)(struct block_device *, u64);
|
|
+};
|
|
+
|
|
+struct trace_event_raw_initcall_level {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_level;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_initcall_start {
|
|
+ struct trace_entry ent;
|
|
+ initcall_t func;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_initcall_finish {
|
|
+ struct trace_entry ent;
|
|
+ initcall_t func;
|
|
+ int ret;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_initcall_level {
|
|
+ u32 level;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_initcall_start {};
|
|
+
|
|
+struct trace_event_data_offsets_initcall_finish {};
|
|
+
|
|
+struct blacklist_entry {
|
|
+ struct list_head next;
|
|
+ char *buf;
|
|
+};
|
|
+
|
|
+enum page_cache_mode {
|
|
+ _PAGE_CACHE_MODE_WB = 0,
|
|
+ _PAGE_CACHE_MODE_WC = 1,
|
|
+ _PAGE_CACHE_MODE_UC_MINUS = 2,
|
|
+ _PAGE_CACHE_MODE_UC = 3,
|
|
+ _PAGE_CACHE_MODE_WT = 4,
|
|
+ _PAGE_CACHE_MODE_WP = 5,
|
|
+ _PAGE_CACHE_MODE_NUM = 8,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ UNAME26 = 131072,
|
|
+ ADDR_NO_RANDOMIZE = 262144,
|
|
+ FDPIC_FUNCPTRS = 524288,
|
|
+ MMAP_PAGE_ZERO = 1048576,
|
|
+ ADDR_COMPAT_LAYOUT = 2097152,
|
|
+ READ_IMPLIES_EXEC = 4194304,
|
|
+ ADDR_LIMIT_32BIT = 8388608,
|
|
+ SHORT_INODE = 16777216,
|
|
+ WHOLE_SECONDS = 33554432,
|
|
+ STICKY_TIMEOUTS = 67108864,
|
|
+ ADDR_LIMIT_3GB = 134217728,
|
|
+};
|
|
+
|
|
+enum tlb_infos {
|
|
+ ENTRIES = 0,
|
|
+ NR_INFO = 1,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ MM_FILEPAGES = 0,
|
|
+ MM_ANONPAGES = 1,
|
|
+ MM_SWAPENTS = 2,
|
|
+ MM_SHMEMPAGES = 3,
|
|
+ NR_MM_COUNTERS = 4,
|
|
+};
|
|
+
|
|
+typedef __u32 Elf32_Word;
|
|
+
|
|
+struct elf32_note {
|
|
+ Elf32_Word n_namesz;
|
|
+ Elf32_Word n_descsz;
|
|
+ Elf32_Word n_type;
|
|
+};
|
|
+
|
|
+enum hrtimer_base_type {
|
|
+ HRTIMER_BASE_MONOTONIC = 0,
|
|
+ HRTIMER_BASE_REALTIME = 1,
|
|
+ HRTIMER_BASE_BOOTTIME = 2,
|
|
+ HRTIMER_BASE_TAI = 3,
|
|
+ HRTIMER_BASE_MONOTONIC_SOFT = 4,
|
|
+ HRTIMER_BASE_REALTIME_SOFT = 5,
|
|
+ HRTIMER_BASE_BOOTTIME_SOFT = 6,
|
|
+ HRTIMER_BASE_TAI_SOFT = 7,
|
|
+ HRTIMER_MAX_CLOCK_BASES = 8,
|
|
+};
|
|
+
|
|
+enum rseq_cs_flags_bit {
|
|
+ RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT = 0,
|
|
+ RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT = 1,
|
|
+ RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT = 2,
|
|
+};
|
|
+
|
|
+enum perf_event_task_context {
|
|
+ perf_invalid_context = -1,
|
|
+ perf_hw_context = 0,
|
|
+ perf_sw_context = 1,
|
|
+ perf_nr_task_contexts = 2,
|
|
+};
|
|
+
|
|
+enum rseq_event_mask_bits {
|
|
+ RSEQ_EVENT_PREEMPT_BIT = 0,
|
|
+ RSEQ_EVENT_SIGNAL_BIT = 1,
|
|
+ RSEQ_EVENT_MIGRATE_BIT = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ PROC_ROOT_INO = 1,
|
|
+ PROC_IPC_INIT_INO = -268435457,
|
|
+ PROC_UTS_INIT_INO = -268435458,
|
|
+ PROC_USER_INIT_INO = -268435459,
|
|
+ PROC_PID_INIT_INO = -268435460,
|
|
+ PROC_CGROUP_INIT_INO = -268435461,
|
|
+};
|
|
+
|
|
+typedef __u16 __le16;
|
|
+
|
|
+typedef __u16 __be16;
|
|
+
|
|
+typedef __u32 __be32;
|
|
+
|
|
+typedef __u64 __be64;
|
|
+
|
|
+typedef __u32 __wsum;
|
|
+
|
|
+typedef u64 uint64_t;
|
|
+
|
|
+typedef unsigned int slab_flags_t;
|
|
+
|
|
+struct llist_head {
|
|
+ struct llist_node *first;
|
|
+};
|
|
+
|
|
+typedef struct __call_single_data call_single_data_t;
|
|
+
|
|
+struct ida {
|
|
+ struct radix_tree_root ida_rt;
|
|
+};
|
|
+
|
|
+typedef __u64 __addrpair;
|
|
+
|
|
+typedef __u32 __portpair;
|
|
+
|
|
+typedef struct {
|
|
+ struct net *net;
|
|
+} possible_net_t;
|
|
+
|
|
+struct in6_addr {
|
|
+ union {
|
|
+ __u8 u6_addr8[16];
|
|
+ __be16 u6_addr16[8];
|
|
+ __be32 u6_addr32[4];
|
|
+ } in6_u;
|
|
+};
|
|
+
|
|
+struct hlist_nulls_node {
|
|
+ struct hlist_nulls_node *next;
|
|
+ struct hlist_nulls_node **pprev;
|
|
+};
|
|
+
|
|
+struct proto;
|
|
+
|
|
+struct inet_timewait_death_row;
|
|
+
|
|
+struct sock_common {
|
|
+ union {
|
|
+ __addrpair skc_addrpair;
|
|
+ struct {
|
|
+ __be32 skc_daddr;
|
|
+ __be32 skc_rcv_saddr;
|
|
+ };
|
|
+ };
|
|
+ union {
|
|
+ unsigned int skc_hash;
|
|
+ __u16 skc_u16hashes[2];
|
|
+ };
|
|
+ union {
|
|
+ __portpair skc_portpair;
|
|
+ struct {
|
|
+ __be16 skc_dport;
|
|
+ __u16 skc_num;
|
|
+ };
|
|
+ };
|
|
+ short unsigned int skc_family;
|
|
+ volatile unsigned char skc_state;
|
|
+ unsigned char skc_reuse: 4;
|
|
+ unsigned char skc_reuseport: 1;
|
|
+ unsigned char skc_ipv6only: 1;
|
|
+ unsigned char skc_net_refcnt: 1;
|
|
+ int skc_bound_dev_if;
|
|
+ union {
|
|
+ struct hlist_node skc_bind_node;
|
|
+ struct hlist_node skc_portaddr_node;
|
|
+ };
|
|
+ struct proto *skc_prot;
|
|
+ possible_net_t skc_net;
|
|
+ struct in6_addr skc_v6_daddr;
|
|
+ struct in6_addr skc_v6_rcv_saddr;
|
|
+ atomic64_t skc_cookie;
|
|
+ union {
|
|
+ long unsigned int skc_flags;
|
|
+ struct sock *skc_listener;
|
|
+ struct inet_timewait_death_row *skc_tw_dr;
|
|
+ };
|
|
+ int skc_dontcopy_begin[0];
|
|
+ union {
|
|
+ struct hlist_node skc_node;
|
|
+ struct hlist_nulls_node skc_nulls_node;
|
|
+ };
|
|
+ short unsigned int skc_tx_queue_mapping;
|
|
+ short unsigned int skc_rx_queue_mapping;
|
|
+ union {
|
|
+ int skc_incoming_cpu;
|
|
+ u32 skc_rcv_wnd;
|
|
+ u32 skc_tw_rcv_nxt;
|
|
+ };
|
|
+ refcount_t skc_refcnt;
|
|
+ int skc_dontcopy_end[0];
|
|
+ union {
|
|
+ u32 skc_rxhash;
|
|
+ u32 skc_window_clamp;
|
|
+ u32 skc_tw_snd_nxt;
|
|
+ };
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ spinlock_t slock;
|
|
+ int owned;
|
|
+ wait_queue_head_t wq;
|
|
+} socket_lock_t;
|
|
+
|
|
+struct sk_buff;
|
|
+
|
|
+struct sk_buff_head {
|
|
+ struct sk_buff *next;
|
|
+ struct sk_buff *prev;
|
|
+ __u32 qlen;
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+typedef u64 netdev_features_t;
|
|
+
|
|
+struct sock_cgroup_data {
|
|
+ union {
|
|
+ struct {
|
|
+ u8 is_data;
|
|
+ u8 padding;
|
|
+ u16 prioidx;
|
|
+ u32 classid;
|
|
+ };
|
|
+ u64 val;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct sk_filter;
|
|
+
|
|
+struct socket_wq;
|
|
+
|
|
+struct xfrm_policy;
|
|
+
|
|
+struct dst_entry;
|
|
+
|
|
+struct socket;
|
|
+
|
|
+struct net_device;
|
|
+
|
|
+struct sock_reuseport;
|
|
+
|
|
+struct sock {
|
|
+ struct sock_common __sk_common;
|
|
+ socket_lock_t sk_lock;
|
|
+ atomic_t sk_drops;
|
|
+ int sk_rcvlowat;
|
|
+ struct sk_buff_head sk_error_queue;
|
|
+ struct sk_buff_head sk_receive_queue;
|
|
+ struct {
|
|
+ atomic_t rmem_alloc;
|
|
+ int len;
|
|
+ struct sk_buff *head;
|
|
+ struct sk_buff *tail;
|
|
+ } sk_backlog;
|
|
+ int sk_forward_alloc;
|
|
+ unsigned int sk_ll_usec;
|
|
+ unsigned int sk_napi_id;
|
|
+ int sk_rcvbuf;
|
|
+ struct sk_filter *sk_filter;
|
|
+ union {
|
|
+ struct socket_wq *sk_wq;
|
|
+ struct socket_wq *sk_wq_raw;
|
|
+ };
|
|
+ struct xfrm_policy *sk_policy[2];
|
|
+ struct dst_entry *sk_rx_dst;
|
|
+ struct dst_entry *sk_dst_cache;
|
|
+ atomic_t sk_omem_alloc;
|
|
+ int sk_sndbuf;
|
|
+ int sk_wmem_queued;
|
|
+ refcount_t sk_wmem_alloc;
|
|
+ long unsigned int sk_tsq_flags;
|
|
+ union {
|
|
+ struct sk_buff *sk_send_head;
|
|
+ struct rb_root tcp_rtx_queue;
|
|
+ };
|
|
+ struct sk_buff_head sk_write_queue;
|
|
+ __s32 sk_peek_off;
|
|
+ int sk_write_pending;
|
|
+ __u32 sk_dst_pending_confirm;
|
|
+ u32 sk_pacing_status;
|
|
+ long int sk_sndtimeo;
|
|
+ struct timer_list sk_timer;
|
|
+ __u32 sk_priority;
|
|
+ __u32 sk_mark;
|
|
+ u32 sk_pacing_rate;
|
|
+ u32 sk_max_pacing_rate;
|
|
+ struct page_frag sk_frag;
|
|
+ netdev_features_t sk_route_caps;
|
|
+ netdev_features_t sk_route_nocaps;
|
|
+ netdev_features_t sk_route_forced_caps;
|
|
+ int sk_gso_type;
|
|
+ unsigned int sk_gso_max_size;
|
|
+ gfp_t sk_allocation;
|
|
+ __u32 sk_txhash;
|
|
+ unsigned int __sk_flags_offset[0];
|
|
+ unsigned int sk_padding: 1;
|
|
+ unsigned int sk_kern_sock: 1;
|
|
+ unsigned int sk_no_check_tx: 1;
|
|
+ unsigned int sk_no_check_rx: 1;
|
|
+ unsigned int sk_userlocks: 4;
|
|
+ unsigned int sk_protocol: 8;
|
|
+ unsigned int sk_type: 16;
|
|
+ u16 sk_gso_max_segs;
|
|
+ u8 sk_pacing_shift;
|
|
+ long unsigned int sk_lingertime;
|
|
+ struct proto *sk_prot_creator;
|
|
+ rwlock_t sk_callback_lock;
|
|
+ int sk_err;
|
|
+ int sk_err_soft;
|
|
+ u32 sk_ack_backlog;
|
|
+ u32 sk_max_ack_backlog;
|
|
+ kuid_t sk_uid;
|
|
+ struct pid *sk_peer_pid;
|
|
+ const struct cred *sk_peer_cred;
|
|
+ long int sk_rcvtimeo;
|
|
+ ktime_t sk_stamp;
|
|
+ u16 sk_tsflags;
|
|
+ u8 sk_shutdown;
|
|
+ u32 sk_tskey;
|
|
+ atomic_t sk_zckey;
|
|
+ u8 sk_clockid;
|
|
+ u8 sk_txtime_deadline_mode: 1;
|
|
+ u8 sk_txtime_report_errors: 1;
|
|
+ u8 sk_txtime_unused: 6;
|
|
+ struct socket *sk_socket;
|
|
+ void *sk_user_data;
|
|
+ void *sk_security;
|
|
+ struct sock_cgroup_data sk_cgrp_data;
|
|
+ struct mem_cgroup *sk_memcg;
|
|
+ void (*sk_state_change)(struct sock *);
|
|
+ void (*sk_data_ready)(struct sock *);
|
|
+ void (*sk_write_space)(struct sock *);
|
|
+ void (*sk_error_report)(struct sock *);
|
|
+ int (*sk_backlog_rcv)(struct sock *, struct sk_buff *);
|
|
+ struct sk_buff * (*sk_validate_xmit_skb)(struct sock *, struct net_device *, struct sk_buff *);
|
|
+ void (*sk_destruct)(struct sock *);
|
|
+ struct sock_reuseport *sk_reuseport_cb;
|
|
+ struct callback_head sk_rcu;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long unsigned int kabi_reserved9;
|
|
+ long unsigned int kabi_reserved10;
|
|
+ long unsigned int kabi_reserved11;
|
|
+ long unsigned int kabi_reserved12;
|
|
+ long unsigned int kabi_reserved13;
|
|
+ long unsigned int kabi_reserved14;
|
|
+ long unsigned int kabi_reserved15;
|
|
+ long unsigned int kabi_reserved16;
|
|
+};
|
|
+
|
|
+struct rhash_head {
|
|
+ struct rhash_head *next;
|
|
+};
|
|
+
|
|
+struct rhashtable;
|
|
+
|
|
+struct rhashtable_compare_arg {
|
|
+ struct rhashtable *ht;
|
|
+ const void *key;
|
|
+};
|
|
+
|
|
+typedef u32 (*rht_hashfn_t)(const void *, u32, u32);
|
|
+
|
|
+typedef u32 (*rht_obj_hashfn_t)(const void *, u32, u32);
|
|
+
|
|
+typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *, const void *);
|
|
+
|
|
+struct rhashtable_params {
|
|
+ u16 nelem_hint;
|
|
+ u16 key_len;
|
|
+ u16 key_offset;
|
|
+ u16 head_offset;
|
|
+ unsigned int max_size;
|
|
+ u16 min_size;
|
|
+ bool automatic_shrinking;
|
|
+ u8 locks_mul;
|
|
+ rht_hashfn_t hashfn;
|
|
+ rht_obj_hashfn_t obj_hashfn;
|
|
+ rht_obj_cmpfn_t obj_cmpfn;
|
|
+};
|
|
+
|
|
+struct bucket_table;
|
|
+
|
|
+struct rhashtable {
|
|
+ struct bucket_table *tbl;
|
|
+ unsigned int key_len;
|
|
+ unsigned int max_elems;
|
|
+ struct rhashtable_params p;
|
|
+ bool rhlist;
|
|
+ struct work_struct run_work;
|
|
+ struct mutex mutex;
|
|
+ spinlock_t lock;
|
|
+ atomic_t nelems;
|
|
+};
|
|
+
|
|
+struct fs_struct {
|
|
+ int users;
|
|
+ spinlock_t lock;
|
|
+ seqcount_t seq;
|
|
+ int umask;
|
|
+ int in_exec;
|
|
+ struct path root;
|
|
+ struct path pwd;
|
|
+};
|
|
+
|
|
+struct pipe_buffer;
|
|
+
|
|
+struct pipe_inode_info {
|
|
+ struct mutex mutex;
|
|
+ wait_queue_head_t wait;
|
|
+ unsigned int nrbufs;
|
|
+ unsigned int curbuf;
|
|
+ unsigned int buffers;
|
|
+ unsigned int readers;
|
|
+ unsigned int writers;
|
|
+ unsigned int files;
|
|
+ unsigned int waiting_writers;
|
|
+ unsigned int r_counter;
|
|
+ unsigned int w_counter;
|
|
+ struct page *tmp_page;
|
|
+ struct fasync_struct *fasync_readers;
|
|
+ struct fasync_struct *fasync_writers;
|
|
+ struct pipe_buffer *bufs;
|
|
+ struct user_struct *user;
|
|
+};
|
|
+
|
|
+typedef short unsigned int __kernel_sa_family_t;
|
|
+
|
|
+struct __kernel_sockaddr_storage {
|
|
+ __kernel_sa_family_t ss_family;
|
|
+ char __data[126];
|
|
+};
|
|
+
|
|
+typedef __kernel_sa_family_t sa_family_t;
|
|
+
|
|
+struct sockaddr {
|
|
+ sa_family_t sa_family;
|
|
+ char sa_data[14];
|
|
+};
|
|
+
|
|
+struct msghdr {
|
|
+ void *msg_name;
|
|
+ int msg_namelen;
|
|
+ struct iov_iter msg_iter;
|
|
+ void *msg_control;
|
|
+ __kernel_size_t msg_controllen;
|
|
+ unsigned int msg_flags;
|
|
+ struct kiocb *msg_iocb;
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ unsigned int clock_rate;
|
|
+ unsigned int clock_type;
|
|
+ short unsigned int loopback;
|
|
+} sync_serial_settings;
|
|
+
|
|
+typedef struct {
|
|
+ unsigned int clock_rate;
|
|
+ unsigned int clock_type;
|
|
+ short unsigned int loopback;
|
|
+ unsigned int slot_map;
|
|
+} te1_settings;
|
|
+
|
|
+typedef struct {
|
|
+ short unsigned int encoding;
|
|
+ short unsigned int parity;
|
|
+} raw_hdlc_proto;
|
|
+
|
|
+typedef struct {
|
|
+ unsigned int t391;
|
|
+ unsigned int t392;
|
|
+ unsigned int n391;
|
|
+ unsigned int n392;
|
|
+ unsigned int n393;
|
|
+ short unsigned int lmi;
|
|
+ short unsigned int dce;
|
|
+} fr_proto;
|
|
+
|
|
+typedef struct {
|
|
+ unsigned int dlci;
|
|
+} fr_proto_pvc;
|
|
+
|
|
+typedef struct {
|
|
+ unsigned int dlci;
|
|
+ char master[16];
|
|
+} fr_proto_pvc_info;
|
|
+
|
|
+typedef struct {
|
|
+ unsigned int interval;
|
|
+ unsigned int timeout;
|
|
+} cisco_proto;
|
|
+
|
|
+struct ifmap {
|
|
+ long unsigned int mem_start;
|
|
+ long unsigned int mem_end;
|
|
+ short unsigned int base_addr;
|
|
+ unsigned char irq;
|
|
+ unsigned char dma;
|
|
+ unsigned char port;
|
|
+};
|
|
+
|
|
+struct if_settings {
|
|
+ unsigned int type;
|
|
+ unsigned int size;
|
|
+ union {
|
|
+ raw_hdlc_proto *raw_hdlc;
|
|
+ cisco_proto *cisco;
|
|
+ fr_proto *fr;
|
|
+ fr_proto_pvc *fr_pvc;
|
|
+ fr_proto_pvc_info *fr_pvc_info;
|
|
+ sync_serial_settings *sync;
|
|
+ te1_settings *te1;
|
|
+ } ifs_ifsu;
|
|
+};
|
|
+
|
|
+struct ifreq {
|
|
+ union {
|
|
+ char ifrn_name[16];
|
|
+ } ifr_ifrn;
|
|
+ union {
|
|
+ struct sockaddr ifru_addr;
|
|
+ struct sockaddr ifru_dstaddr;
|
|
+ struct sockaddr ifru_broadaddr;
|
|
+ struct sockaddr ifru_netmask;
|
|
+ struct sockaddr ifru_hwaddr;
|
|
+ short int ifru_flags;
|
|
+ int ifru_ivalue;
|
|
+ int ifru_mtu;
|
|
+ struct ifmap ifru_map;
|
|
+ char ifru_slave[16];
|
|
+ char ifru_newname[16];
|
|
+ void *ifru_data;
|
|
+ struct if_settings ifru_settings;
|
|
+ } ifr_ifru;
|
|
+};
|
|
+
|
|
+struct vfsmount {
|
|
+ struct dentry *mnt_root;
|
|
+ struct super_block *mnt_sb;
|
|
+ int mnt_flags;
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ size_t written;
|
|
+ size_t count;
|
|
+ union {
|
|
+ char *buf;
|
|
+ void *data;
|
|
+ } arg;
|
|
+ int error;
|
|
+} read_descriptor_t;
|
|
+
|
|
+struct posix_acl_entry {
|
|
+ short int e_tag;
|
|
+ short unsigned int e_perm;
|
|
+ union {
|
|
+ kuid_t e_uid;
|
|
+ kgid_t e_gid;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct posix_acl {
|
|
+ refcount_t a_refcount;
|
|
+ struct callback_head a_rcu;
|
|
+ unsigned int a_count;
|
|
+ struct posix_acl_entry a_entries[0];
|
|
+};
|
|
+
|
|
+typedef unsigned char cc_t;
|
|
+
|
|
+typedef unsigned int speed_t;
|
|
+
|
|
+typedef unsigned int tcflag_t;
|
|
+
|
|
+struct ktermios {
|
|
+ tcflag_t c_iflag;
|
|
+ tcflag_t c_oflag;
|
|
+ tcflag_t c_cflag;
|
|
+ tcflag_t c_lflag;
|
|
+ cc_t c_line;
|
|
+ cc_t c_cc[19];
|
|
+ speed_t c_ispeed;
|
|
+ speed_t c_ospeed;
|
|
+};
|
|
+
|
|
+struct winsize {
|
|
+ short unsigned int ws_row;
|
|
+ short unsigned int ws_col;
|
|
+ short unsigned int ws_xpixel;
|
|
+ short unsigned int ws_ypixel;
|
|
+};
|
|
+
|
|
+struct termiox {
|
|
+ __u16 x_hflag;
|
|
+ __u16 x_cflag;
|
|
+ __u16 x_rflag[5];
|
|
+ __u16 x_sflag;
|
|
+};
|
|
+
|
|
+struct serial_icounter_struct;
|
|
+
|
|
+struct tty_operations {
|
|
+ struct tty_struct * (*lookup)(struct tty_driver *, struct file *, int);
|
|
+ int (*install)(struct tty_driver *, struct tty_struct *);
|
|
+ void (*remove)(struct tty_driver *, struct tty_struct *);
|
|
+ int (*open)(struct tty_struct *, struct file *);
|
|
+ void (*close)(struct tty_struct *, struct file *);
|
|
+ void (*shutdown)(struct tty_struct *);
|
|
+ void (*cleanup)(struct tty_struct *);
|
|
+ int (*write)(struct tty_struct *, const unsigned char *, int);
|
|
+ int (*put_char)(struct tty_struct *, unsigned char);
|
|
+ void (*flush_chars)(struct tty_struct *);
|
|
+ int (*write_room)(struct tty_struct *);
|
|
+ int (*chars_in_buffer)(struct tty_struct *);
|
|
+ int (*ioctl)(struct tty_struct *, unsigned int, long unsigned int);
|
|
+ long int (*compat_ioctl)(struct tty_struct *, unsigned int, long unsigned int);
|
|
+ void (*set_termios)(struct tty_struct *, struct ktermios *);
|
|
+ void (*throttle)(struct tty_struct *);
|
|
+ void (*unthrottle)(struct tty_struct *);
|
|
+ void (*stop)(struct tty_struct *);
|
|
+ void (*start)(struct tty_struct *);
|
|
+ void (*hangup)(struct tty_struct *);
|
|
+ int (*break_ctl)(struct tty_struct *, int);
|
|
+ void (*flush_buffer)(struct tty_struct *);
|
|
+ void (*set_ldisc)(struct tty_struct *);
|
|
+ void (*wait_until_sent)(struct tty_struct *, int);
|
|
+ void (*send_xchar)(struct tty_struct *, char);
|
|
+ int (*tiocmget)(struct tty_struct *);
|
|
+ int (*tiocmset)(struct tty_struct *, unsigned int, unsigned int);
|
|
+ int (*resize)(struct tty_struct *, struct winsize *);
|
|
+ int (*set_termiox)(struct tty_struct *, struct termiox *);
|
|
+ int (*get_icount)(struct tty_struct *, struct serial_icounter_struct *);
|
|
+ void (*show_fdinfo)(struct tty_struct *, struct seq_file *);
|
|
+ int (*poll_init)(struct tty_driver *, int, char *);
|
|
+ int (*poll_get_char)(struct tty_driver *, int);
|
|
+ void (*poll_put_char)(struct tty_driver *, int, char);
|
|
+ int (*proc_show)(struct seq_file *, void *);
|
|
+};
|
|
+
|
|
+struct ld_semaphore {
|
|
+ atomic_long_t count;
|
|
+ raw_spinlock_t wait_lock;
|
|
+ unsigned int wait_readers;
|
|
+ struct list_head read_wait;
|
|
+ struct list_head write_wait;
|
|
+};
|
|
+
|
|
+struct tty_ldisc;
|
|
+
|
|
+struct tty_port;
|
|
+
|
|
+struct tty_struct {
|
|
+ int magic;
|
|
+ struct kref kref;
|
|
+ struct device *dev;
|
|
+ struct tty_driver *driver;
|
|
+ const struct tty_operations *ops;
|
|
+ int index;
|
|
+ struct ld_semaphore ldisc_sem;
|
|
+ struct tty_ldisc *ldisc;
|
|
+ struct mutex atomic_write_lock;
|
|
+ struct mutex legacy_mutex;
|
|
+ struct mutex throttle_mutex;
|
|
+ struct rw_semaphore termios_rwsem;
|
|
+ struct mutex winsize_mutex;
|
|
+ spinlock_t ctrl_lock;
|
|
+ spinlock_t flow_lock;
|
|
+ struct ktermios termios;
|
|
+ struct ktermios termios_locked;
|
|
+ struct termiox *termiox;
|
|
+ char name[64];
|
|
+ struct pid *pgrp;
|
|
+ struct pid *session;
|
|
+ long unsigned int flags;
|
|
+ int count;
|
|
+ struct winsize winsize;
|
|
+ long unsigned int stopped: 1;
|
|
+ long unsigned int flow_stopped: 1;
|
|
+ int: 30;
|
|
+ long unsigned int unused: 62;
|
|
+ int hw_stopped;
|
|
+ long unsigned int ctrl_status: 8;
|
|
+ long unsigned int packet: 1;
|
|
+ int: 23;
|
|
+ long unsigned int unused_ctrl: 55;
|
|
+ unsigned int receive_room;
|
|
+ int flow_change;
|
|
+ struct tty_struct *link;
|
|
+ struct fasync_struct *fasync;
|
|
+ wait_queue_head_t write_wait;
|
|
+ wait_queue_head_t read_wait;
|
|
+ struct work_struct hangup_work;
|
|
+ void *disc_data;
|
|
+ void *driver_data;
|
|
+ spinlock_t files_lock;
|
|
+ struct list_head tty_files;
|
|
+ int closing;
|
|
+ unsigned char *write_buf;
|
|
+ int write_cnt;
|
|
+ struct work_struct SAK_work;
|
|
+ struct tty_port *port;
|
|
+};
|
|
+
|
|
+struct proc_dir_entry;
|
|
+
|
|
+struct tty_driver {
|
|
+ int magic;
|
|
+ struct kref kref;
|
|
+ struct cdev **cdevs;
|
|
+ struct module *owner;
|
|
+ const char *driver_name;
|
|
+ const char *name;
|
|
+ int name_base;
|
|
+ int major;
|
|
+ int minor_start;
|
|
+ unsigned int num;
|
|
+ short int type;
|
|
+ short int subtype;
|
|
+ struct ktermios init_termios;
|
|
+ long unsigned int flags;
|
|
+ struct proc_dir_entry *proc_entry;
|
|
+ struct tty_driver *other;
|
|
+ struct tty_struct **ttys;
|
|
+ struct tty_port **ports;
|
|
+ struct ktermios **termios;
|
|
+ void *driver_state;
|
|
+ const struct tty_operations *ops;
|
|
+ struct list_head tty_drivers;
|
|
+};
|
|
+
|
|
+struct tty_buffer {
|
|
+ union {
|
|
+ struct tty_buffer *next;
|
|
+ struct llist_node free;
|
|
+ };
|
|
+ int used;
|
|
+ int size;
|
|
+ int commit;
|
|
+ int read;
|
|
+ int flags;
|
|
+ long unsigned int data[0];
|
|
+};
|
|
+
|
|
+struct tty_bufhead {
|
|
+ struct tty_buffer *head;
|
|
+ struct work_struct work;
|
|
+ struct mutex lock;
|
|
+ atomic_t priority;
|
|
+ struct tty_buffer sentinel;
|
|
+ struct llist_head free;
|
|
+ atomic_t mem_used;
|
|
+ int mem_limit;
|
|
+ struct tty_buffer *tail;
|
|
+};
|
|
+
|
|
+struct tty_port_operations;
|
|
+
|
|
+struct tty_port_client_operations;
|
|
+
|
|
+struct tty_port {
|
|
+ struct tty_bufhead buf;
|
|
+ struct tty_struct *tty;
|
|
+ struct tty_struct *itty;
|
|
+ const struct tty_port_operations *ops;
|
|
+ const struct tty_port_client_operations *client_ops;
|
|
+ spinlock_t lock;
|
|
+ int blocked_open;
|
|
+ int count;
|
|
+ wait_queue_head_t open_wait;
|
|
+ wait_queue_head_t delta_msr_wait;
|
|
+ long unsigned int flags;
|
|
+ long unsigned int iflags;
|
|
+ unsigned char console: 1;
|
|
+ unsigned char low_latency: 1;
|
|
+ struct mutex mutex;
|
|
+ struct mutex buf_mutex;
|
|
+ unsigned char *xmit_buf;
|
|
+ unsigned int close_delay;
|
|
+ unsigned int closing_wait;
|
|
+ int drain_delay;
|
|
+ struct kref kref;
|
|
+ void *client_data;
|
|
+};
|
|
+
|
|
+struct tty_ldisc_ops {
|
|
+ int magic;
|
|
+ char *name;
|
|
+ int num;
|
|
+ int flags;
|
|
+ int (*open)(struct tty_struct *);
|
|
+ void (*close)(struct tty_struct *);
|
|
+ void (*flush_buffer)(struct tty_struct *);
|
|
+ ssize_t (*read)(struct tty_struct *, struct file *, unsigned char *, size_t);
|
|
+ ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t);
|
|
+ int (*ioctl)(struct tty_struct *, struct file *, unsigned int, long unsigned int);
|
|
+ long int (*compat_ioctl)(struct tty_struct *, struct file *, unsigned int, long unsigned int);
|
|
+ void (*set_termios)(struct tty_struct *, struct ktermios *);
|
|
+ __poll_t (*poll)(struct tty_struct *, struct file *, struct poll_table_struct *);
|
|
+ int (*hangup)(struct tty_struct *);
|
|
+ void (*receive_buf)(struct tty_struct *, const unsigned char *, char *, int);
|
|
+ void (*write_wakeup)(struct tty_struct *);
|
|
+ void (*dcd_change)(struct tty_struct *, unsigned int);
|
|
+ int (*receive_buf2)(struct tty_struct *, const unsigned char *, char *, int);
|
|
+ struct module *owner;
|
|
+ int refcount;
|
|
+};
|
|
+
|
|
+struct tty_ldisc {
|
|
+ struct tty_ldisc_ops *ops;
|
|
+ struct tty_struct *tty;
|
|
+};
|
|
+
|
|
+struct tty_port_operations {
|
|
+ int (*carrier_raised)(struct tty_port *);
|
|
+ void (*dtr_rts)(struct tty_port *, int);
|
|
+ void (*shutdown)(struct tty_port *);
|
|
+ int (*activate)(struct tty_port *, struct tty_struct *);
|
|
+ void (*destruct)(struct tty_port *);
|
|
+};
|
|
+
|
|
+struct tty_port_client_operations {
|
|
+ int (*receive_buf)(struct tty_port *, const unsigned char *, const unsigned char *, size_t);
|
|
+ void (*write_wakeup)(struct tty_port *);
|
|
+};
|
|
+
|
|
+struct prot_inuse;
|
|
+
|
|
+struct netns_core {
|
|
+ struct ctl_table_header *sysctl_hdr;
|
|
+ int sysctl_somaxconn;
|
|
+ int *sock_inuse;
|
|
+ struct prot_inuse *prot_inuse;
|
|
+};
|
|
+
|
|
+struct tcp_mib;
|
|
+
|
|
+struct ipstats_mib;
|
|
+
|
|
+struct linux_mib;
|
|
+
|
|
+struct udp_mib;
|
|
+
|
|
+struct icmp_mib;
|
|
+
|
|
+struct icmpmsg_mib;
|
|
+
|
|
+struct icmpv6_mib;
|
|
+
|
|
+struct icmpv6msg_mib;
|
|
+
|
|
+struct linux_xfrm_mib;
|
|
+
|
|
+struct netns_mib {
|
|
+ struct tcp_mib *tcp_statistics;
|
|
+ struct ipstats_mib *ip_statistics;
|
|
+ struct linux_mib *net_statistics;
|
|
+ struct udp_mib *udp_statistics;
|
|
+ struct udp_mib *udplite_statistics;
|
|
+ struct icmp_mib *icmp_statistics;
|
|
+ struct icmpmsg_mib *icmpmsg_statistics;
|
|
+ struct proc_dir_entry *proc_net_devsnmp6;
|
|
+ struct udp_mib *udp_stats_in6;
|
|
+ struct udp_mib *udplite_stats_in6;
|
|
+ struct ipstats_mib *ipv6_statistics;
|
|
+ struct icmpv6_mib *icmpv6_statistics;
|
|
+ struct icmpv6msg_mib *icmpv6msg_statistics;
|
|
+ struct linux_xfrm_mib *xfrm_statistics;
|
|
+};
|
|
+
|
|
+struct netns_packet {
|
|
+ struct mutex sklist_lock;
|
|
+ struct hlist_head sklist;
|
|
+};
|
|
+
|
|
+struct netns_unix {
|
|
+ int sysctl_max_dgram_qlen;
|
|
+ struct ctl_table_header *ctl;
|
|
+};
|
|
+
|
|
+struct inet_frags;
|
|
+
|
|
+struct netns_frags {
|
|
+ long int high_thresh;
|
|
+ long int low_thresh;
|
|
+ int timeout;
|
|
+ int max_dist;
|
|
+ struct inet_frags *f;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct rhashtable rhashtable;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ atomic_long_t mem;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct local_ports {
|
|
+ seqlock_t lock;
|
|
+ int range[2];
|
|
+ bool warned;
|
|
+};
|
|
+
|
|
+struct inet_hashinfo;
|
|
+
|
|
+struct inet_timewait_death_row {
|
|
+ atomic_t tw_count;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct inet_hashinfo *hashinfo;
|
|
+ int sysctl_max_tw_buckets;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct ping_group_range {
|
|
+ seqlock_t lock;
|
|
+ kgid_t range[2];
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ u64 key[2];
|
|
+} siphash_key_t;
|
|
+
|
|
+struct ipv4_devconf;
|
|
+
|
|
+struct ip_ra_chain;
|
|
+
|
|
+struct fib_rules_ops;
|
|
+
|
|
+struct fib_table;
|
|
+
|
|
+struct inet_peer_base;
|
|
+
|
|
+struct xt_table;
|
|
+
|
|
+struct tcp_congestion_ops;
|
|
+
|
|
+struct tcp_fastopen_context;
|
|
+
|
|
+struct fib_notifier_ops;
|
|
+
|
|
+struct netns_ipv4 {
|
|
+ struct ctl_table_header *forw_hdr;
|
|
+ struct ctl_table_header *frags_hdr;
|
|
+ struct ctl_table_header *ipv4_hdr;
|
|
+ struct ctl_table_header *route_hdr;
|
|
+ struct ctl_table_header *xfrm4_hdr;
|
|
+ struct ipv4_devconf *devconf_all;
|
|
+ struct ipv4_devconf *devconf_dflt;
|
|
+ struct ip_ra_chain *ra_chain;
|
|
+ struct mutex ra_mutex;
|
|
+ struct fib_rules_ops *rules_ops;
|
|
+ bool fib_has_custom_rules;
|
|
+ unsigned int fib_rules_require_fldissect;
|
|
+ struct fib_table *fib_main;
|
|
+ struct fib_table *fib_default;
|
|
+ bool fib_has_custom_local_routes;
|
|
+ int fib_num_tclassid_users;
|
|
+ struct hlist_head *fib_table_hash;
|
|
+ bool fib_offload_disabled;
|
|
+ struct sock *fibnl;
|
|
+ struct sock **icmp_sk;
|
|
+ struct sock *mc_autojoin_sk;
|
|
+ struct inet_peer_base *peers;
|
|
+ struct sock **tcp_sk;
|
|
+ struct netns_frags frags;
|
|
+ struct xt_table *iptable_filter;
|
|
+ struct xt_table *iptable_mangle;
|
|
+ struct xt_table *iptable_raw;
|
|
+ struct xt_table *arptable_filter;
|
|
+ struct xt_table *iptable_security;
|
|
+ struct xt_table *nat_table;
|
|
+ int sysctl_icmp_echo_ignore_all;
|
|
+ int sysctl_icmp_echo_ignore_broadcasts;
|
|
+ int sysctl_icmp_ignore_bogus_error_responses;
|
|
+ int sysctl_icmp_ratelimit;
|
|
+ int sysctl_icmp_ratemask;
|
|
+ int sysctl_icmp_errors_use_inbound_ifaddr;
|
|
+ struct local_ports ip_local_ports;
|
|
+ int sysctl_tcp_ecn;
|
|
+ int sysctl_tcp_ecn_fallback;
|
|
+ int sysctl_ip_default_ttl;
|
|
+ int sysctl_ip_no_pmtu_disc;
|
|
+ int sysctl_ip_fwd_use_pmtu;
|
|
+ int sysctl_ip_fwd_update_priority;
|
|
+ int sysctl_ip_nonlocal_bind;
|
|
+ int sysctl_ip_dynaddr;
|
|
+ int sysctl_ip_early_demux;
|
|
+ int sysctl_raw_l3mdev_accept;
|
|
+ int sysctl_tcp_early_demux;
|
|
+ int sysctl_udp_early_demux;
|
|
+ int sysctl_fwmark_reflect;
|
|
+ int sysctl_tcp_fwmark_accept;
|
|
+ int sysctl_tcp_l3mdev_accept;
|
|
+ int sysctl_tcp_mtu_probing;
|
|
+ int sysctl_tcp_base_mss;
|
|
+ int sysctl_tcp_min_snd_mss;
|
|
+ int sysctl_tcp_probe_threshold;
|
|
+ u32 sysctl_tcp_probe_interval;
|
|
+ int sysctl_tcp_keepalive_time;
|
|
+ int sysctl_tcp_keepalive_probes;
|
|
+ int sysctl_tcp_keepalive_intvl;
|
|
+ int sysctl_tcp_syn_retries;
|
|
+ int sysctl_tcp_synack_retries;
|
|
+ int sysctl_tcp_syncookies;
|
|
+ int sysctl_tcp_reordering;
|
|
+ int sysctl_tcp_retries1;
|
|
+ int sysctl_tcp_retries2;
|
|
+ int sysctl_tcp_orphan_retries;
|
|
+ int sysctl_tcp_fin_timeout;
|
|
+ unsigned int sysctl_tcp_notsent_lowat;
|
|
+ int sysctl_tcp_tw_reuse;
|
|
+ int sysctl_tcp_sack;
|
|
+ int sysctl_tcp_window_scaling;
|
|
+ int sysctl_tcp_timestamps;
|
|
+ int sysctl_tcp_early_retrans;
|
|
+ int sysctl_tcp_recovery;
|
|
+ int sysctl_tcp_thin_linear_timeouts;
|
|
+ int sysctl_tcp_slow_start_after_idle;
|
|
+ int sysctl_tcp_retrans_collapse;
|
|
+ int sysctl_tcp_stdurg;
|
|
+ int sysctl_tcp_rfc1337;
|
|
+ int sysctl_tcp_abort_on_overflow;
|
|
+ int sysctl_tcp_fack;
|
|
+ int sysctl_tcp_max_reordering;
|
|
+ int sysctl_tcp_dsack;
|
|
+ int sysctl_tcp_app_win;
|
|
+ int sysctl_tcp_adv_win_scale;
|
|
+ int sysctl_tcp_frto;
|
|
+ int sysctl_tcp_nometrics_save;
|
|
+ int sysctl_tcp_moderate_rcvbuf;
|
|
+ int sysctl_tcp_tso_win_divisor;
|
|
+ int sysctl_tcp_workaround_signed_windows;
|
|
+ int sysctl_tcp_limit_output_bytes;
|
|
+ int sysctl_tcp_challenge_ack_limit;
|
|
+ int sysctl_tcp_min_tso_segs;
|
|
+ int sysctl_tcp_min_rtt_wlen;
|
|
+ int sysctl_tcp_autocorking;
|
|
+ int sysctl_tcp_invalid_ratelimit;
|
|
+ int sysctl_tcp_pacing_ss_ratio;
|
|
+ int sysctl_tcp_pacing_ca_ratio;
|
|
+ int sysctl_tcp_wmem[3];
|
|
+ int sysctl_tcp_rmem[3];
|
|
+ int sysctl_tcp_comp_sack_nr;
|
|
+ long unsigned int sysctl_tcp_comp_sack_delay_ns;
|
|
+ long: 64;
|
|
+ struct inet_timewait_death_row tcp_death_row;
|
|
+ int sysctl_max_syn_backlog;
|
|
+ int sysctl_tcp_fastopen;
|
|
+ const struct tcp_congestion_ops *tcp_congestion_control;
|
|
+ struct tcp_fastopen_context *tcp_fastopen_ctx;
|
|
+ spinlock_t tcp_fastopen_ctx_lock;
|
|
+ unsigned int sysctl_tcp_fastopen_blackhole_timeout;
|
|
+ atomic_t tfo_active_disable_times;
|
|
+ long unsigned int tfo_active_disable_stamp;
|
|
+ int sysctl_udp_wmem_min;
|
|
+ int sysctl_udp_rmem_min;
|
|
+ int sysctl_udp_l3mdev_accept;
|
|
+ int sysctl_igmp_max_memberships;
|
|
+ int sysctl_igmp_max_msf;
|
|
+ int sysctl_igmp_llm_reports;
|
|
+ int sysctl_igmp_qrv;
|
|
+ struct ping_group_range ping_group_range;
|
|
+ atomic_t dev_addr_genid;
|
|
+ long unsigned int *sysctl_local_reserved_ports;
|
|
+ int sysctl_ip_prot_sock;
|
|
+ struct list_head mr_tables;
|
|
+ struct fib_rules_ops *mr_rules_ops;
|
|
+ int sysctl_fib_multipath_use_neigh;
|
|
+ int sysctl_fib_multipath_hash_policy;
|
|
+ struct fib_notifier_ops *notifier_ops;
|
|
+ unsigned int fib_seq;
|
|
+ struct fib_notifier_ops *ipmr_notifier_ops;
|
|
+ unsigned int ipmr_seq;
|
|
+ atomic_t rt_genid;
|
|
+ siphash_key_t ip_id_key;
|
|
+};
|
|
+
|
|
+struct netns_sysctl_ipv6 {
|
|
+ struct ctl_table_header *hdr;
|
|
+ struct ctl_table_header *route_hdr;
|
|
+ struct ctl_table_header *icmp_hdr;
|
|
+ struct ctl_table_header *frags_hdr;
|
|
+ struct ctl_table_header *xfrm6_hdr;
|
|
+ int bindv6only;
|
|
+ int flush_delay;
|
|
+ int ip6_rt_max_size;
|
|
+ int ip6_rt_gc_min_interval;
|
|
+ int ip6_rt_gc_timeout;
|
|
+ int ip6_rt_gc_interval;
|
|
+ int ip6_rt_gc_elasticity;
|
|
+ int ip6_rt_mtu_expires;
|
|
+ int ip6_rt_min_advmss;
|
|
+ int multipath_hash_policy;
|
|
+ int flowlabel_consistency;
|
|
+ int auto_flowlabels;
|
|
+ int icmpv6_time;
|
|
+ int icmpv6_echo_ignore_all;
|
|
+ int anycast_src_echo_reply;
|
|
+ int ip_nonlocal_bind;
|
|
+ int fwmark_reflect;
|
|
+ int idgen_retries;
|
|
+ int idgen_delay;
|
|
+ int flowlabel_state_ranges;
|
|
+ int flowlabel_reflect;
|
|
+ int max_dst_opts_cnt;
|
|
+ int max_hbh_opts_cnt;
|
|
+ int max_dst_opts_len;
|
|
+ int max_hbh_opts_len;
|
|
+ int seg6_flowlabel;
|
|
+};
|
|
+
|
|
+struct neighbour;
|
|
+
|
|
+struct dst_ops {
|
|
+ short unsigned int family;
|
|
+ unsigned int gc_thresh;
|
|
+ int (*gc)(struct dst_ops *);
|
|
+ struct dst_entry * (*check)(struct dst_entry *, __u32);
|
|
+ unsigned int (*default_advmss)(const struct dst_entry *);
|
|
+ unsigned int (*mtu)(const struct dst_entry *);
|
|
+ u32 * (*cow_metrics)(struct dst_entry *, long unsigned int);
|
|
+ void (*destroy)(struct dst_entry *);
|
|
+ void (*ifdown)(struct dst_entry *, struct net_device *, int);
|
|
+ struct dst_entry * (*negative_advice)(struct dst_entry *);
|
|
+ void (*link_failure)(struct sk_buff *);
|
|
+ void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff *, u32, bool);
|
|
+ void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff *);
|
|
+ int (*local_out)(struct net *, struct sock *, struct sk_buff *);
|
|
+ struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff *, const void *);
|
|
+ void (*confirm_neigh)(const struct dst_entry *, const void *);
|
|
+ struct kmem_cache *kmem_cachep;
|
|
+ struct percpu_counter pcpuc_entries;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct ipv6_devconf;
|
|
+
|
|
+struct fib6_info;
|
|
+
|
|
+struct rt6_info;
|
|
+
|
|
+struct rt6_statistics;
|
|
+
|
|
+struct fib6_table;
|
|
+
|
|
+struct seg6_pernet_data;
|
|
+
|
|
+struct netns_ipv6 {
|
|
+ struct netns_sysctl_ipv6 sysctl;
|
|
+ struct ipv6_devconf *devconf_all;
|
|
+ struct ipv6_devconf *devconf_dflt;
|
|
+ struct inet_peer_base *peers;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct netns_frags frags;
|
|
+ struct xt_table *ip6table_filter;
|
|
+ struct xt_table *ip6table_mangle;
|
|
+ struct xt_table *ip6table_raw;
|
|
+ struct xt_table *ip6table_security;
|
|
+ struct xt_table *ip6table_nat;
|
|
+ struct fib6_info *fib6_null_entry;
|
|
+ struct rt6_info *ip6_null_entry;
|
|
+ struct rt6_statistics *rt6_stats;
|
|
+ struct timer_list ip6_fib_timer;
|
|
+ struct hlist_head *fib_table_hash;
|
|
+ struct fib6_table *fib6_main_tbl;
|
|
+ struct list_head fib6_walkers;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct dst_ops ip6_dst_ops;
|
|
+ rwlock_t fib6_walker_lock;
|
|
+ spinlock_t fib6_gc_lock;
|
|
+ unsigned int ip6_rt_gc_expire;
|
|
+ long unsigned int ip6_rt_last_gc;
|
|
+ unsigned int fib6_rules_require_fldissect;
|
|
+ bool fib6_has_custom_rules;
|
|
+ struct rt6_info *ip6_prohibit_entry;
|
|
+ struct rt6_info *ip6_blk_hole_entry;
|
|
+ struct fib6_table *fib6_local_tbl;
|
|
+ struct fib_rules_ops *fib6_rules_ops;
|
|
+ struct sock **icmp_sk;
|
|
+ struct sock *ndisc_sk;
|
|
+ struct sock *tcp_sk;
|
|
+ struct sock *igmp_sk;
|
|
+ struct sock *mc_autojoin_sk;
|
|
+ struct list_head mr6_tables;
|
|
+ struct fib_rules_ops *mr6_rules_ops;
|
|
+ atomic_t dev_addr_genid;
|
|
+ atomic_t fib6_sernum;
|
|
+ struct seg6_pernet_data *seg6_data;
|
|
+ struct fib_notifier_ops *notifier_ops;
|
|
+ struct fib_notifier_ops *ip6mr_notifier_ops;
|
|
+ unsigned int ipmr_seq;
|
|
+ struct {
|
|
+ struct hlist_head head;
|
|
+ spinlock_t lock;
|
|
+ u32 seq;
|
|
+ } ip6addrlbl_table;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct netns_sysctl_lowpan {
|
|
+ struct ctl_table_header *frags_hdr;
|
|
+};
|
|
+
|
|
+struct netns_ieee802154_lowpan {
|
|
+ struct netns_sysctl_lowpan sysctl;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct netns_frags frags;
|
|
+};
|
|
+
|
|
+struct sctp_mib;
|
|
+
|
|
+struct netns_sctp {
|
|
+ struct sctp_mib *sctp_statistics;
|
|
+ struct proc_dir_entry *proc_net_sctp;
|
|
+ struct ctl_table_header *sysctl_header;
|
|
+ struct sock *ctl_sock;
|
|
+ struct list_head local_addr_list;
|
|
+ struct list_head addr_waitq;
|
|
+ struct timer_list addr_wq_timer;
|
|
+ struct list_head auto_asconf_splist;
|
|
+ spinlock_t addr_wq_lock;
|
|
+ spinlock_t local_addr_lock;
|
|
+ unsigned int rto_initial;
|
|
+ unsigned int rto_min;
|
|
+ unsigned int rto_max;
|
|
+ int rto_alpha;
|
|
+ int rto_beta;
|
|
+ int max_burst;
|
|
+ int cookie_preserve_enable;
|
|
+ char *sctp_hmac_alg;
|
|
+ unsigned int valid_cookie_life;
|
|
+ unsigned int sack_timeout;
|
|
+ unsigned int hb_interval;
|
|
+ int max_retrans_association;
|
|
+ int max_retrans_path;
|
|
+ int max_retrans_init;
|
|
+ int pf_retrans;
|
|
+ int pf_enable;
|
|
+ int sndbuf_policy;
|
|
+ int rcvbuf_policy;
|
|
+ int default_auto_asconf;
|
|
+ int addip_enable;
|
|
+ int addip_noauth;
|
|
+ int prsctp_enable;
|
|
+ int reconf_enable;
|
|
+ int auth_enable;
|
|
+ int intl_enable;
|
|
+ int scope_policy;
|
|
+ int rwnd_upd_shift;
|
|
+ long unsigned int max_autoclose;
|
|
+};
|
|
+
|
|
+struct nf_queue_handler;
|
|
+
|
|
+struct nf_logger;
|
|
+
|
|
+struct nf_hook_entries;
|
|
+
|
|
+struct netns_nf {
|
|
+ struct proc_dir_entry *proc_netfilter;
|
|
+ const struct nf_queue_handler *queue_handler;
|
|
+ const struct nf_logger *nf_loggers[13];
|
|
+ struct ctl_table_header *nf_log_dir_header;
|
|
+ struct nf_hook_entries *hooks_ipv4[5];
|
|
+ struct nf_hook_entries *hooks_ipv6[5];
|
|
+ struct nf_hook_entries *hooks_arp[3];
|
|
+ struct nf_hook_entries *hooks_bridge[5];
|
|
+ bool defrag_ipv4;
|
|
+ bool defrag_ipv6;
|
|
+};
|
|
+
|
|
+struct ebt_table;
|
|
+
|
|
+struct netns_xt {
|
|
+ struct list_head tables[13];
|
|
+ bool notrack_deprecated_warning;
|
|
+ bool clusterip_deprecated_warning;
|
|
+ struct ebt_table *broute_table;
|
|
+ struct ebt_table *frame_filter;
|
|
+ struct ebt_table *frame_nat;
|
|
+};
|
|
+
|
|
+struct nf_ct_event_notifier;
|
|
+
|
|
+struct nf_exp_event_notifier;
|
|
+
|
|
+struct nf_proto_net {
|
|
+ struct ctl_table_header *ctl_table_header;
|
|
+ struct ctl_table *ctl_table;
|
|
+ unsigned int users;
|
|
+};
|
|
+
|
|
+struct nf_generic_net {
|
|
+ struct nf_proto_net pn;
|
|
+ unsigned int timeout;
|
|
+};
|
|
+
|
|
+struct nf_tcp_net {
|
|
+ struct nf_proto_net pn;
|
|
+ unsigned int timeouts[14];
|
|
+ unsigned int tcp_loose;
|
|
+ unsigned int tcp_be_liberal;
|
|
+ unsigned int tcp_max_retrans;
|
|
+};
|
|
+
|
|
+struct nf_udp_net {
|
|
+ struct nf_proto_net pn;
|
|
+ unsigned int timeouts[2];
|
|
+};
|
|
+
|
|
+struct nf_icmp_net {
|
|
+ struct nf_proto_net pn;
|
|
+ unsigned int timeout;
|
|
+};
|
|
+
|
|
+struct nf_dccp_net {
|
|
+ struct nf_proto_net pn;
|
|
+ int dccp_loose;
|
|
+ unsigned int dccp_timeout[10];
|
|
+};
|
|
+
|
|
+struct nf_sctp_net {
|
|
+ struct nf_proto_net pn;
|
|
+ unsigned int timeouts[10];
|
|
+};
|
|
+
|
|
+struct nf_ip_net {
|
|
+ struct nf_generic_net generic;
|
|
+ struct nf_tcp_net tcp;
|
|
+ struct nf_udp_net udp;
|
|
+ struct nf_icmp_net icmp;
|
|
+ struct nf_icmp_net icmpv6;
|
|
+ struct nf_dccp_net dccp;
|
|
+ struct nf_sctp_net sctp;
|
|
+};
|
|
+
|
|
+struct ct_pcpu;
|
|
+
|
|
+struct ip_conntrack_stat;
|
|
+
|
|
+struct netns_ct {
|
|
+ atomic_t count;
|
|
+ unsigned int expect_count;
|
|
+ struct delayed_work ecache_dwork;
|
|
+ bool ecache_dwork_pending;
|
|
+ struct ctl_table_header *sysctl_header;
|
|
+ struct ctl_table_header *acct_sysctl_header;
|
|
+ struct ctl_table_header *tstamp_sysctl_header;
|
|
+ struct ctl_table_header *event_sysctl_header;
|
|
+ struct ctl_table_header *helper_sysctl_header;
|
|
+ unsigned int sysctl_log_invalid;
|
|
+ int sysctl_events;
|
|
+ int sysctl_acct;
|
|
+ int sysctl_auto_assign_helper;
|
|
+ bool auto_assign_helper_warned;
|
|
+ int sysctl_tstamp;
|
|
+ int sysctl_checksum;
|
|
+ struct ct_pcpu *pcpu_lists;
|
|
+ struct ip_conntrack_stat *stat;
|
|
+ struct nf_ct_event_notifier *nf_conntrack_event_cb;
|
|
+ struct nf_exp_event_notifier *nf_expect_event_cb;
|
|
+ struct nf_ip_net nf_ct_proto;
|
|
+ unsigned int labels_used;
|
|
+};
|
|
+
|
|
+struct netns_nftables {
|
|
+ struct list_head tables;
|
|
+ struct list_head commit_list;
|
|
+ struct mutex commit_mutex;
|
|
+ unsigned int base_seq;
|
|
+ u8 gencursor;
|
|
+ u8 validate_state;
|
|
+};
|
|
+
|
|
+struct netns_nf_frag {
|
|
+ struct netns_frags frags;
|
|
+};
|
|
+
|
|
+struct xfrm_policy_hash {
|
|
+ struct hlist_head *table;
|
|
+ unsigned int hmask;
|
|
+ u8 dbits4;
|
|
+ u8 sbits4;
|
|
+ u8 dbits6;
|
|
+ u8 sbits6;
|
|
+};
|
|
+
|
|
+struct xfrm_policy_hthresh {
|
|
+ struct work_struct work;
|
|
+ seqlock_t lock;
|
|
+ u8 lbits4;
|
|
+ u8 rbits4;
|
|
+ u8 lbits6;
|
|
+ u8 rbits6;
|
|
+};
|
|
+
|
|
+struct netns_xfrm {
|
|
+ struct list_head state_all;
|
|
+ struct hlist_head *state_bydst;
|
|
+ struct hlist_head *state_bysrc;
|
|
+ struct hlist_head *state_byspi;
|
|
+ unsigned int state_hmask;
|
|
+ unsigned int state_num;
|
|
+ struct work_struct state_hash_work;
|
|
+ struct list_head policy_all;
|
|
+ struct hlist_head *policy_byidx;
|
|
+ unsigned int policy_idx_hmask;
|
|
+ struct hlist_head policy_inexact[3];
|
|
+ struct xfrm_policy_hash policy_bydst[3];
|
|
+ unsigned int policy_count[6];
|
|
+ struct work_struct policy_hash_work;
|
|
+ struct xfrm_policy_hthresh policy_hthresh;
|
|
+ struct sock *nlsk;
|
|
+ struct sock *nlsk_stash;
|
|
+ u32 sysctl_aevent_etime;
|
|
+ u32 sysctl_aevent_rseqth;
|
|
+ int sysctl_larval_drop;
|
|
+ u32 sysctl_acq_expires;
|
|
+ struct ctl_table_header *sysctl_hdr;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct dst_ops xfrm4_dst_ops;
|
|
+ struct dst_ops xfrm6_dst_ops;
|
|
+ spinlock_t xfrm_state_lock;
|
|
+ spinlock_t xfrm_policy_lock;
|
|
+ struct mutex xfrm_cfg_mutex;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct netns_ipvs;
|
|
+
|
|
+struct mpls_route;
|
|
+
|
|
+struct netns_mpls {
|
|
+ int ip_ttl_propagate;
|
|
+ int default_ttl;
|
|
+ size_t platform_labels;
|
|
+ struct mpls_route **platform_label;
|
|
+ struct ctl_table_header *ctl;
|
|
+};
|
|
+
|
|
+struct can_dev_rcv_lists;
|
|
+
|
|
+struct s_stats;
|
|
+
|
|
+struct s_pstats;
|
|
+
|
|
+struct netns_can {
|
|
+ struct proc_dir_entry *proc_dir;
|
|
+ struct proc_dir_entry *pde_version;
|
|
+ struct proc_dir_entry *pde_stats;
|
|
+ struct proc_dir_entry *pde_reset_stats;
|
|
+ struct proc_dir_entry *pde_rcvlist_all;
|
|
+ struct proc_dir_entry *pde_rcvlist_fil;
|
|
+ struct proc_dir_entry *pde_rcvlist_inv;
|
|
+ struct proc_dir_entry *pde_rcvlist_sff;
|
|
+ struct proc_dir_entry *pde_rcvlist_eff;
|
|
+ struct proc_dir_entry *pde_rcvlist_err;
|
|
+ struct proc_dir_entry *bcmproc_dir;
|
|
+ struct can_dev_rcv_lists *can_rx_alldev_list;
|
|
+ spinlock_t can_rcvlists_lock;
|
|
+ struct timer_list can_stattimer;
|
|
+ struct s_stats *can_stats;
|
|
+ struct s_pstats *can_pstats;
|
|
+ struct hlist_head cgw_list;
|
|
+};
|
|
+
|
|
+struct uevent_sock;
|
|
+
|
|
+struct net_generic;
|
|
+
|
|
+struct net {
|
|
+ refcount_t passive;
|
|
+ refcount_t count;
|
|
+ spinlock_t rules_mod_lock;
|
|
+ u32 hash_mix;
|
|
+ atomic64_t cookie_gen;
|
|
+ struct list_head list;
|
|
+ struct list_head exit_list;
|
|
+ struct llist_node cleanup_list;
|
|
+ struct user_namespace *user_ns;
|
|
+ struct ucounts *ucounts;
|
|
+ spinlock_t nsid_lock;
|
|
+ struct idr netns_ids;
|
|
+ struct ns_common ns;
|
|
+ struct proc_dir_entry *proc_net;
|
|
+ struct proc_dir_entry *proc_net_stat;
|
|
+ struct ctl_table_set sysctls;
|
|
+ struct sock *rtnl;
|
|
+ struct sock *genl_sock;
|
|
+ struct uevent_sock *uevent_sock;
|
|
+ struct list_head dev_base_head;
|
|
+ struct hlist_head *dev_name_head;
|
|
+ struct hlist_head *dev_index_head;
|
|
+ unsigned int dev_base_seq;
|
|
+ int ifindex;
|
|
+ unsigned int dev_unreg_count;
|
|
+ struct list_head rules_ops;
|
|
+ struct list_head fib_notifier_ops;
|
|
+ struct net_device *loopback_dev;
|
|
+ struct netns_core core;
|
|
+ struct netns_mib mib;
|
|
+ struct netns_packet packet;
|
|
+ struct netns_unix unx;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct netns_ipv4 ipv4;
|
|
+ struct netns_ipv6 ipv6;
|
|
+ struct netns_ieee802154_lowpan ieee802154_lowpan;
|
|
+ struct netns_sctp sctp;
|
|
+ struct netns_nf nf;
|
|
+ struct netns_xt xt;
|
|
+ struct netns_ct ct;
|
|
+ struct netns_nftables nft;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct netns_nf_frag nf_frag;
|
|
+ struct ctl_table_header *nf_frag_frags_hdr;
|
|
+ struct sock *nfnl;
|
|
+ struct sock *nfnl_stash;
|
|
+ struct list_head nfct_timeout_list;
|
|
+ struct net_generic *gen;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct netns_xfrm xfrm;
|
|
+ struct netns_ipvs *ipvs;
|
|
+ struct netns_mpls mpls;
|
|
+ struct netns_can can;
|
|
+ struct sock *diag_nlsk;
|
|
+ atomic_t fnhe_genid;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ __PERCPU_REF_ATOMIC = 1,
|
|
+ __PERCPU_REF_DEAD = 2,
|
|
+ __PERCPU_REF_ATOMIC_DEAD = 3,
|
|
+ __PERCPU_REF_FLAG_BITS = 2,
|
|
+};
|
|
+
|
|
+struct bpf_insn {
|
|
+ __u8 code;
|
|
+ __u8 dst_reg: 4;
|
|
+ __u8 src_reg: 4;
|
|
+ __s16 off;
|
|
+ __s32 imm;
|
|
+};
|
|
+
|
|
+enum bpf_prog_type {
|
|
+ BPF_PROG_TYPE_UNSPEC = 0,
|
|
+ BPF_PROG_TYPE_SOCKET_FILTER = 1,
|
|
+ BPF_PROG_TYPE_KPROBE = 2,
|
|
+ BPF_PROG_TYPE_SCHED_CLS = 3,
|
|
+ BPF_PROG_TYPE_SCHED_ACT = 4,
|
|
+ BPF_PROG_TYPE_TRACEPOINT = 5,
|
|
+ BPF_PROG_TYPE_XDP = 6,
|
|
+ BPF_PROG_TYPE_PERF_EVENT = 7,
|
|
+ BPF_PROG_TYPE_CGROUP_SKB = 8,
|
|
+ BPF_PROG_TYPE_CGROUP_SOCK = 9,
|
|
+ BPF_PROG_TYPE_LWT_IN = 10,
|
|
+ BPF_PROG_TYPE_LWT_OUT = 11,
|
|
+ BPF_PROG_TYPE_LWT_XMIT = 12,
|
|
+ BPF_PROG_TYPE_SOCK_OPS = 13,
|
|
+ BPF_PROG_TYPE_SK_SKB = 14,
|
|
+ BPF_PROG_TYPE_CGROUP_DEVICE = 15,
|
|
+ BPF_PROG_TYPE_SK_MSG = 16,
|
|
+ BPF_PROG_TYPE_RAW_TRACEPOINT = 17,
|
|
+ BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 18,
|
|
+ BPF_PROG_TYPE_LWT_SEG6LOCAL = 19,
|
|
+ BPF_PROG_TYPE_LIRC_MODE2 = 20,
|
|
+ BPF_PROG_TYPE_SK_REUSEPORT = 21,
|
|
+ BPF_PROG_TYPE_FLOW_DISSECTOR = 22,
|
|
+ BPF_PROG_TYPE_CGROUP_SYSCTL = 23,
|
|
+ BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 24,
|
|
+};
|
|
+
|
|
+enum bpf_attach_type {
|
|
+ BPF_CGROUP_INET_INGRESS = 0,
|
|
+ BPF_CGROUP_INET_EGRESS = 1,
|
|
+ BPF_CGROUP_INET_SOCK_CREATE = 2,
|
|
+ BPF_CGROUP_SOCK_OPS = 3,
|
|
+ BPF_SK_SKB_STREAM_PARSER = 4,
|
|
+ BPF_SK_SKB_STREAM_VERDICT = 5,
|
|
+ BPF_CGROUP_DEVICE = 6,
|
|
+ BPF_SK_MSG_VERDICT = 7,
|
|
+ BPF_CGROUP_INET4_BIND = 8,
|
|
+ BPF_CGROUP_INET6_BIND = 9,
|
|
+ BPF_CGROUP_INET4_CONNECT = 10,
|
|
+ BPF_CGROUP_INET6_CONNECT = 11,
|
|
+ BPF_CGROUP_INET4_POST_BIND = 12,
|
|
+ BPF_CGROUP_INET6_POST_BIND = 13,
|
|
+ BPF_CGROUP_UDP4_SENDMSG = 14,
|
|
+ BPF_CGROUP_UDP6_SENDMSG = 15,
|
|
+ BPF_LIRC_MODE2 = 16,
|
|
+ BPF_CGROUP_UDP4_RECVMSG = 19,
|
|
+ BPF_CGROUP_UDP6_RECVMSG = 20,
|
|
+ __MAX_BPF_ATTACH_TYPE = 21,
|
|
+};
|
|
+
|
|
+struct sock_filter {
|
|
+ __u16 code;
|
|
+ __u8 jt;
|
|
+ __u8 jf;
|
|
+ __u32 k;
|
|
+};
|
|
+
|
|
+struct bpf_prog_aux;
|
|
+
|
|
+struct sock_fprog_kern;
|
|
+
|
|
+struct bpf_prog {
|
|
+ u16 pages;
|
|
+ u16 jited: 1;
|
|
+ u16 jit_requested: 1;
|
|
+ u16 undo_set_mem: 1;
|
|
+ u16 gpl_compatible: 1;
|
|
+ u16 cb_access: 1;
|
|
+ u16 dst_needed: 1;
|
|
+ u16 blinded: 1;
|
|
+ u16 is_func: 1;
|
|
+ u16 kprobe_override: 1;
|
|
+ u16 has_callchain_buf: 1;
|
|
+ enum bpf_prog_type type;
|
|
+ enum bpf_attach_type expected_attach_type;
|
|
+ u32 len;
|
|
+ u32 jited_len;
|
|
+ u8 tag[8];
|
|
+ struct bpf_prog_aux *aux;
|
|
+ struct sock_fprog_kern *orig_prog;
|
|
+ unsigned int (*bpf_func)(const void *, const struct bpf_insn *);
|
|
+ union {
|
|
+ struct sock_filter insns[0];
|
|
+ struct bpf_insn insnsi[0];
|
|
+ };
|
|
+};
|
|
+
|
|
+enum pageflags {
|
|
+ PG_locked = 0,
|
|
+ PG_error = 1,
|
|
+ PG_referenced = 2,
|
|
+ PG_uptodate = 3,
|
|
+ PG_dirty = 4,
|
|
+ PG_lru = 5,
|
|
+ PG_active = 6,
|
|
+ PG_waiters = 7,
|
|
+ PG_slab = 8,
|
|
+ PG_owner_priv_1 = 9,
|
|
+ PG_arch_1 = 10,
|
|
+ PG_reserved = 11,
|
|
+ PG_private = 12,
|
|
+ PG_private_2 = 13,
|
|
+ PG_writeback = 14,
|
|
+ PG_head = 15,
|
|
+ PG_mappedtodisk = 16,
|
|
+ PG_reclaim = 17,
|
|
+ PG_swapbacked = 18,
|
|
+ PG_unevictable = 19,
|
|
+ PG_mlocked = 20,
|
|
+ PG_uncached = 21,
|
|
+ PG_hwpoison = 22,
|
|
+ PG_young = 23,
|
|
+ PG_idle = 24,
|
|
+ PG_percpu_ref = 25,
|
|
+ __NR_PAGEFLAGS = 26,
|
|
+ PG_checked = 9,
|
|
+ PG_swapcache = 9,
|
|
+ PG_fscache = 13,
|
|
+ PG_pinned = 9,
|
|
+ PG_savepinned = 4,
|
|
+ PG_foreign = 9,
|
|
+ PG_slob_free = 12,
|
|
+ PG_double_map = 13,
|
|
+ PG_isolated = 17,
|
|
+};
|
|
+
|
|
+struct blkg_rwstat {
|
|
+ struct percpu_counter cpu_cnt[5];
|
|
+ atomic64_t aux_cnt[5];
|
|
+};
|
|
+
|
|
+struct blkcg;
|
|
+
|
|
+struct blkg_policy_data;
|
|
+
|
|
+struct blkcg_gq {
|
|
+ struct request_queue *q;
|
|
+ struct list_head q_node;
|
|
+ struct hlist_node blkcg_node;
|
|
+ struct blkcg *blkcg;
|
|
+ struct bdi_writeback_congested *wb_congested;
|
|
+ struct blkcg_gq *parent;
|
|
+ struct request_list rl;
|
|
+ atomic_t refcnt;
|
|
+ bool online;
|
|
+ struct blkg_rwstat stat_bytes;
|
|
+ struct blkg_rwstat stat_ios;
|
|
+ struct blkg_policy_data *pd[5];
|
|
+ struct callback_head callback_head;
|
|
+ atomic_t use_delay;
|
|
+ atomic64_t delay_nsec;
|
|
+ atomic64_t delay_start;
|
|
+ u64 last_delay;
|
|
+ int last_use;
|
|
+};
|
|
+
|
|
+typedef int suspend_state_t;
|
|
+
|
|
+enum suspend_stat_step {
|
|
+ SUSPEND_FREEZE = 1,
|
|
+ SUSPEND_PREPARE = 2,
|
|
+ SUSPEND_SUSPEND = 3,
|
|
+ SUSPEND_SUSPEND_LATE = 4,
|
|
+ SUSPEND_SUSPEND_NOIRQ = 5,
|
|
+ SUSPEND_RESUME_NOIRQ = 6,
|
|
+ SUSPEND_RESUME_EARLY = 7,
|
|
+ SUSPEND_RESUME = 8,
|
|
+};
|
|
+
|
|
+struct suspend_stats {
|
|
+ int success;
|
|
+ int fail;
|
|
+ int failed_freeze;
|
|
+ int failed_prepare;
|
|
+ int failed_suspend;
|
|
+ int failed_suspend_late;
|
|
+ int failed_suspend_noirq;
|
|
+ int failed_resume;
|
|
+ int failed_resume_early;
|
|
+ int failed_resume_noirq;
|
|
+ int last_failed_dev;
|
|
+ char failed_devs[80];
|
|
+ int last_failed_errno;
|
|
+ int errno[2];
|
|
+ int last_failed_step;
|
|
+ enum suspend_stat_step failed_steps[2];
|
|
+};
|
|
+
|
|
+enum s2idle_states {
|
|
+ S2IDLE_STATE_NONE = 0,
|
|
+ S2IDLE_STATE_ENTER = 1,
|
|
+ S2IDLE_STATE_WAKE = 2,
|
|
+};
|
|
+
|
|
+struct pbe {
|
|
+ void *address;
|
|
+ void *orig_address;
|
|
+ struct pbe *next;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ Root_NFS = 255,
|
|
+ Root_RAM0 = 1048576,
|
|
+ Root_RAM1 = 1048577,
|
|
+ Root_FD0 = 2097152,
|
|
+ Root_HDA1 = 3145729,
|
|
+ Root_HDA2 = 3145730,
|
|
+ Root_SDA1 = 8388609,
|
|
+ Root_SDA2 = 8388610,
|
|
+ Root_HDC1 = 23068673,
|
|
+ Root_SR0 = 11534336,
|
|
+};
|
|
+
|
|
+struct scatterlist {
|
|
+ long unsigned int page_link;
|
|
+ unsigned int offset;
|
|
+ unsigned int length;
|
|
+ dma_addr_t dma_address;
|
|
+ unsigned int dma_length;
|
|
+};
|
|
+
|
|
+struct sg_table {
|
|
+ struct scatterlist *sgl;
|
|
+ unsigned int nents;
|
|
+ unsigned int orig_nents;
|
|
+};
|
|
+
|
|
+struct xdr_buf {
|
|
+ struct kvec head[1];
|
|
+ struct kvec tail[1];
|
|
+ struct page **pages;
|
|
+ unsigned int page_base;
|
|
+ unsigned int page_len;
|
|
+ unsigned int flags;
|
|
+ unsigned int buflen;
|
|
+ unsigned int len;
|
|
+};
|
|
+
|
|
+typedef unsigned int sk_buff_data_t;
|
|
+
|
|
+struct sec_path;
|
|
+
|
|
+struct nf_bridge_info;
|
|
+
|
|
+struct sk_buff {
|
|
+ union {
|
|
+ struct {
|
|
+ struct sk_buff *next;
|
|
+ struct sk_buff *prev;
|
|
+ union {
|
|
+ struct net_device *dev;
|
|
+ long unsigned int dev_scratch;
|
|
+ };
|
|
+ };
|
|
+ struct rb_node rbnode;
|
|
+ struct list_head list;
|
|
+ };
|
|
+ union {
|
|
+ struct sock *sk;
|
|
+ int ip_defrag_offset;
|
|
+ };
|
|
+ union {
|
|
+ ktime_t tstamp;
|
|
+ u64 skb_mstamp;
|
|
+ };
|
|
+ char cb[48];
|
|
+ union {
|
|
+ struct {
|
|
+ long unsigned int _skb_refdst;
|
|
+ void (*destructor)(struct sk_buff *);
|
|
+ };
|
|
+ struct list_head tcp_tsorted_anchor;
|
|
+ };
|
|
+ struct sec_path *sp;
|
|
+ long unsigned int _nfct;
|
|
+ struct nf_bridge_info *nf_bridge;
|
|
+ unsigned int len;
|
|
+ unsigned int data_len;
|
|
+ __u16 mac_len;
|
|
+ __u16 hdr_len;
|
|
+ __u16 queue_mapping;
|
|
+ __u8 __cloned_offset[0];
|
|
+ __u8 cloned: 1;
|
|
+ __u8 nohdr: 1;
|
|
+ __u8 fclone: 2;
|
|
+ __u8 peeked: 1;
|
|
+ __u8 head_frag: 1;
|
|
+ __u8 xmit_more: 1;
|
|
+ __u8 pfmemalloc: 1;
|
|
+ __u32 headers_start[0];
|
|
+ __u8 __pkt_type_offset[0];
|
|
+ __u8 pkt_type: 3;
|
|
+ __u8 ignore_df: 1;
|
|
+ __u8 nf_trace: 1;
|
|
+ __u8 ip_summed: 2;
|
|
+ __u8 ooo_okay: 1;
|
|
+ __u8 l4_hash: 1;
|
|
+ __u8 sw_hash: 1;
|
|
+ __u8 wifi_acked_valid: 1;
|
|
+ __u8 wifi_acked: 1;
|
|
+ __u8 no_fcs: 1;
|
|
+ __u8 encapsulation: 1;
|
|
+ __u8 encap_hdr_csum: 1;
|
|
+ __u8 csum_valid: 1;
|
|
+ __u8 csum_complete_sw: 1;
|
|
+ __u8 csum_level: 2;
|
|
+ __u8 csum_not_inet: 1;
|
|
+ __u8 dst_pending_confirm: 1;
|
|
+ __u8 ndisc_nodetype: 2;
|
|
+ __u8 ipvs_property: 1;
|
|
+ __u8 inner_protocol_type: 1;
|
|
+ __u8 remcsum_offload: 1;
|
|
+ __u8 offload_fwd_mark: 1;
|
|
+ __u8 offload_mr_fwd_mark: 1;
|
|
+ __u8 tc_skip_classify: 1;
|
|
+ __u8 tc_at_ingress: 1;
|
|
+ __u8 tc_redirected: 1;
|
|
+ __u8 tc_from_ingress: 1;
|
|
+ __u8 decrypted: 1;
|
|
+ __u16 tc_index;
|
|
+ union {
|
|
+ __wsum csum;
|
|
+ struct {
|
|
+ __u16 csum_start;
|
|
+ __u16 csum_offset;
|
|
+ };
|
|
+ };
|
|
+ __u32 priority;
|
|
+ int skb_iif;
|
|
+ __u32 hash;
|
|
+ __be16 vlan_proto;
|
|
+ __u16 vlan_tci;
|
|
+ union {
|
|
+ unsigned int napi_id;
|
|
+ unsigned int sender_cpu;
|
|
+ };
|
|
+ __u32 secmark;
|
|
+ union {
|
|
+ __u32 mark;
|
|
+ __u32 reserved_tailroom;
|
|
+ };
|
|
+ union {
|
|
+ __be16 inner_protocol;
|
|
+ __u8 inner_ipproto;
|
|
+ };
|
|
+ __u16 inner_transport_header;
|
|
+ __u16 inner_network_header;
|
|
+ __u16 inner_mac_header;
|
|
+ __be16 protocol;
|
|
+ __u16 transport_header;
|
|
+ __u16 network_header;
|
|
+ __u16 mac_header;
|
|
+ __u32 headers_end[0];
|
|
+ sk_buff_data_t tail;
|
|
+ sk_buff_data_t end;
|
|
+ unsigned char *head;
|
|
+ unsigned char *data;
|
|
+ unsigned int truesize;
|
|
+ refcount_t users;
|
|
+};
|
|
+
|
|
+struct xdr_stream {
|
|
+ __be32 *p;
|
|
+ struct xdr_buf *buf;
|
|
+ __be32 *end;
|
|
+ struct kvec *iov;
|
|
+ struct kvec scratch;
|
|
+ struct page **page_ptr;
|
|
+ unsigned int nwords;
|
|
+};
|
|
+
|
|
+struct rpc_rqst;
|
|
+
|
|
+typedef void (*kxdreproc_t)(struct rpc_rqst *, struct xdr_stream *, const void *);
|
|
+
|
|
+struct rpc_xprt;
|
|
+
|
|
+struct rpc_task;
|
|
+
|
|
+struct rpc_cred;
|
|
+
|
|
+struct rpc_rqst {
|
|
+ struct rpc_xprt *rq_xprt;
|
|
+ struct xdr_buf rq_snd_buf;
|
|
+ struct xdr_buf rq_rcv_buf;
|
|
+ struct rpc_task *rq_task;
|
|
+ struct rpc_cred *rq_cred;
|
|
+ __be32 rq_xid;
|
|
+ int rq_cong;
|
|
+ u32 rq_seqno;
|
|
+ int rq_enc_pages_num;
|
|
+ struct page **rq_enc_pages;
|
|
+ void (*rq_release_snd_buf)(struct rpc_rqst *);
|
|
+ struct list_head rq_list;
|
|
+ void *rq_buffer;
|
|
+ size_t rq_callsize;
|
|
+ void *rq_rbuffer;
|
|
+ size_t rq_rcvsize;
|
|
+ size_t rq_xmit_bytes_sent;
|
|
+ size_t rq_reply_bytes_recvd;
|
|
+ struct xdr_buf rq_private_buf;
|
|
+ long unsigned int rq_majortimeo;
|
|
+ long unsigned int rq_timeout;
|
|
+ ktime_t rq_rtt;
|
|
+ unsigned int rq_retries;
|
|
+ unsigned int rq_connect_cookie;
|
|
+ u32 rq_bytes_sent;
|
|
+ ktime_t rq_xtime;
|
|
+ int rq_ntrans;
|
|
+ struct list_head rq_bc_list;
|
|
+ long unsigned int rq_bc_pa_state;
|
|
+ struct list_head rq_bc_pa_list;
|
|
+};
|
|
+
|
|
+typedef int (*kxdrdproc_t)(struct rpc_rqst *, struct xdr_stream *, void *);
|
|
+
|
|
+struct rpc_procinfo;
|
|
+
|
|
+struct rpc_message {
|
|
+ const struct rpc_procinfo *rpc_proc;
|
|
+ void *rpc_argp;
|
|
+ void *rpc_resp;
|
|
+ struct rpc_cred *rpc_cred;
|
|
+};
|
|
+
|
|
+struct rpc_procinfo {
|
|
+ u32 p_proc;
|
|
+ kxdreproc_t p_encode;
|
|
+ kxdrdproc_t p_decode;
|
|
+ unsigned int p_arglen;
|
|
+ unsigned int p_replen;
|
|
+ unsigned int p_timer;
|
|
+ u32 p_statidx;
|
|
+ const char *p_name;
|
|
+};
|
|
+
|
|
+struct rpc_auth;
|
|
+
|
|
+struct rpc_credops;
|
|
+
|
|
+struct rpc_cred {
|
|
+ struct hlist_node cr_hash;
|
|
+ struct list_head cr_lru;
|
|
+ struct callback_head cr_rcu;
|
|
+ struct rpc_auth *cr_auth;
|
|
+ const struct rpc_credops *cr_ops;
|
|
+ long unsigned int cr_expire;
|
|
+ long unsigned int cr_flags;
|
|
+ atomic_t cr_count;
|
|
+ kuid_t cr_uid;
|
|
+};
|
|
+
|
|
+struct rpc_wait {
|
|
+ struct list_head list;
|
|
+ struct list_head links;
|
|
+ struct list_head timer_list;
|
|
+ long unsigned int expires;
|
|
+};
|
|
+
|
|
+struct rpc_wait_queue;
|
|
+
|
|
+struct rpc_call_ops;
|
|
+
|
|
+struct rpc_clnt;
|
|
+
|
|
+struct rpc_task {
|
|
+ atomic_t tk_count;
|
|
+ int tk_status;
|
|
+ struct list_head tk_task;
|
|
+ void (*tk_callback)(struct rpc_task *);
|
|
+ void (*tk_action)(struct rpc_task *);
|
|
+ long unsigned int tk_timeout;
|
|
+ long unsigned int tk_runstate;
|
|
+ struct rpc_wait_queue *tk_waitqueue;
|
|
+ union {
|
|
+ struct work_struct tk_work;
|
|
+ struct rpc_wait tk_wait;
|
|
+ } u;
|
|
+ struct rpc_message tk_msg;
|
|
+ void *tk_calldata;
|
|
+ const struct rpc_call_ops *tk_ops;
|
|
+ struct rpc_clnt *tk_client;
|
|
+ struct rpc_xprt *tk_xprt;
|
|
+ struct rpc_rqst *tk_rqstp;
|
|
+ struct workqueue_struct *tk_workqueue;
|
|
+ ktime_t tk_start;
|
|
+ pid_t tk_owner;
|
|
+ short unsigned int tk_flags;
|
|
+ short unsigned int tk_timeouts;
|
|
+ short unsigned int tk_pid;
|
|
+ unsigned char tk_priority: 2;
|
|
+ unsigned char tk_garb_retry: 2;
|
|
+ unsigned char tk_cred_retry: 2;
|
|
+ unsigned char tk_rebind_retry: 2;
|
|
+};
|
|
+
|
|
+struct rpc_timer {
|
|
+ struct timer_list timer;
|
|
+ struct list_head list;
|
|
+ long unsigned int expires;
|
|
+};
|
|
+
|
|
+struct rpc_wait_queue {
|
|
+ spinlock_t lock;
|
|
+ struct list_head tasks[4];
|
|
+ unsigned char maxpriority;
|
|
+ unsigned char priority;
|
|
+ unsigned char nr;
|
|
+ short unsigned int qlen;
|
|
+ struct rpc_timer timer_list;
|
|
+ const char *name;
|
|
+};
|
|
+
|
|
+struct rpc_call_ops {
|
|
+ void (*rpc_call_prepare)(struct rpc_task *, void *);
|
|
+ void (*rpc_call_done)(struct rpc_task *, void *);
|
|
+ void (*rpc_count_stats)(struct rpc_task *, void *);
|
|
+ void (*rpc_release)(void *);
|
|
+};
|
|
+
|
|
+struct rpc_iostats;
|
|
+
|
|
+struct rpc_pipe_dir_head {
|
|
+ struct list_head pdh_entries;
|
|
+ struct dentry *pdh_dentry;
|
|
+};
|
|
+
|
|
+struct rpc_rtt {
|
|
+ long unsigned int timeo;
|
|
+ long unsigned int srtt[5];
|
|
+ long unsigned int sdrtt[5];
|
|
+ int ntimeouts[5];
|
|
+};
|
|
+
|
|
+struct rpc_timeout {
|
|
+ long unsigned int to_initval;
|
|
+ long unsigned int to_maxval;
|
|
+ long unsigned int to_increment;
|
|
+ unsigned int to_retries;
|
|
+ unsigned char to_exponential;
|
|
+};
|
|
+
|
|
+struct rpc_xprt_switch;
|
|
+
|
|
+struct rpc_xprt_iter_ops;
|
|
+
|
|
+struct rpc_xprt_iter {
|
|
+ struct rpc_xprt_switch *xpi_xpswitch;
|
|
+ struct rpc_xprt *xpi_cursor;
|
|
+ const struct rpc_xprt_iter_ops *xpi_ops;
|
|
+};
|
|
+
|
|
+struct rpc_stat;
|
|
+
|
|
+struct rpc_program;
|
|
+
|
|
+struct rpc_clnt {
|
|
+ atomic_t cl_count;
|
|
+ unsigned int cl_clid;
|
|
+ struct list_head cl_clients;
|
|
+ struct list_head cl_tasks;
|
|
+ spinlock_t cl_lock;
|
|
+ struct rpc_xprt *cl_xprt;
|
|
+ const struct rpc_procinfo *cl_procinfo;
|
|
+ u32 cl_prog;
|
|
+ u32 cl_vers;
|
|
+ u32 cl_maxproc;
|
|
+ struct rpc_auth *cl_auth;
|
|
+ struct rpc_stat *cl_stats;
|
|
+ struct rpc_iostats *cl_metrics;
|
|
+ unsigned int cl_softrtry: 1;
|
|
+ unsigned int cl_discrtry: 1;
|
|
+ unsigned int cl_noretranstimeo: 1;
|
|
+ unsigned int cl_autobind: 1;
|
|
+ unsigned int cl_chatty: 1;
|
|
+ struct rpc_rtt *cl_rtt;
|
|
+ const struct rpc_timeout *cl_timeout;
|
|
+ atomic_t cl_swapper;
|
|
+ int cl_nodelen;
|
|
+ char cl_nodename[65];
|
|
+ struct rpc_pipe_dir_head cl_pipedir_objects;
|
|
+ struct rpc_clnt *cl_parent;
|
|
+ struct rpc_rtt cl_rtt_default;
|
|
+ struct rpc_timeout cl_timeout_default;
|
|
+ const struct rpc_program *cl_program;
|
|
+ struct dentry *cl_debugfs;
|
|
+ struct rpc_xprt_iter cl_xpi;
|
|
+};
|
|
+
|
|
+struct svc_xprt;
|
|
+
|
|
+struct svc_serv;
|
|
+
|
|
+struct rpc_xprt_ops;
|
|
+
|
|
+struct rpc_xprt {
|
|
+ struct kref kref;
|
|
+ const struct rpc_xprt_ops *ops;
|
|
+ const struct rpc_timeout *timeout;
|
|
+ struct __kernel_sockaddr_storage addr;
|
|
+ size_t addrlen;
|
|
+ int prot;
|
|
+ long unsigned int cong;
|
|
+ long unsigned int cwnd;
|
|
+ size_t max_payload;
|
|
+ unsigned int tsh_size;
|
|
+ struct rpc_wait_queue binding;
|
|
+ struct rpc_wait_queue sending;
|
|
+ struct rpc_wait_queue pending;
|
|
+ struct rpc_wait_queue backlog;
|
|
+ struct list_head free;
|
|
+ unsigned int max_reqs;
|
|
+ unsigned int min_reqs;
|
|
+ unsigned int num_reqs;
|
|
+ long unsigned int state;
|
|
+ unsigned char resvport: 1;
|
|
+ unsigned char reuseport: 1;
|
|
+ atomic_t swapper;
|
|
+ unsigned int bind_index;
|
|
+ struct list_head xprt_switch;
|
|
+ long unsigned int bind_timeout;
|
|
+ long unsigned int reestablish_timeout;
|
|
+ unsigned int connect_cookie;
|
|
+ struct work_struct task_cleanup;
|
|
+ struct timer_list timer;
|
|
+ long unsigned int last_used;
|
|
+ long unsigned int idle_timeout;
|
|
+ long unsigned int connect_timeout;
|
|
+ long unsigned int max_reconnect_timeout;
|
|
+ spinlock_t transport_lock;
|
|
+ spinlock_t reserve_lock;
|
|
+ spinlock_t recv_lock;
|
|
+ u32 xid;
|
|
+ struct rpc_task *snd_task;
|
|
+ struct svc_xprt *bc_xprt;
|
|
+ struct svc_serv *bc_serv;
|
|
+ int bc_alloc_count;
|
|
+ atomic_t bc_free_slots;
|
|
+ spinlock_t bc_pa_lock;
|
|
+ struct list_head bc_pa_list;
|
|
+ struct list_head recv;
|
|
+ struct {
|
|
+ long unsigned int bind_count;
|
|
+ long unsigned int connect_count;
|
|
+ long unsigned int connect_start;
|
|
+ long unsigned int connect_time;
|
|
+ long unsigned int sends;
|
|
+ long unsigned int recvs;
|
|
+ long unsigned int bad_xids;
|
|
+ long unsigned int max_slots;
|
|
+ long long unsigned int req_u;
|
|
+ long long unsigned int bklog_u;
|
|
+ long long unsigned int sending_u;
|
|
+ long long unsigned int pending_u;
|
|
+ } stat;
|
|
+ struct net *xprt_net;
|
|
+ const char *servername;
|
|
+ const char *address_strings[6];
|
|
+ struct dentry *debugfs;
|
|
+ atomic_t inject_disconnect;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+typedef u32 rpc_authflavor_t;
|
|
+
|
|
+struct ethhdr {
|
|
+ unsigned char h_dest[6];
|
|
+ unsigned char h_source[6];
|
|
+ __be16 h_proto;
|
|
+};
|
|
+
|
|
+struct flow_dissector {
|
|
+ unsigned int used_keys;
|
|
+ short unsigned int offset[24];
|
|
+};
|
|
+
|
|
+struct flowi_tunnel {
|
|
+ __be64 tun_id;
|
|
+};
|
|
+
|
|
+struct flowi_common {
|
|
+ int flowic_oif;
|
|
+ int flowic_iif;
|
|
+ __u32 flowic_mark;
|
|
+ __u8 flowic_tos;
|
|
+ __u8 flowic_scope;
|
|
+ __u8 flowic_proto;
|
|
+ __u8 flowic_flags;
|
|
+ __u32 flowic_secid;
|
|
+ struct flowi_tunnel flowic_tun_key;
|
|
+ kuid_t flowic_uid;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+union flowi_uli {
|
|
+ struct {
|
|
+ __be16 dport;
|
|
+ __be16 sport;
|
|
+ } ports;
|
|
+ struct {
|
|
+ __u8 type;
|
|
+ __u8 code;
|
|
+ } icmpt;
|
|
+ struct {
|
|
+ __le16 dport;
|
|
+ __le16 sport;
|
|
+ } dnports;
|
|
+ __be32 spi;
|
|
+ __be32 gre_key;
|
|
+ struct {
|
|
+ __u8 type;
|
|
+ } mht;
|
|
+};
|
|
+
|
|
+struct flowi4 {
|
|
+ struct flowi_common __fl_common;
|
|
+ __be32 saddr;
|
|
+ __be32 daddr;
|
|
+ union flowi_uli uli;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct flowi6 {
|
|
+ struct flowi_common __fl_common;
|
|
+ struct in6_addr daddr;
|
|
+ struct in6_addr saddr;
|
|
+ __be32 flowlabel;
|
|
+ union flowi_uli uli;
|
|
+ __u32 mp_hash;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct flowidn {
|
|
+ struct flowi_common __fl_common;
|
|
+ __le16 daddr;
|
|
+ __le16 saddr;
|
|
+ union flowi_uli uli;
|
|
+};
|
|
+
|
|
+struct flowi {
|
|
+ union {
|
|
+ struct flowi_common __fl_common;
|
|
+ struct flowi4 ip4;
|
|
+ struct flowi6 ip6;
|
|
+ struct flowidn dn;
|
|
+ } u;
|
|
+};
|
|
+
|
|
+struct ipstats_mib {
|
|
+ u64 mibs[37];
|
|
+ struct u64_stats_sync syncp;
|
|
+};
|
|
+
|
|
+struct icmp_mib {
|
|
+ long unsigned int mibs[28];
|
|
+};
|
|
+
|
|
+struct icmpmsg_mib {
|
|
+ atomic_long_t mibs[512];
|
|
+};
|
|
+
|
|
+struct icmpv6_mib {
|
|
+ long unsigned int mibs[6];
|
|
+};
|
|
+
|
|
+struct icmpv6_mib_device {
|
|
+ atomic_long_t mibs[6];
|
|
+};
|
|
+
|
|
+struct icmpv6msg_mib {
|
|
+ atomic_long_t mibs[512];
|
|
+};
|
|
+
|
|
+struct icmpv6msg_mib_device {
|
|
+ atomic_long_t mibs[512];
|
|
+};
|
|
+
|
|
+struct tcp_mib {
|
|
+ long unsigned int mibs[16];
|
|
+};
|
|
+
|
|
+struct udp_mib {
|
|
+ long unsigned int mibs[9];
|
|
+};
|
|
+
|
|
+struct linux_mib {
|
|
+ long unsigned int mibs[119];
|
|
+};
|
|
+
|
|
+struct linux_xfrm_mib {
|
|
+ long unsigned int mibs[29];
|
|
+};
|
|
+
|
|
+struct inet_frag_queue;
|
|
+
|
|
+struct inet_frags {
|
|
+ unsigned int qsize;
|
|
+ void (*constructor)(struct inet_frag_queue *, const void *);
|
|
+ void (*destructor)(struct inet_frag_queue *);
|
|
+ void (*frag_expire)(struct timer_list *);
|
|
+ struct kmem_cache *frags_cachep;
|
|
+ const char *frags_cache_name;
|
|
+ struct rhashtable_params rhash_params;
|
|
+};
|
|
+
|
|
+struct frag_v4_compare_key {
|
|
+ __be32 saddr;
|
|
+ __be32 daddr;
|
|
+ u32 user;
|
|
+ u32 vif;
|
|
+ __be16 id;
|
|
+ u16 protocol;
|
|
+};
|
|
+
|
|
+struct frag_v6_compare_key {
|
|
+ struct in6_addr saddr;
|
|
+ struct in6_addr daddr;
|
|
+ u32 user;
|
|
+ __be32 id;
|
|
+ u32 iif;
|
|
+};
|
|
+
|
|
+struct inet_frag_queue {
|
|
+ struct rhash_head node;
|
|
+ union {
|
|
+ struct frag_v4_compare_key v4;
|
|
+ struct frag_v6_compare_key v6;
|
|
+ } key;
|
|
+ struct timer_list timer;
|
|
+ spinlock_t lock;
|
|
+ refcount_t refcnt;
|
|
+ struct sk_buff *fragments;
|
|
+ struct rb_root rb_fragments;
|
|
+ struct sk_buff *fragments_tail;
|
|
+ struct sk_buff *last_run_head;
|
|
+ ktime_t stamp;
|
|
+ int len;
|
|
+ int meat;
|
|
+ __u8 flags;
|
|
+ u16 max_size;
|
|
+ struct netns_frags *net;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct fib_rule;
|
|
+
|
|
+struct fib_lookup_arg;
|
|
+
|
|
+struct fib_rule_hdr;
|
|
+
|
|
+struct nlattr;
|
|
+
|
|
+struct netlink_ext_ack;
|
|
+
|
|
+struct nla_policy;
|
|
+
|
|
+struct fib_rules_ops {
|
|
+ int family;
|
|
+ struct list_head list;
|
|
+ int rule_size;
|
|
+ int addr_size;
|
|
+ int unresolved_rules;
|
|
+ int nr_goto_rules;
|
|
+ unsigned int fib_rules_seq;
|
|
+ int (*action)(struct fib_rule *, struct flowi *, int, struct fib_lookup_arg *);
|
|
+ bool (*suppress)(struct fib_rule *, struct fib_lookup_arg *);
|
|
+ int (*match)(struct fib_rule *, struct flowi *, int);
|
|
+ int (*configure)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *, struct nlattr **, struct netlink_ext_ack *);
|
|
+ int (*delete)(struct fib_rule *);
|
|
+ int (*compare)(struct fib_rule *, struct fib_rule_hdr *, struct nlattr **);
|
|
+ int (*fill)(struct fib_rule *, struct sk_buff *, struct fib_rule_hdr *);
|
|
+ size_t (*nlmsg_payload)(struct fib_rule *);
|
|
+ void (*flush_cache)(struct fib_rules_ops *);
|
|
+ int nlgroup;
|
|
+ const struct nla_policy *policy;
|
|
+ struct list_head rules_list;
|
|
+ struct module *owner;
|
|
+ struct net *fro_net;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+enum tcp_ca_event {
|
|
+ CA_EVENT_TX_START = 0,
|
|
+ CA_EVENT_CWND_RESTART = 1,
|
|
+ CA_EVENT_COMPLETE_CWR = 2,
|
|
+ CA_EVENT_LOSS = 3,
|
|
+ CA_EVENT_ECN_NO_CE = 4,
|
|
+ CA_EVENT_ECN_IS_CE = 5,
|
|
+};
|
|
+
|
|
+struct ack_sample;
|
|
+
|
|
+struct rate_sample;
|
|
+
|
|
+union tcp_cc_info;
|
|
+
|
|
+struct tcp_congestion_ops {
|
|
+ struct list_head list;
|
|
+ u32 key;
|
|
+ u32 flags;
|
|
+ void (*init)(struct sock *);
|
|
+ void (*release)(struct sock *);
|
|
+ u32 (*ssthresh)(struct sock *);
|
|
+ void (*cong_avoid)(struct sock *, u32, u32);
|
|
+ void (*set_state)(struct sock *, u8);
|
|
+ void (*cwnd_event)(struct sock *, enum tcp_ca_event);
|
|
+ void (*in_ack_event)(struct sock *, u32);
|
|
+ u32 (*undo_cwnd)(struct sock *);
|
|
+ void (*pkts_acked)(struct sock *, const struct ack_sample *);
|
|
+ u32 (*min_tso_segs)(struct sock *);
|
|
+ u32 (*sndbuf_expand)(struct sock *);
|
|
+ void (*cong_control)(struct sock *, const struct rate_sample *);
|
|
+ size_t (*get_info)(struct sock *, u32, int *, union tcp_cc_info *);
|
|
+ char name[16];
|
|
+ struct module *owner;
|
|
+};
|
|
+
|
|
+struct fib_notifier_ops {
|
|
+ int family;
|
|
+ struct list_head list;
|
|
+ unsigned int (*fib_seq_read)(struct net *);
|
|
+ int (*fib_dump)(struct net *, struct notifier_block *);
|
|
+ struct module *owner;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct xfrm_state;
|
|
+
|
|
+struct lwtunnel_state;
|
|
+
|
|
+struct dst_entry {
|
|
+ struct net_device *dev;
|
|
+ struct dst_ops *ops;
|
|
+ long unsigned int _metrics;
|
|
+ long unsigned int expires;
|
|
+ struct xfrm_state *xfrm;
|
|
+ int (*input)(struct sk_buff *);
|
|
+ int (*output)(struct net *, struct sock *, struct sk_buff *);
|
|
+ short unsigned int flags;
|
|
+ short int obsolete;
|
|
+ short unsigned int header_len;
|
|
+ short unsigned int trailer_len;
|
|
+ atomic_t __refcnt;
|
|
+ int __use;
|
|
+ long unsigned int lastuse;
|
|
+ struct lwtunnel_state *lwtstate;
|
|
+ struct callback_head callback_head;
|
|
+ short int error;
|
|
+ short int __pad;
|
|
+ __u32 tclassid;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long unsigned int kabi_reserved9;
|
|
+ long unsigned int kabi_reserved10;
|
|
+};
|
|
+
|
|
+struct net_device_stats {
|
|
+ long unsigned int rx_packets;
|
|
+ long unsigned int tx_packets;
|
|
+ long unsigned int rx_bytes;
|
|
+ long unsigned int tx_bytes;
|
|
+ long unsigned int rx_errors;
|
|
+ long unsigned int tx_errors;
|
|
+ long unsigned int rx_dropped;
|
|
+ long unsigned int tx_dropped;
|
|
+ long unsigned int multicast;
|
|
+ long unsigned int collisions;
|
|
+ long unsigned int rx_length_errors;
|
|
+ long unsigned int rx_over_errors;
|
|
+ long unsigned int rx_crc_errors;
|
|
+ long unsigned int rx_frame_errors;
|
|
+ long unsigned int rx_fifo_errors;
|
|
+ long unsigned int rx_missed_errors;
|
|
+ long unsigned int tx_aborted_errors;
|
|
+ long unsigned int tx_carrier_errors;
|
|
+ long unsigned int tx_fifo_errors;
|
|
+ long unsigned int tx_heartbeat_errors;
|
|
+ long unsigned int tx_window_errors;
|
|
+ long unsigned int rx_compressed;
|
|
+ long unsigned int tx_compressed;
|
|
+};
|
|
+
|
|
+struct netdev_hw_addr_list {
|
|
+ struct list_head list;
|
|
+ int count;
|
|
+};
|
|
+
|
|
+struct tipc_bearer;
|
|
+
|
|
+struct wireless_dev;
|
|
+
|
|
+struct mpls_dev;
|
|
+
|
|
+enum rx_handler_result {
|
|
+ RX_HANDLER_CONSUMED = 0,
|
|
+ RX_HANDLER_ANOTHER = 1,
|
|
+ RX_HANDLER_EXACT = 2,
|
|
+ RX_HANDLER_PASS = 3,
|
|
+};
|
|
+
|
|
+typedef enum rx_handler_result rx_handler_result_t;
|
|
+
|
|
+typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **);
|
|
+
|
|
+struct pcpu_dstats;
|
|
+
|
|
+struct pcpu_vstats;
|
|
+
|
|
+struct garp_port;
|
|
+
|
|
+struct mrp_port;
|
|
+
|
|
+struct netdev_tc_txq {
|
|
+ u16 count;
|
|
+ u16 offset;
|
|
+};
|
|
+
|
|
+struct sfp_bus;
|
|
+
|
|
+struct dev_ifalias;
|
|
+
|
|
+struct net_device_ops;
|
|
+
|
|
+struct ethtool_ops;
|
|
+
|
|
+struct switchdev_ops;
|
|
+
|
|
+struct l3mdev_ops;
|
|
+
|
|
+struct ndisc_ops;
|
|
+
|
|
+struct xfrmdev_ops;
|
|
+
|
|
+struct tlsdev_ops;
|
|
+
|
|
+struct header_ops;
|
|
+
|
|
+struct vlan_info;
|
|
+
|
|
+struct in_device;
|
|
+
|
|
+struct inet6_dev;
|
|
+
|
|
+struct wpan_dev;
|
|
+
|
|
+struct netdev_rx_queue;
|
|
+
|
|
+struct mini_Qdisc;
|
|
+
|
|
+struct netdev_queue;
|
|
+
|
|
+struct cpu_rmap;
|
|
+
|
|
+struct Qdisc;
|
|
+
|
|
+struct xps_dev_maps;
|
|
+
|
|
+struct netpoll_info;
|
|
+
|
|
+struct pcpu_lstats;
|
|
+
|
|
+struct pcpu_sw_netstats;
|
|
+
|
|
+struct rtnl_link_ops;
|
|
+
|
|
+struct dcbnl_rtnl_ops;
|
|
+
|
|
+struct netprio_map;
|
|
+
|
|
+struct phy_device;
|
|
+
|
|
+struct net_device {
|
|
+ char name[16];
|
|
+ struct hlist_node name_hlist;
|
|
+ struct dev_ifalias *ifalias;
|
|
+ long unsigned int mem_end;
|
|
+ long unsigned int mem_start;
|
|
+ long unsigned int base_addr;
|
|
+ int irq;
|
|
+ long unsigned int state;
|
|
+ struct list_head dev_list;
|
|
+ struct list_head napi_list;
|
|
+ struct list_head unreg_list;
|
|
+ struct list_head close_list;
|
|
+ struct list_head ptype_all;
|
|
+ struct list_head ptype_specific;
|
|
+ struct {
|
|
+ struct list_head upper;
|
|
+ struct list_head lower;
|
|
+ } adj_list;
|
|
+ netdev_features_t features;
|
|
+ netdev_features_t hw_features;
|
|
+ netdev_features_t wanted_features;
|
|
+ netdev_features_t vlan_features;
|
|
+ netdev_features_t hw_enc_features;
|
|
+ netdev_features_t mpls_features;
|
|
+ netdev_features_t gso_partial_features;
|
|
+ int ifindex;
|
|
+ int group;
|
|
+ struct net_device_stats stats;
|
|
+ atomic_long_t rx_dropped;
|
|
+ atomic_long_t tx_dropped;
|
|
+ atomic_long_t rx_nohandler;
|
|
+ atomic_t carrier_up_count;
|
|
+ atomic_t carrier_down_count;
|
|
+ const struct net_device_ops *netdev_ops;
|
|
+ const struct ethtool_ops *ethtool_ops;
|
|
+ const struct switchdev_ops *switchdev_ops;
|
|
+ const struct l3mdev_ops *l3mdev_ops;
|
|
+ const struct ndisc_ops *ndisc_ops;
|
|
+ const struct xfrmdev_ops *xfrmdev_ops;
|
|
+ const struct tlsdev_ops *tlsdev_ops;
|
|
+ const struct header_ops *header_ops;
|
|
+ unsigned int flags;
|
|
+ unsigned int priv_flags;
|
|
+ short unsigned int gflags;
|
|
+ short unsigned int padded;
|
|
+ unsigned char operstate;
|
|
+ unsigned char link_mode;
|
|
+ unsigned char if_port;
|
|
+ unsigned char dma;
|
|
+ unsigned int mtu;
|
|
+ unsigned int min_mtu;
|
|
+ unsigned int max_mtu;
|
|
+ short unsigned int type;
|
|
+ short unsigned int hard_header_len;
|
|
+ unsigned char min_header_len;
|
|
+ short unsigned int needed_headroom;
|
|
+ short unsigned int needed_tailroom;
|
|
+ unsigned char perm_addr[32];
|
|
+ unsigned char addr_assign_type;
|
|
+ unsigned char addr_len;
|
|
+ unsigned char upper_level;
|
|
+ unsigned char lower_level;
|
|
+ short unsigned int neigh_priv_len;
|
|
+ short unsigned int dev_id;
|
|
+ short unsigned int dev_port;
|
|
+ spinlock_t addr_list_lock;
|
|
+ unsigned char name_assign_type;
|
|
+ bool uc_promisc;
|
|
+ struct netdev_hw_addr_list uc;
|
|
+ struct netdev_hw_addr_list mc;
|
|
+ struct netdev_hw_addr_list dev_addrs;
|
|
+ struct kset *queues_kset;
|
|
+ unsigned int promiscuity;
|
|
+ unsigned int allmulti;
|
|
+ struct vlan_info *vlan_info;
|
|
+ struct tipc_bearer *tipc_ptr;
|
|
+ struct in_device *ip_ptr;
|
|
+ struct inet6_dev *ip6_ptr;
|
|
+ struct wireless_dev *ieee80211_ptr;
|
|
+ struct wpan_dev *ieee802154_ptr;
|
|
+ struct mpls_dev *mpls_ptr;
|
|
+ unsigned char *dev_addr;
|
|
+ struct netdev_rx_queue *_rx;
|
|
+ unsigned int num_rx_queues;
|
|
+ unsigned int real_num_rx_queues;
|
|
+ struct bpf_prog *xdp_prog;
|
|
+ long unsigned int gro_flush_timeout;
|
|
+ rx_handler_func_t *rx_handler;
|
|
+ void *rx_handler_data;
|
|
+ struct mini_Qdisc *miniq_ingress;
|
|
+ struct netdev_queue *ingress_queue;
|
|
+ struct nf_hook_entries *nf_hooks_ingress;
|
|
+ unsigned char broadcast[32];
|
|
+ struct cpu_rmap *rx_cpu_rmap;
|
|
+ struct hlist_node index_hlist;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct netdev_queue *_tx;
|
|
+ unsigned int num_tx_queues;
|
|
+ unsigned int real_num_tx_queues;
|
|
+ struct Qdisc *qdisc;
|
|
+ struct hlist_head qdisc_hash[16];
|
|
+ unsigned int tx_queue_len;
|
|
+ spinlock_t tx_global_lock;
|
|
+ int watchdog_timeo;
|
|
+ struct xps_dev_maps *xps_cpus_map;
|
|
+ struct xps_dev_maps *xps_rxqs_map;
|
|
+ struct mini_Qdisc *miniq_egress;
|
|
+ struct timer_list watchdog_timer;
|
|
+ int *pcpu_refcnt;
|
|
+ struct list_head todo_list;
|
|
+ struct list_head link_watch_list;
|
|
+ enum {
|
|
+ NETREG_UNINITIALIZED = 0,
|
|
+ NETREG_REGISTERED = 1,
|
|
+ NETREG_UNREGISTERING = 2,
|
|
+ NETREG_UNREGISTERED = 3,
|
|
+ NETREG_RELEASED = 4,
|
|
+ NETREG_DUMMY = 5,
|
|
+ } reg_state: 8;
|
|
+ bool dismantle;
|
|
+ enum {
|
|
+ RTNL_LINK_INITIALIZED = 0,
|
|
+ RTNL_LINK_INITIALIZING = 1,
|
|
+ } rtnl_link_state: 16;
|
|
+ bool needs_free_netdev;
|
|
+ void (*priv_destructor)(struct net_device *);
|
|
+ struct netpoll_info *npinfo;
|
|
+ possible_net_t nd_net;
|
|
+ union {
|
|
+ void *ml_priv;
|
|
+ struct pcpu_lstats *lstats;
|
|
+ struct pcpu_sw_netstats *tstats;
|
|
+ struct pcpu_dstats *dstats;
|
|
+ struct pcpu_vstats *vstats;
|
|
+ };
|
|
+ struct garp_port *garp_port;
|
|
+ struct mrp_port *mrp_port;
|
|
+ struct device dev;
|
|
+ const struct attribute_group *sysfs_groups[4];
|
|
+ const struct attribute_group *sysfs_rx_queue_group;
|
|
+ const struct rtnl_link_ops *rtnl_link_ops;
|
|
+ unsigned int gso_max_size;
|
|
+ u16 gso_max_segs;
|
|
+ const struct dcbnl_rtnl_ops *dcbnl_ops;
|
|
+ s16 num_tc;
|
|
+ struct netdev_tc_txq tc_to_txq[16];
|
|
+ u8 prio_tc_map[16];
|
|
+ unsigned int fcoe_ddp_xid;
|
|
+ struct netprio_map *priomap;
|
|
+ struct phy_device *phydev;
|
|
+ struct sfp_bus *sfp_bus;
|
|
+ struct lock_class_key *qdisc_tx_busylock;
|
|
+ struct lock_class_key *qdisc_running_key;
|
|
+ bool proto_down;
|
|
+ unsigned int wol_enabled: 1;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long unsigned int kabi_reserved9;
|
|
+ long unsigned int kabi_reserved10;
|
|
+ long unsigned int kabi_reserved11;
|
|
+ long unsigned int kabi_reserved12;
|
|
+ long unsigned int kabi_reserved13;
|
|
+ long unsigned int kabi_reserved14;
|
|
+ long unsigned int kabi_reserved15;
|
|
+ long unsigned int kabi_reserved16;
|
|
+ long unsigned int kabi_reserved17;
|
|
+ long unsigned int kabi_reserved18;
|
|
+ long unsigned int kabi_reserved19;
|
|
+ long unsigned int kabi_reserved20;
|
|
+ long unsigned int kabi_reserved21;
|
|
+ long unsigned int kabi_reserved22;
|
|
+ long unsigned int kabi_reserved23;
|
|
+ long unsigned int kabi_reserved24;
|
|
+ long unsigned int kabi_reserved25;
|
|
+ long unsigned int kabi_reserved26;
|
|
+ long unsigned int kabi_reserved27;
|
|
+ long unsigned int kabi_reserved28;
|
|
+ long unsigned int kabi_reserved29;
|
|
+ long unsigned int kabi_reserved30;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct hh_cache {
|
|
+ unsigned int hh_len;
|
|
+ seqlock_t hh_lock;
|
|
+ long unsigned int hh_data[12];
|
|
+};
|
|
+
|
|
+struct neigh_table;
|
|
+
|
|
+struct neigh_parms;
|
|
+
|
|
+struct neigh_ops;
|
|
+
|
|
+struct neighbour {
|
|
+ struct neighbour *next;
|
|
+ struct neigh_table *tbl;
|
|
+ struct neigh_parms *parms;
|
|
+ long unsigned int confirmed;
|
|
+ long unsigned int updated;
|
|
+ rwlock_t lock;
|
|
+ refcount_t refcnt;
|
|
+ struct sk_buff_head arp_queue;
|
|
+ unsigned int arp_queue_len_bytes;
|
|
+ struct timer_list timer;
|
|
+ long unsigned int used;
|
|
+ atomic_t probes;
|
|
+ __u8 flags;
|
|
+ __u8 nud_state;
|
|
+ __u8 type;
|
|
+ __u8 dead;
|
|
+ seqlock_t ha_lock;
|
|
+ unsigned char ha[32];
|
|
+ struct hh_cache hh;
|
|
+ int (*output)(struct neighbour *, struct sk_buff *);
|
|
+ const struct neigh_ops *ops;
|
|
+ struct callback_head rcu;
|
|
+ struct net_device *dev;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ u8 primary_key[0];
|
|
+};
|
|
+
|
|
+struct ipv6_stable_secret {
|
|
+ bool initialized;
|
|
+ struct in6_addr secret;
|
|
+};
|
|
+
|
|
+struct ipv6_devconf {
|
|
+ __s32 forwarding;
|
|
+ __s32 hop_limit;
|
|
+ __s32 mtu6;
|
|
+ __s32 accept_ra;
|
|
+ __s32 accept_redirects;
|
|
+ __s32 autoconf;
|
|
+ __s32 dad_transmits;
|
|
+ __s32 rtr_solicits;
|
|
+ __s32 rtr_solicit_interval;
|
|
+ __s32 rtr_solicit_max_interval;
|
|
+ __s32 rtr_solicit_delay;
|
|
+ __s32 force_mld_version;
|
|
+ __s32 mldv1_unsolicited_report_interval;
|
|
+ __s32 mldv2_unsolicited_report_interval;
|
|
+ __s32 use_tempaddr;
|
|
+ __s32 temp_valid_lft;
|
|
+ __s32 temp_prefered_lft;
|
|
+ __s32 regen_max_retry;
|
|
+ __s32 max_desync_factor;
|
|
+ __s32 max_addresses;
|
|
+ __s32 accept_ra_defrtr;
|
|
+ __s32 accept_ra_min_hop_limit;
|
|
+ __s32 accept_ra_pinfo;
|
|
+ __s32 ignore_routes_with_linkdown;
|
|
+ __s32 accept_ra_rtr_pref;
|
|
+ __s32 rtr_probe_interval;
|
|
+ __s32 accept_ra_rt_info_min_plen;
|
|
+ __s32 accept_ra_rt_info_max_plen;
|
|
+ __s32 proxy_ndp;
|
|
+ __s32 accept_source_route;
|
|
+ __s32 accept_ra_from_local;
|
|
+ __s32 optimistic_dad;
|
|
+ __s32 use_optimistic;
|
|
+ __s32 mc_forwarding;
|
|
+ __s32 disable_ipv6;
|
|
+ __s32 drop_unicast_in_l2_multicast;
|
|
+ __s32 accept_dad;
|
|
+ __s32 force_tllao;
|
|
+ __s32 ndisc_notify;
|
|
+ __s32 suppress_frag_ndisc;
|
|
+ __s32 accept_ra_mtu;
|
|
+ __s32 drop_unsolicited_na;
|
|
+ struct ipv6_stable_secret stable_secret;
|
|
+ __s32 use_oif_addrs_only;
|
|
+ __s32 keep_addr_on_down;
|
|
+ __s32 seg6_enabled;
|
|
+ __u32 enhanced_dad;
|
|
+ __u32 addr_gen_mode;
|
|
+ __s32 disable_policy;
|
|
+ __s32 ndisc_tclass;
|
|
+ struct ctl_table_header *sysctl_header;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long unsigned int kabi_reserved9;
|
|
+ long unsigned int kabi_reserved10;
|
|
+ long unsigned int kabi_reserved11;
|
|
+ long unsigned int kabi_reserved12;
|
|
+ long unsigned int kabi_reserved13;
|
|
+ long unsigned int kabi_reserved14;
|
|
+ long unsigned int kabi_reserved15;
|
|
+ long unsigned int kabi_reserved16;
|
|
+};
|
|
+
|
|
+struct nf_queue_entry;
|
|
+
|
|
+struct nf_queue_handler {
|
|
+ int (*outfn)(struct nf_queue_entry *, unsigned int);
|
|
+ void (*nf_hook_drop)(struct net *);
|
|
+};
|
|
+
|
|
+enum nf_log_type {
|
|
+ NF_LOG_TYPE_LOG = 0,
|
|
+ NF_LOG_TYPE_ULOG = 1,
|
|
+ NF_LOG_TYPE_MAX = 2,
|
|
+};
|
|
+
|
|
+typedef u8 u_int8_t;
|
|
+
|
|
+struct nf_loginfo;
|
|
+
|
|
+typedef void nf_logfn(struct net *, u_int8_t, unsigned int, const struct sk_buff *, const struct net_device *, const struct net_device *, const struct nf_loginfo *, const char *);
|
|
+
|
|
+struct nf_logger {
|
|
+ char *name;
|
|
+ enum nf_log_type type;
|
|
+ nf_logfn *logfn;
|
|
+ struct module *me;
|
|
+};
|
|
+
|
|
+struct hlist_nulls_head {
|
|
+ struct hlist_nulls_node *first;
|
|
+};
|
|
+
|
|
+struct ip_conntrack_stat {
|
|
+ unsigned int found;
|
|
+ unsigned int invalid;
|
|
+ unsigned int ignore;
|
|
+ unsigned int insert;
|
|
+ unsigned int insert_failed;
|
|
+ unsigned int drop;
|
|
+ unsigned int early_drop;
|
|
+ unsigned int error;
|
|
+ unsigned int expect_new;
|
|
+ unsigned int expect_create;
|
|
+ unsigned int expect_delete;
|
|
+ unsigned int search_restart;
|
|
+};
|
|
+
|
|
+struct ct_pcpu {
|
|
+ spinlock_t lock;
|
|
+ struct hlist_nulls_head unconfirmed;
|
|
+ struct hlist_nulls_head dying;
|
|
+};
|
|
+
|
|
+typedef enum {
|
|
+ SS_FREE = 0,
|
|
+ SS_UNCONNECTED = 1,
|
|
+ SS_CONNECTING = 2,
|
|
+ SS_CONNECTED = 3,
|
|
+ SS_DISCONNECTING = 4,
|
|
+} socket_state;
|
|
+
|
|
+struct socket_wq {
|
|
+ wait_queue_head_t wait;
|
|
+ struct fasync_struct *fasync_list;
|
|
+ long unsigned int flags;
|
|
+ struct callback_head rcu;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct proto_ops;
|
|
+
|
|
+struct socket {
|
|
+ socket_state state;
|
|
+ short int type;
|
|
+ long unsigned int flags;
|
|
+ struct socket_wq *wq;
|
|
+ struct file *file;
|
|
+ struct sock *sk;
|
|
+ const struct proto_ops *ops;
|
|
+};
|
|
+
|
|
+typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, unsigned int, size_t);
|
|
+
|
|
+struct proto_ops {
|
|
+ int family;
|
|
+ struct module *owner;
|
|
+ int (*release)(struct socket *);
|
|
+ int (*bind)(struct socket *, struct sockaddr *, int);
|
|
+ int (*connect)(struct socket *, struct sockaddr *, int, int);
|
|
+ int (*socketpair)(struct socket *, struct socket *);
|
|
+ int (*accept)(struct socket *, struct socket *, int, bool);
|
|
+ int (*getname)(struct socket *, struct sockaddr *, int);
|
|
+ __poll_t (*poll)(struct file *, struct socket *, struct poll_table_struct *);
|
|
+ int (*ioctl)(struct socket *, unsigned int, long unsigned int);
|
|
+ int (*compat_ioctl)(struct socket *, unsigned int, long unsigned int);
|
|
+ int (*listen)(struct socket *, int);
|
|
+ int (*shutdown)(struct socket *, int);
|
|
+ int (*setsockopt)(struct socket *, int, int, char *, unsigned int);
|
|
+ int (*getsockopt)(struct socket *, int, int, char *, int *);
|
|
+ int (*compat_setsockopt)(struct socket *, int, int, char *, unsigned int);
|
|
+ int (*compat_getsockopt)(struct socket *, int, int, char *, int *);
|
|
+ int (*sendmsg)(struct socket *, struct msghdr *, size_t);
|
|
+ int (*recvmsg)(struct socket *, struct msghdr *, size_t, int);
|
|
+ int (*mmap)(struct file *, struct socket *, struct vm_area_struct *);
|
|
+ ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
|
|
+ ssize_t (*splice_read)(struct socket *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
|
|
+ int (*set_peek_off)(struct sock *, int);
|
|
+ int (*peek_len)(struct socket *);
|
|
+ int (*read_sock)(struct sock *, read_descriptor_t *, sk_read_actor_t);
|
|
+ int (*sendpage_locked)(struct sock *, struct page *, int, size_t, int);
|
|
+ int (*sendmsg_locked)(struct sock *, struct msghdr *, size_t);
|
|
+ int (*set_rcvlowat)(struct sock *, int);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+};
|
|
+
|
|
+enum swiotlb_force {
|
|
+ SWIOTLB_NORMAL = 0,
|
|
+ SWIOTLB_FORCE = 1,
|
|
+ SWIOTLB_NO_FORCE = 2,
|
|
+};
|
|
+
|
|
+struct pipe_buf_operations;
|
|
+
|
|
+struct pipe_buffer {
|
|
+ struct page *page;
|
|
+ unsigned int offset;
|
|
+ unsigned int len;
|
|
+ const struct pipe_buf_operations *ops;
|
|
+ unsigned int flags;
|
|
+ long unsigned int private;
|
|
+};
|
|
+
|
|
+struct pipe_buf_operations {
|
|
+ int can_merge;
|
|
+ int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);
|
|
+ void (*release)(struct pipe_inode_info *, struct pipe_buffer *);
|
|
+ int (*steal)(struct pipe_inode_info *, struct pipe_buffer *);
|
|
+ bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
|
|
+};
|
|
+
|
|
+struct nf_bridge_info {
|
|
+ refcount_t use;
|
|
+ enum {
|
|
+ BRNF_PROTO_UNCHANGED = 0,
|
|
+ BRNF_PROTO_8021Q = 1,
|
|
+ BRNF_PROTO_PPPOE = 2,
|
|
+ } orig_proto: 8;
|
|
+ u8 pkt_otherhost: 1;
|
|
+ u8 in_prerouting: 1;
|
|
+ u8 bridged_dnat: 1;
|
|
+ __u16 frag_max_size;
|
|
+ struct net_device *physindev;
|
|
+ struct net_device *physoutdev;
|
|
+ union {
|
|
+ __be32 ipv4_daddr;
|
|
+ struct in6_addr ipv6_daddr;
|
|
+ char neigh_header[8];
|
|
+ };
|
|
+};
|
|
+
|
|
+struct skb_checksum_ops {
|
|
+ __wsum (*update)(const void *, int, __wsum);
|
|
+ __wsum (*combine)(__wsum, __wsum, int, int);
|
|
+};
|
|
+
|
|
+struct pernet_operations {
|
|
+ struct list_head list;
|
|
+ int (*init)(struct net *);
|
|
+ void (*exit)(struct net *);
|
|
+ void (*exit_batch)(struct list_head *);
|
|
+ unsigned int *id;
|
|
+ size_t size;
|
|
+};
|
|
+
|
|
+struct auth_cred {
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ struct group_info *group_info;
|
|
+ const char *principal;
|
|
+ long unsigned int ac_flags;
|
|
+ unsigned char machine_cred: 1;
|
|
+};
|
|
+
|
|
+struct rpc_cred_cache;
|
|
+
|
|
+struct rpc_authops;
|
|
+
|
|
+struct rpc_auth {
|
|
+ unsigned int au_cslack;
|
|
+ unsigned int au_rslack;
|
|
+ unsigned int au_verfsize;
|
|
+ unsigned int au_flags;
|
|
+ const struct rpc_authops *au_ops;
|
|
+ rpc_authflavor_t au_flavor;
|
|
+ atomic_t au_count;
|
|
+ struct rpc_cred_cache *au_credcache;
|
|
+};
|
|
+
|
|
+struct rpc_credops {
|
|
+ const char *cr_name;
|
|
+ int (*cr_init)(struct rpc_auth *, struct rpc_cred *);
|
|
+ void (*crdestroy)(struct rpc_cred *);
|
|
+ int (*crmatch)(struct auth_cred *, struct rpc_cred *, int);
|
|
+ struct rpc_cred * (*crbind)(struct rpc_task *, struct rpc_cred *, int);
|
|
+ __be32 * (*crmarshal)(struct rpc_task *, __be32 *);
|
|
+ int (*crrefresh)(struct rpc_task *);
|
|
+ __be32 * (*crvalidate)(struct rpc_task *, __be32 *);
|
|
+ int (*crwrap_req)(struct rpc_task *, kxdreproc_t, void *, __be32 *, void *);
|
|
+ int (*crunwrap_resp)(struct rpc_task *, kxdrdproc_t, void *, __be32 *, void *);
|
|
+ int (*crkey_timeout)(struct rpc_cred *);
|
|
+ bool (*crkey_to_expire)(struct rpc_cred *);
|
|
+ char * (*crstringify_acceptor)(struct rpc_cred *);
|
|
+};
|
|
+
|
|
+struct rpc_auth_create_args;
|
|
+
|
|
+struct rpcsec_gss_info;
|
|
+
|
|
+struct rpc_authops {
|
|
+ struct module *owner;
|
|
+ rpc_authflavor_t au_flavor;
|
|
+ char *au_name;
|
|
+ struct rpc_auth * (*create)(const struct rpc_auth_create_args *, struct rpc_clnt *);
|
|
+ void (*destroy)(struct rpc_auth *);
|
|
+ int (*hash_cred)(struct auth_cred *, unsigned int);
|
|
+ struct rpc_cred * (*lookup_cred)(struct rpc_auth *, struct auth_cred *, int);
|
|
+ struct rpc_cred * (*crcreate)(struct rpc_auth *, struct auth_cred *, int, gfp_t);
|
|
+ int (*list_pseudoflavors)(rpc_authflavor_t *, int);
|
|
+ rpc_authflavor_t (*info2flavor)(struct rpcsec_gss_info *);
|
|
+ int (*flavor2info)(rpc_authflavor_t, struct rpcsec_gss_info *);
|
|
+ int (*key_timeout)(struct rpc_auth *, struct rpc_cred *);
|
|
+};
|
|
+
|
|
+struct rpc_auth_create_args {
|
|
+ rpc_authflavor_t pseudoflavor;
|
|
+ const char *target_name;
|
|
+};
|
|
+
|
|
+struct rpcsec_gss_oid {
|
|
+ unsigned int len;
|
|
+ u8 data[32];
|
|
+};
|
|
+
|
|
+struct rpcsec_gss_info {
|
|
+ struct rpcsec_gss_oid oid;
|
|
+ u32 qop;
|
|
+ u32 service;
|
|
+};
|
|
+
|
|
+struct rpc_xprt_ops {
|
|
+ void (*set_buffer_size)(struct rpc_xprt *, size_t, size_t);
|
|
+ int (*reserve_xprt)(struct rpc_xprt *, struct rpc_task *);
|
|
+ void (*release_xprt)(struct rpc_xprt *, struct rpc_task *);
|
|
+ void (*alloc_slot)(struct rpc_xprt *, struct rpc_task *);
|
|
+ void (*free_slot)(struct rpc_xprt *, struct rpc_rqst *);
|
|
+ void (*rpcbind)(struct rpc_task *);
|
|
+ void (*set_port)(struct rpc_xprt *, short unsigned int);
|
|
+ void (*connect)(struct rpc_xprt *, struct rpc_task *);
|
|
+ int (*buf_alloc)(struct rpc_task *);
|
|
+ void (*buf_free)(struct rpc_task *);
|
|
+ int (*send_request)(struct rpc_task *);
|
|
+ void (*set_retrans_timeout)(struct rpc_task *);
|
|
+ void (*timer)(struct rpc_xprt *, struct rpc_task *);
|
|
+ void (*release_request)(struct rpc_task *);
|
|
+ void (*close)(struct rpc_xprt *);
|
|
+ void (*destroy)(struct rpc_xprt *);
|
|
+ void (*set_connect_timeout)(struct rpc_xprt *, long unsigned int, long unsigned int);
|
|
+ void (*print_stats)(struct rpc_xprt *, struct seq_file *);
|
|
+ int (*enable_swap)(struct rpc_xprt *);
|
|
+ void (*disable_swap)(struct rpc_xprt *);
|
|
+ void (*inject_disconnect)(struct rpc_xprt *);
|
|
+ int (*bc_setup)(struct rpc_xprt *, unsigned int);
|
|
+ int (*bc_up)(struct svc_serv *, struct net *);
|
|
+ size_t (*bc_maxpayload)(struct rpc_xprt *);
|
|
+ void (*bc_free_rqst)(struct rpc_rqst *);
|
|
+ void (*bc_destroy)(struct rpc_xprt *, unsigned int);
|
|
+};
|
|
+
|
|
+struct rpc_xprt_switch {
|
|
+ spinlock_t xps_lock;
|
|
+ struct kref xps_kref;
|
|
+ unsigned int xps_nxprts;
|
|
+ struct list_head xps_xprt_list;
|
|
+ struct net *xps_net;
|
|
+ const struct rpc_xprt_iter_ops *xps_iter_ops;
|
|
+ struct callback_head xps_rcu;
|
|
+};
|
|
+
|
|
+struct rpc_stat {
|
|
+ const struct rpc_program *program;
|
|
+ unsigned int netcnt;
|
|
+ unsigned int netudpcnt;
|
|
+ unsigned int nettcpcnt;
|
|
+ unsigned int nettcpconn;
|
|
+ unsigned int netreconn;
|
|
+ unsigned int rpccnt;
|
|
+ unsigned int rpcretrans;
|
|
+ unsigned int rpcauthrefresh;
|
|
+ unsigned int rpcgarbage;
|
|
+};
|
|
+
|
|
+struct rpc_version;
|
|
+
|
|
+struct rpc_program {
|
|
+ const char *name;
|
|
+ u32 number;
|
|
+ unsigned int nrvers;
|
|
+ const struct rpc_version **version;
|
|
+ struct rpc_stat *stats;
|
|
+ const char *pipe_dir_name;
|
|
+};
|
|
+
|
|
+struct ipv6_params {
|
|
+ __s32 disable_ipv6;
|
|
+ __s32 autoconf;
|
|
+};
|
|
+
|
|
+struct dql {
|
|
+ unsigned int num_queued;
|
|
+ unsigned int adj_limit;
|
|
+ unsigned int last_obj_cnt;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ unsigned int limit;
|
|
+ unsigned int num_completed;
|
|
+ unsigned int prev_ovlimit;
|
|
+ unsigned int prev_num_queued;
|
|
+ unsigned int prev_last_obj_cnt;
|
|
+ unsigned int lowest_slack;
|
|
+ long unsigned int slack_start_time;
|
|
+ unsigned int max_limit;
|
|
+ unsigned int min_limit;
|
|
+ unsigned int slack_hold_time;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct ethtool_cmd {
|
|
+ __u32 cmd;
|
|
+ __u32 supported;
|
|
+ __u32 advertising;
|
|
+ __u16 speed;
|
|
+ __u8 duplex;
|
|
+ __u8 port;
|
|
+ __u8 phy_address;
|
|
+ __u8 transceiver;
|
|
+ __u8 autoneg;
|
|
+ __u8 mdio_support;
|
|
+ __u32 maxtxpkt;
|
|
+ __u32 maxrxpkt;
|
|
+ __u16 speed_hi;
|
|
+ __u8 eth_tp_mdix;
|
|
+ __u8 eth_tp_mdix_ctrl;
|
|
+ __u32 lp_advertising;
|
|
+ __u32 reserved[2];
|
|
+};
|
|
+
|
|
+struct ethtool_drvinfo {
|
|
+ __u32 cmd;
|
|
+ char driver[32];
|
|
+ char version[32];
|
|
+ char fw_version[32];
|
|
+ char bus_info[32];
|
|
+ char erom_version[32];
|
|
+ char reserved2[12];
|
|
+ __u32 n_priv_flags;
|
|
+ __u32 n_stats;
|
|
+ __u32 testinfo_len;
|
|
+ __u32 eedump_len;
|
|
+ __u32 regdump_len;
|
|
+};
|
|
+
|
|
+struct ethtool_wolinfo {
|
|
+ __u32 cmd;
|
|
+ __u32 supported;
|
|
+ __u32 wolopts;
|
|
+ __u8 sopass[6];
|
|
+};
|
|
+
|
|
+struct ethtool_tunable {
|
|
+ __u32 cmd;
|
|
+ __u32 id;
|
|
+ __u32 type_id;
|
|
+ __u32 len;
|
|
+ void *data[0];
|
|
+};
|
|
+
|
|
+struct ethtool_regs {
|
|
+ __u32 cmd;
|
|
+ __u32 version;
|
|
+ __u32 len;
|
|
+ __u8 data[0];
|
|
+};
|
|
+
|
|
+struct ethtool_eeprom {
|
|
+ __u32 cmd;
|
|
+ __u32 magic;
|
|
+ __u32 offset;
|
|
+ __u32 len;
|
|
+ __u8 data[0];
|
|
+};
|
|
+
|
|
+struct ethtool_eee {
|
|
+ __u32 cmd;
|
|
+ __u32 supported;
|
|
+ __u32 advertised;
|
|
+ __u32 lp_advertised;
|
|
+ __u32 eee_active;
|
|
+ __u32 eee_enabled;
|
|
+ __u32 tx_lpi_enabled;
|
|
+ __u32 tx_lpi_timer;
|
|
+ __u32 reserved[2];
|
|
+};
|
|
+
|
|
+struct ethtool_modinfo {
|
|
+ __u32 cmd;
|
|
+ __u32 type;
|
|
+ __u32 eeprom_len;
|
|
+ __u32 reserved[8];
|
|
+};
|
|
+
|
|
+struct ethtool_coalesce {
|
|
+ __u32 cmd;
|
|
+ __u32 rx_coalesce_usecs;
|
|
+ __u32 rx_max_coalesced_frames;
|
|
+ __u32 rx_coalesce_usecs_irq;
|
|
+ __u32 rx_max_coalesced_frames_irq;
|
|
+ __u32 tx_coalesce_usecs;
|
|
+ __u32 tx_max_coalesced_frames;
|
|
+ __u32 tx_coalesce_usecs_irq;
|
|
+ __u32 tx_max_coalesced_frames_irq;
|
|
+ __u32 stats_block_coalesce_usecs;
|
|
+ __u32 use_adaptive_rx_coalesce;
|
|
+ __u32 use_adaptive_tx_coalesce;
|
|
+ __u32 pkt_rate_low;
|
|
+ __u32 rx_coalesce_usecs_low;
|
|
+ __u32 rx_max_coalesced_frames_low;
|
|
+ __u32 tx_coalesce_usecs_low;
|
|
+ __u32 tx_max_coalesced_frames_low;
|
|
+ __u32 pkt_rate_high;
|
|
+ __u32 rx_coalesce_usecs_high;
|
|
+ __u32 rx_max_coalesced_frames_high;
|
|
+ __u32 tx_coalesce_usecs_high;
|
|
+ __u32 tx_max_coalesced_frames_high;
|
|
+ __u32 rate_sample_interval;
|
|
+};
|
|
+
|
|
+struct ethtool_ringparam {
|
|
+ __u32 cmd;
|
|
+ __u32 rx_max_pending;
|
|
+ __u32 rx_mini_max_pending;
|
|
+ __u32 rx_jumbo_max_pending;
|
|
+ __u32 tx_max_pending;
|
|
+ __u32 rx_pending;
|
|
+ __u32 rx_mini_pending;
|
|
+ __u32 rx_jumbo_pending;
|
|
+ __u32 tx_pending;
|
|
+};
|
|
+
|
|
+struct ethtool_channels {
|
|
+ __u32 cmd;
|
|
+ __u32 max_rx;
|
|
+ __u32 max_tx;
|
|
+ __u32 max_other;
|
|
+ __u32 max_combined;
|
|
+ __u32 rx_count;
|
|
+ __u32 tx_count;
|
|
+ __u32 other_count;
|
|
+ __u32 combined_count;
|
|
+};
|
|
+
|
|
+struct ethtool_pauseparam {
|
|
+ __u32 cmd;
|
|
+ __u32 autoneg;
|
|
+ __u32 rx_pause;
|
|
+ __u32 tx_pause;
|
|
+};
|
|
+
|
|
+struct ethtool_test {
|
|
+ __u32 cmd;
|
|
+ __u32 flags;
|
|
+ __u32 reserved;
|
|
+ __u32 len;
|
|
+ __u64 data[0];
|
|
+};
|
|
+
|
|
+struct ethtool_stats {
|
|
+ __u32 cmd;
|
|
+ __u32 n_stats;
|
|
+ __u64 data[0];
|
|
+};
|
|
+
|
|
+struct ethtool_tcpip4_spec {
|
|
+ __be32 ip4src;
|
|
+ __be32 ip4dst;
|
|
+ __be16 psrc;
|
|
+ __be16 pdst;
|
|
+ __u8 tos;
|
|
+};
|
|
+
|
|
+struct ethtool_ah_espip4_spec {
|
|
+ __be32 ip4src;
|
|
+ __be32 ip4dst;
|
|
+ __be32 spi;
|
|
+ __u8 tos;
|
|
+};
|
|
+
|
|
+struct ethtool_usrip4_spec {
|
|
+ __be32 ip4src;
|
|
+ __be32 ip4dst;
|
|
+ __be32 l4_4_bytes;
|
|
+ __u8 tos;
|
|
+ __u8 ip_ver;
|
|
+ __u8 proto;
|
|
+};
|
|
+
|
|
+struct ethtool_tcpip6_spec {
|
|
+ __be32 ip6src[4];
|
|
+ __be32 ip6dst[4];
|
|
+ __be16 psrc;
|
|
+ __be16 pdst;
|
|
+ __u8 tclass;
|
|
+};
|
|
+
|
|
+struct ethtool_ah_espip6_spec {
|
|
+ __be32 ip6src[4];
|
|
+ __be32 ip6dst[4];
|
|
+ __be32 spi;
|
|
+ __u8 tclass;
|
|
+};
|
|
+
|
|
+struct ethtool_usrip6_spec {
|
|
+ __be32 ip6src[4];
|
|
+ __be32 ip6dst[4];
|
|
+ __be32 l4_4_bytes;
|
|
+ __u8 tclass;
|
|
+ __u8 l4_proto;
|
|
+};
|
|
+
|
|
+union ethtool_flow_union {
|
|
+ struct ethtool_tcpip4_spec tcp_ip4_spec;
|
|
+ struct ethtool_tcpip4_spec udp_ip4_spec;
|
|
+ struct ethtool_tcpip4_spec sctp_ip4_spec;
|
|
+ struct ethtool_ah_espip4_spec ah_ip4_spec;
|
|
+ struct ethtool_ah_espip4_spec esp_ip4_spec;
|
|
+ struct ethtool_usrip4_spec usr_ip4_spec;
|
|
+ struct ethtool_tcpip6_spec tcp_ip6_spec;
|
|
+ struct ethtool_tcpip6_spec udp_ip6_spec;
|
|
+ struct ethtool_tcpip6_spec sctp_ip6_spec;
|
|
+ struct ethtool_ah_espip6_spec ah_ip6_spec;
|
|
+ struct ethtool_ah_espip6_spec esp_ip6_spec;
|
|
+ struct ethtool_usrip6_spec usr_ip6_spec;
|
|
+ struct ethhdr ether_spec;
|
|
+ __u8 hdata[52];
|
|
+};
|
|
+
|
|
+struct ethtool_flow_ext {
|
|
+ __u8 padding[2];
|
|
+ unsigned char h_dest[6];
|
|
+ __be16 vlan_etype;
|
|
+ __be16 vlan_tci;
|
|
+ __be32 data[2];
|
|
+};
|
|
+
|
|
+struct ethtool_rx_flow_spec {
|
|
+ __u32 flow_type;
|
|
+ union ethtool_flow_union h_u;
|
|
+ struct ethtool_flow_ext h_ext;
|
|
+ union ethtool_flow_union m_u;
|
|
+ struct ethtool_flow_ext m_ext;
|
|
+ __u64 ring_cookie;
|
|
+ __u32 location;
|
|
+};
|
|
+
|
|
+struct ethtool_rxnfc {
|
|
+ __u32 cmd;
|
|
+ __u32 flow_type;
|
|
+ __u64 data;
|
|
+ struct ethtool_rx_flow_spec fs;
|
|
+ union {
|
|
+ __u32 rule_cnt;
|
|
+ __u32 rss_context;
|
|
+ };
|
|
+ __u32 rule_locs[0];
|
|
+};
|
|
+
|
|
+struct ethtool_flash {
|
|
+ __u32 cmd;
|
|
+ __u32 region;
|
|
+ char data[128];
|
|
+};
|
|
+
|
|
+struct ethtool_dump {
|
|
+ __u32 cmd;
|
|
+ __u32 version;
|
|
+ __u32 flag;
|
|
+ __u32 len;
|
|
+ __u8 data[0];
|
|
+};
|
|
+
|
|
+struct ethtool_ts_info {
|
|
+ __u32 cmd;
|
|
+ __u32 so_timestamping;
|
|
+ __s32 phc_index;
|
|
+ __u32 tx_types;
|
|
+ __u32 tx_reserved[3];
|
|
+ __u32 rx_filters;
|
|
+ __u32 rx_reserved[3];
|
|
+};
|
|
+
|
|
+struct ethtool_fecparam {
|
|
+ __u32 cmd;
|
|
+ __u32 active_fec;
|
|
+ __u32 fec;
|
|
+ __u32 reserved;
|
|
+};
|
|
+
|
|
+struct ethtool_link_settings {
|
|
+ __u32 cmd;
|
|
+ __u32 speed;
|
|
+ __u8 duplex;
|
|
+ __u8 port;
|
|
+ __u8 phy_address;
|
|
+ __u8 autoneg;
|
|
+ __u8 mdio_support;
|
|
+ __u8 eth_tp_mdix;
|
|
+ __u8 eth_tp_mdix_ctrl;
|
|
+ __s8 link_mode_masks_nwords;
|
|
+ __u8 transceiver;
|
|
+ __u8 reserved1[3];
|
|
+ __u32 reserved[7];
|
|
+ __u32 link_mode_masks[0];
|
|
+};
|
|
+
|
|
+enum ethtool_phys_id_state {
|
|
+ ETHTOOL_ID_INACTIVE = 0,
|
|
+ ETHTOOL_ID_ACTIVE = 1,
|
|
+ ETHTOOL_ID_ON = 2,
|
|
+ ETHTOOL_ID_OFF = 3,
|
|
+};
|
|
+
|
|
+struct ethtool_link_ksettings {
|
|
+ struct ethtool_link_settings base;
|
|
+ struct {
|
|
+ long unsigned int supported[1];
|
|
+ long unsigned int advertising[1];
|
|
+ long unsigned int lp_advertising[1];
|
|
+ } link_modes;
|
|
+};
|
|
+
|
|
+struct ethtool_ops {
|
|
+ int (*get_settings)(struct net_device *, struct ethtool_cmd *);
|
|
+ int (*set_settings)(struct net_device *, struct ethtool_cmd *);
|
|
+ void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *);
|
|
+ int (*get_regs_len)(struct net_device *);
|
|
+ void (*get_regs)(struct net_device *, struct ethtool_regs *, void *);
|
|
+ void (*get_wol)(struct net_device *, struct ethtool_wolinfo *);
|
|
+ int (*set_wol)(struct net_device *, struct ethtool_wolinfo *);
|
|
+ u32 (*get_msglevel)(struct net_device *);
|
|
+ void (*set_msglevel)(struct net_device *, u32);
|
|
+ int (*nway_reset)(struct net_device *);
|
|
+ u32 (*get_link)(struct net_device *);
|
|
+ int (*get_eeprom_len)(struct net_device *);
|
|
+ int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
|
|
+ int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
|
|
+ int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
|
|
+ int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
|
|
+ void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *);
|
|
+ int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *);
|
|
+ void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam *);
|
|
+ int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam *);
|
|
+ void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
|
|
+ void (*get_strings)(struct net_device *, u32, u8 *);
|
|
+ int (*set_phys_id)(struct net_device *, enum ethtool_phys_id_state);
|
|
+ void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *);
|
|
+ int (*begin)(struct net_device *);
|
|
+ void (*complete)(struct net_device *);
|
|
+ u32 (*get_priv_flags)(struct net_device *);
|
|
+ int (*set_priv_flags)(struct net_device *, u32);
|
|
+ int (*get_sset_count)(struct net_device *, int);
|
|
+ int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, u32 *);
|
|
+ int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
|
|
+ int (*flash_device)(struct net_device *, struct ethtool_flash *);
|
|
+ int (*reset)(struct net_device *, u32 *);
|
|
+ u32 (*get_rxfh_key_size)(struct net_device *);
|
|
+ u32 (*get_rxfh_indir_size)(struct net_device *);
|
|
+ int (*get_rxfh)(struct net_device *, u32 *, u8 *, u8 *);
|
|
+ int (*set_rxfh)(struct net_device *, const u32 *, const u8 *, const u8);
|
|
+ int (*get_rxfh_context)(struct net_device *, u32 *, u8 *, u8 *, u32);
|
|
+ int (*set_rxfh_context)(struct net_device *, const u32 *, const u8 *, const u8, u32 *, bool);
|
|
+ void (*get_channels)(struct net_device *, struct ethtool_channels *);
|
|
+ int (*set_channels)(struct net_device *, struct ethtool_channels *);
|
|
+ int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
|
|
+ int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *);
|
|
+ int (*set_dump)(struct net_device *, struct ethtool_dump *);
|
|
+ int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *);
|
|
+ int (*get_module_info)(struct net_device *, struct ethtool_modinfo *);
|
|
+ int (*get_module_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *);
|
|
+ int (*get_eee)(struct net_device *, struct ethtool_eee *);
|
|
+ int (*set_eee)(struct net_device *, struct ethtool_eee *);
|
|
+ int (*get_tunable)(struct net_device *, const struct ethtool_tunable *, void *);
|
|
+ int (*set_tunable)(struct net_device *, const struct ethtool_tunable *, const void *);
|
|
+ int (*get_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *);
|
|
+ int (*set_per_queue_coalesce)(struct net_device *, u32, struct ethtool_coalesce *);
|
|
+ int (*get_link_ksettings)(struct net_device *, struct ethtool_link_ksettings *);
|
|
+ int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *);
|
|
+ int (*get_fecparam)(struct net_device *, struct ethtool_fecparam *);
|
|
+ int (*set_fecparam)(struct net_device *, struct ethtool_fecparam *);
|
|
+ void (*get_ethtool_phy_stats)(struct net_device *, struct ethtool_stats *, u64 *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long unsigned int kabi_reserved9;
|
|
+ long unsigned int kabi_reserved10;
|
|
+ long unsigned int kabi_reserved11;
|
|
+ long unsigned int kabi_reserved12;
|
|
+ long unsigned int kabi_reserved13;
|
|
+ long unsigned int kabi_reserved14;
|
|
+ long unsigned int kabi_reserved15;
|
|
+ long unsigned int kabi_reserved16;
|
|
+ long unsigned int kabi_reserved17;
|
|
+ long unsigned int kabi_reserved18;
|
|
+ long unsigned int kabi_reserved19;
|
|
+ long unsigned int kabi_reserved20;
|
|
+ long unsigned int kabi_reserved21;
|
|
+ long unsigned int kabi_reserved22;
|
|
+ long unsigned int kabi_reserved23;
|
|
+ long unsigned int kabi_reserved24;
|
|
+ long unsigned int kabi_reserved25;
|
|
+ long unsigned int kabi_reserved26;
|
|
+ long unsigned int kabi_reserved27;
|
|
+ long unsigned int kabi_reserved28;
|
|
+ long unsigned int kabi_reserved29;
|
|
+ long unsigned int kabi_reserved30;
|
|
+ long unsigned int kabi_reserved31;
|
|
+ long unsigned int kabi_reserved32;
|
|
+};
|
|
+
|
|
+struct ieee_ets {
|
|
+ __u8 willing;
|
|
+ __u8 ets_cap;
|
|
+ __u8 cbs;
|
|
+ __u8 tc_tx_bw[8];
|
|
+ __u8 tc_rx_bw[8];
|
|
+ __u8 tc_tsa[8];
|
|
+ __u8 prio_tc[8];
|
|
+ __u8 tc_reco_bw[8];
|
|
+ __u8 tc_reco_tsa[8];
|
|
+ __u8 reco_prio_tc[8];
|
|
+};
|
|
+
|
|
+struct ieee_maxrate {
|
|
+ __u64 tc_maxrate[8];
|
|
+};
|
|
+
|
|
+struct ieee_qcn {
|
|
+ __u8 rpg_enable[8];
|
|
+ __u32 rppp_max_rps[8];
|
|
+ __u32 rpg_time_reset[8];
|
|
+ __u32 rpg_byte_reset[8];
|
|
+ __u32 rpg_threshold[8];
|
|
+ __u32 rpg_max_rate[8];
|
|
+ __u32 rpg_ai_rate[8];
|
|
+ __u32 rpg_hai_rate[8];
|
|
+ __u32 rpg_gd[8];
|
|
+ __u32 rpg_min_dec_fac[8];
|
|
+ __u32 rpg_min_rate[8];
|
|
+ __u32 cndd_state_machine[8];
|
|
+};
|
|
+
|
|
+struct ieee_qcn_stats {
|
|
+ __u64 rppp_rp_centiseconds[8];
|
|
+ __u32 rppp_created_rps[8];
|
|
+};
|
|
+
|
|
+struct ieee_pfc {
|
|
+ __u8 pfc_cap;
|
|
+ __u8 pfc_en;
|
|
+ __u8 mbc;
|
|
+ __u16 delay;
|
|
+ __u64 requests[8];
|
|
+ __u64 indications[8];
|
|
+};
|
|
+
|
|
+struct dcbnl_buffer {
|
|
+ __u8 prio2buffer[8];
|
|
+ __u32 buffer_size[8];
|
|
+ __u32 total_size;
|
|
+};
|
|
+
|
|
+struct cee_pg {
|
|
+ __u8 willing;
|
|
+ __u8 error;
|
|
+ __u8 pg_en;
|
|
+ __u8 tcs_supported;
|
|
+ __u8 pg_bw[8];
|
|
+ __u8 prio_pg[8];
|
|
+};
|
|
+
|
|
+struct cee_pfc {
|
|
+ __u8 willing;
|
|
+ __u8 error;
|
|
+ __u8 pfc_en;
|
|
+ __u8 tcs_supported;
|
|
+};
|
|
+
|
|
+struct dcb_app {
|
|
+ __u8 selector;
|
|
+ __u8 priority;
|
|
+ __u16 protocol;
|
|
+};
|
|
+
|
|
+struct dcb_peer_app_info {
|
|
+ __u8 willing;
|
|
+ __u8 error;
|
|
+};
|
|
+
|
|
+struct dcbnl_rtnl_ops {
|
|
+ int (*ieee_getets)(struct net_device *, struct ieee_ets *);
|
|
+ int (*ieee_setets)(struct net_device *, struct ieee_ets *);
|
|
+ int (*ieee_getmaxrate)(struct net_device *, struct ieee_maxrate *);
|
|
+ int (*ieee_setmaxrate)(struct net_device *, struct ieee_maxrate *);
|
|
+ int (*ieee_getqcn)(struct net_device *, struct ieee_qcn *);
|
|
+ int (*ieee_setqcn)(struct net_device *, struct ieee_qcn *);
|
|
+ int (*ieee_getqcnstats)(struct net_device *, struct ieee_qcn_stats *);
|
|
+ int (*ieee_getpfc)(struct net_device *, struct ieee_pfc *);
|
|
+ int (*ieee_setpfc)(struct net_device *, struct ieee_pfc *);
|
|
+ int (*ieee_getapp)(struct net_device *, struct dcb_app *);
|
|
+ int (*ieee_setapp)(struct net_device *, struct dcb_app *);
|
|
+ int (*ieee_delapp)(struct net_device *, struct dcb_app *);
|
|
+ int (*ieee_peer_getets)(struct net_device *, struct ieee_ets *);
|
|
+ int (*ieee_peer_getpfc)(struct net_device *, struct ieee_pfc *);
|
|
+ u8 (*getstate)(struct net_device *);
|
|
+ u8 (*setstate)(struct net_device *, u8);
|
|
+ void (*getpermhwaddr)(struct net_device *, u8 *);
|
|
+ void (*setpgtccfgtx)(struct net_device *, int, u8, u8, u8, u8);
|
|
+ void (*setpgbwgcfgtx)(struct net_device *, int, u8);
|
|
+ void (*setpgtccfgrx)(struct net_device *, int, u8, u8, u8, u8);
|
|
+ void (*setpgbwgcfgrx)(struct net_device *, int, u8);
|
|
+ void (*getpgtccfgtx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);
|
|
+ void (*getpgbwgcfgtx)(struct net_device *, int, u8 *);
|
|
+ void (*getpgtccfgrx)(struct net_device *, int, u8 *, u8 *, u8 *, u8 *);
|
|
+ void (*getpgbwgcfgrx)(struct net_device *, int, u8 *);
|
|
+ void (*setpfccfg)(struct net_device *, int, u8);
|
|
+ void (*getpfccfg)(struct net_device *, int, u8 *);
|
|
+ u8 (*setall)(struct net_device *);
|
|
+ u8 (*getcap)(struct net_device *, int, u8 *);
|
|
+ int (*getnumtcs)(struct net_device *, int, u8 *);
|
|
+ int (*setnumtcs)(struct net_device *, int, u8);
|
|
+ u8 (*getpfcstate)(struct net_device *);
|
|
+ void (*setpfcstate)(struct net_device *, u8);
|
|
+ void (*getbcncfg)(struct net_device *, int, u32 *);
|
|
+ void (*setbcncfg)(struct net_device *, int, u32);
|
|
+ void (*getbcnrp)(struct net_device *, int, u8 *);
|
|
+ void (*setbcnrp)(struct net_device *, int, u8);
|
|
+ int (*setapp)(struct net_device *, u8, u16, u8);
|
|
+ int (*getapp)(struct net_device *, u8, u16);
|
|
+ u8 (*getfeatcfg)(struct net_device *, int, u8 *);
|
|
+ u8 (*setfeatcfg)(struct net_device *, int, u8);
|
|
+ u8 (*getdcbx)(struct net_device *);
|
|
+ u8 (*setdcbx)(struct net_device *, u8);
|
|
+ int (*peer_getappinfo)(struct net_device *, struct dcb_peer_app_info *, u16 *);
|
|
+ int (*peer_getapptable)(struct net_device *, struct dcb_app *);
|
|
+ int (*cee_peer_getpg)(struct net_device *, struct cee_pg *);
|
|
+ int (*cee_peer_getpfc)(struct net_device *, struct cee_pfc *);
|
|
+ int (*dcbnl_getbuffer)(struct net_device *, struct dcbnl_buffer *);
|
|
+ int (*dcbnl_setbuffer)(struct net_device *, struct dcbnl_buffer *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long unsigned int kabi_reserved9;
|
|
+ long unsigned int kabi_reserved10;
|
|
+ long unsigned int kabi_reserved11;
|
|
+ long unsigned int kabi_reserved12;
|
|
+ long unsigned int kabi_reserved13;
|
|
+ long unsigned int kabi_reserved14;
|
|
+ long unsigned int kabi_reserved15;
|
|
+};
|
|
+
|
|
+struct netprio_map {
|
|
+ struct callback_head rcu;
|
|
+ u32 priomap_len;
|
|
+ u32 priomap[0];
|
|
+};
|
|
+
|
|
+struct xdp_mem_info {
|
|
+ u32 type;
|
|
+ u32 id;
|
|
+};
|
|
+
|
|
+struct xdp_rxq_info {
|
|
+ struct net_device *dev;
|
|
+ u32 queue_index;
|
|
+ u32 reg_state;
|
|
+ struct xdp_mem_info mem;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct xdp_frame {
|
|
+ void *data;
|
|
+ u16 len;
|
|
+ u16 headroom;
|
|
+ u16 metasize;
|
|
+ struct xdp_mem_info mem;
|
|
+ struct net_device *dev_rx;
|
|
+};
|
|
+
|
|
+struct nlmsghdr {
|
|
+ __u32 nlmsg_len;
|
|
+ __u16 nlmsg_type;
|
|
+ __u16 nlmsg_flags;
|
|
+ __u32 nlmsg_seq;
|
|
+ __u32 nlmsg_pid;
|
|
+};
|
|
+
|
|
+struct nlattr {
|
|
+ __u16 nla_len;
|
|
+ __u16 nla_type;
|
|
+};
|
|
+
|
|
+struct netlink_ext_ack {
|
|
+ const char *_msg;
|
|
+ const struct nlattr *bad_attr;
|
|
+ u8 cookie[20];
|
|
+ u8 cookie_len;
|
|
+};
|
|
+
|
|
+struct netlink_callback {
|
|
+ struct sk_buff *skb;
|
|
+ const struct nlmsghdr *nlh;
|
|
+ int (*dump)(struct sk_buff *, struct netlink_callback *);
|
|
+ int (*done)(struct netlink_callback *);
|
|
+ void *data;
|
|
+ struct module *module;
|
|
+ u16 family;
|
|
+ u16 min_dump_alloc;
|
|
+ unsigned int prev_seq;
|
|
+ unsigned int seq;
|
|
+ long int args[6];
|
|
+};
|
|
+
|
|
+struct ndmsg {
|
|
+ __u8 ndm_family;
|
|
+ __u8 ndm_pad1;
|
|
+ __u16 ndm_pad2;
|
|
+ __s32 ndm_ifindex;
|
|
+ __u16 ndm_state;
|
|
+ __u8 ndm_flags;
|
|
+ __u8 ndm_type;
|
|
+};
|
|
+
|
|
+struct rtnl_link_stats64 {
|
|
+ __u64 rx_packets;
|
|
+ __u64 tx_packets;
|
|
+ __u64 rx_bytes;
|
|
+ __u64 tx_bytes;
|
|
+ __u64 rx_errors;
|
|
+ __u64 tx_errors;
|
|
+ __u64 rx_dropped;
|
|
+ __u64 tx_dropped;
|
|
+ __u64 multicast;
|
|
+ __u64 collisions;
|
|
+ __u64 rx_length_errors;
|
|
+ __u64 rx_over_errors;
|
|
+ __u64 rx_crc_errors;
|
|
+ __u64 rx_frame_errors;
|
|
+ __u64 rx_fifo_errors;
|
|
+ __u64 rx_missed_errors;
|
|
+ __u64 tx_aborted_errors;
|
|
+ __u64 tx_carrier_errors;
|
|
+ __u64 tx_fifo_errors;
|
|
+ __u64 tx_heartbeat_errors;
|
|
+ __u64 tx_window_errors;
|
|
+ __u64 rx_compressed;
|
|
+ __u64 tx_compressed;
|
|
+ __u64 rx_nohandler;
|
|
+};
|
|
+
|
|
+struct ifla_vf_stats {
|
|
+ __u64 rx_packets;
|
|
+ __u64 tx_packets;
|
|
+ __u64 rx_bytes;
|
|
+ __u64 tx_bytes;
|
|
+ __u64 broadcast;
|
|
+ __u64 multicast;
|
|
+ __u64 rx_dropped;
|
|
+ __u64 tx_dropped;
|
|
+};
|
|
+
|
|
+struct ifla_vf_info {
|
|
+ __u32 vf;
|
|
+ __u8 mac[32];
|
|
+ __u32 vlan;
|
|
+ __u32 qos;
|
|
+ __u32 spoofchk;
|
|
+ __u32 linkstate;
|
|
+ __u32 min_tx_rate;
|
|
+ __u32 max_tx_rate;
|
|
+ __u32 rss_query_en;
|
|
+ __u32 trusted;
|
|
+ __be16 vlan_proto;
|
|
+};
|
|
+
|
|
+struct tc_stats {
|
|
+ __u64 bytes;
|
|
+ __u32 packets;
|
|
+ __u32 drops;
|
|
+ __u32 overlimits;
|
|
+ __u32 bps;
|
|
+ __u32 pps;
|
|
+ __u32 qlen;
|
|
+ __u32 backlog;
|
|
+};
|
|
+
|
|
+struct tc_sizespec {
|
|
+ unsigned char cell_log;
|
|
+ unsigned char size_log;
|
|
+ short int cell_align;
|
|
+ int overhead;
|
|
+ unsigned int linklayer;
|
|
+ unsigned int mpu;
|
|
+ unsigned int mtu;
|
|
+ unsigned int tsize;
|
|
+};
|
|
+
|
|
+enum netdev_tx {
|
|
+ __NETDEV_TX_MIN = -2147483648,
|
|
+ NETDEV_TX_OK = 0,
|
|
+ NETDEV_TX_BUSY = 16,
|
|
+};
|
|
+
|
|
+typedef enum netdev_tx netdev_tx_t;
|
|
+
|
|
+struct header_ops {
|
|
+ int (*create)(struct sk_buff *, struct net_device *, short unsigned int, const void *, const void *, unsigned int);
|
|
+ int (*parse)(const struct sk_buff *, unsigned char *);
|
|
+ int (*cache)(const struct neighbour *, struct hh_cache *, __be16);
|
|
+ void (*cache_update)(struct hh_cache *, const struct net_device *, const unsigned char *);
|
|
+ bool (*validate)(const char *, unsigned int);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+};
|
|
+
|
|
+struct gro_list {
|
|
+ struct list_head list;
|
|
+ int count;
|
|
+};
|
|
+
|
|
+struct napi_struct {
|
|
+ struct list_head poll_list;
|
|
+ long unsigned int state;
|
|
+ int weight;
|
|
+ long unsigned int gro_bitmask;
|
|
+ int (*poll)(struct napi_struct *, int);
|
|
+ int poll_owner;
|
|
+ struct net_device *dev;
|
|
+ struct gro_list gro_hash[8];
|
|
+ struct sk_buff *skb;
|
|
+ struct hrtimer timer;
|
|
+ struct list_head dev_list;
|
|
+ struct hlist_node napi_hash_node;
|
|
+ unsigned int napi_id;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+};
|
|
+
|
|
+struct netdev_queue {
|
|
+ struct net_device *dev;
|
|
+ struct Qdisc *qdisc;
|
|
+ struct Qdisc *qdisc_sleeping;
|
|
+ struct kobject kobj;
|
|
+ int numa_node;
|
|
+ long unsigned int tx_maxrate;
|
|
+ long unsigned int trans_timeout;
|
|
+ struct net_device *sb_dev;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ spinlock_t _xmit_lock;
|
|
+ int xmit_lock_owner;
|
|
+ long unsigned int trans_start;
|
|
+ long unsigned int state;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct dql dql;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+};
|
|
+
|
|
+struct qdisc_skb_head {
|
|
+ struct sk_buff *head;
|
|
+ struct sk_buff *tail;
|
|
+ union {
|
|
+ u32 qlen;
|
|
+ atomic_t atomic_qlen;
|
|
+ };
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct gnet_stats_basic_packed {
|
|
+ __u64 bytes;
|
|
+ __u32 packets;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct gnet_stats_queue {
|
|
+ __u32 qlen;
|
|
+ __u32 backlog;
|
|
+ __u32 drops;
|
|
+ __u32 requeues;
|
|
+ __u32 overlimits;
|
|
+};
|
|
+
|
|
+struct Qdisc_ops;
|
|
+
|
|
+struct qdisc_size_table;
|
|
+
|
|
+struct net_rate_estimator;
|
|
+
|
|
+struct gnet_stats_basic_cpu;
|
|
+
|
|
+struct Qdisc {
|
|
+ int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
|
|
+ struct sk_buff * (*dequeue)(struct Qdisc *);
|
|
+ unsigned int flags;
|
|
+ u32 limit;
|
|
+ const struct Qdisc_ops *ops;
|
|
+ struct qdisc_size_table *stab;
|
|
+ struct hlist_node hash;
|
|
+ u32 handle;
|
|
+ u32 parent;
|
|
+ struct netdev_queue *dev_queue;
|
|
+ struct net_rate_estimator *rate_est;
|
|
+ struct gnet_stats_basic_cpu *cpu_bstats;
|
|
+ struct gnet_stats_queue *cpu_qstats;
|
|
+ int padded;
|
|
+ refcount_t refcnt;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct sk_buff_head gso_skb;
|
|
+ struct qdisc_skb_head q;
|
|
+ struct gnet_stats_basic_packed bstats;
|
|
+ seqcount_t running;
|
|
+ struct gnet_stats_queue qstats;
|
|
+ long unsigned int state;
|
|
+ struct Qdisc *next_sched;
|
|
+ struct sk_buff_head skb_bad_txq;
|
|
+ spinlock_t busylock;
|
|
+ spinlock_t seqlock;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct rps_map {
|
|
+ unsigned int len;
|
|
+ struct callback_head rcu;
|
|
+ u16 cpus[0];
|
|
+};
|
|
+
|
|
+struct rps_dev_flow {
|
|
+ u16 cpu;
|
|
+ u16 filter;
|
|
+ unsigned int last_qtail;
|
|
+};
|
|
+
|
|
+struct rps_dev_flow_table {
|
|
+ unsigned int mask;
|
|
+ struct callback_head rcu;
|
|
+ struct rps_dev_flow flows[0];
|
|
+};
|
|
+
|
|
+struct rps_sock_flow_table {
|
|
+ u32 mask;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ u32 ents[0];
|
|
+};
|
|
+
|
|
+struct netdev_rx_queue {
|
|
+ struct rps_map *rps_map;
|
|
+ struct rps_dev_flow_table *rps_flow_table;
|
|
+ struct kobject kobj;
|
|
+ struct net_device *dev;
|
|
+ long: 64;
|
|
+ struct xdp_rxq_info xdp_rxq;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+};
|
|
+
|
|
+struct xps_map {
|
|
+ unsigned int len;
|
|
+ unsigned int alloc_len;
|
|
+ struct callback_head rcu;
|
|
+ u16 queues[0];
|
|
+};
|
|
+
|
|
+struct xps_dev_maps {
|
|
+ struct callback_head rcu;
|
|
+ struct xps_map *attr_map[0];
|
|
+};
|
|
+
|
|
+struct netdev_fcoe_hbainfo {
|
|
+ char manufacturer[64];
|
|
+ char serial_number[64];
|
|
+ char hardware_version[64];
|
|
+ char driver_version[64];
|
|
+ char optionrom_version[64];
|
|
+ char firmware_version[64];
|
|
+ char model[256];
|
|
+ char model_description[256];
|
|
+};
|
|
+
|
|
+struct netdev_phys_item_id {
|
|
+ unsigned char id[32];
|
|
+ unsigned char id_len;
|
|
+};
|
|
+
|
|
+typedef u16 (*select_queue_fallback_t)(struct net_device *, struct sk_buff *, struct net_device *);
|
|
+
|
|
+enum tc_setup_type {
|
|
+ TC_SETUP_QDISC_MQPRIO = 0,
|
|
+ TC_SETUP_CLSU32 = 1,
|
|
+ TC_SETUP_CLSFLOWER = 2,
|
|
+ TC_SETUP_CLSMATCHALL = 3,
|
|
+ TC_SETUP_CLSBPF = 4,
|
|
+ TC_SETUP_BLOCK = 5,
|
|
+ TC_SETUP_QDISC_CBS = 6,
|
|
+ TC_SETUP_QDISC_RED = 7,
|
|
+ TC_SETUP_QDISC_PRIO = 8,
|
|
+ TC_SETUP_QDISC_MQ = 9,
|
|
+ TC_SETUP_QDISC_ETF = 10,
|
|
+};
|
|
+
|
|
+enum bpf_netdev_command {
|
|
+ XDP_SETUP_PROG = 0,
|
|
+ XDP_SETUP_PROG_HW = 1,
|
|
+ XDP_QUERY_PROG = 2,
|
|
+ XDP_QUERY_PROG_HW = 3,
|
|
+ BPF_OFFLOAD_VERIFIER_PREP = 4,
|
|
+ BPF_OFFLOAD_TRANSLATE = 5,
|
|
+ BPF_OFFLOAD_DESTROY = 6,
|
|
+ BPF_OFFLOAD_MAP_ALLOC = 7,
|
|
+ BPF_OFFLOAD_MAP_FREE = 8,
|
|
+ XDP_QUERY_XSK_UMEM = 9,
|
|
+ XDP_SETUP_XSK_UMEM = 10,
|
|
+};
|
|
+
|
|
+struct bpf_verifier_env;
|
|
+
|
|
+struct bpf_prog_offload_ops {
|
|
+ int (*insn_hook)(struct bpf_verifier_env *, int, int);
|
|
+};
|
|
+
|
|
+struct bpf_offloaded_map;
|
|
+
|
|
+struct xdp_umem;
|
|
+
|
|
+struct netdev_bpf {
|
|
+ enum bpf_netdev_command command;
|
|
+ union {
|
|
+ struct {
|
|
+ u32 flags;
|
|
+ struct bpf_prog *prog;
|
|
+ struct netlink_ext_ack *extack;
|
|
+ };
|
|
+ struct {
|
|
+ u32 prog_id;
|
|
+ u32 prog_flags;
|
|
+ };
|
|
+ struct {
|
|
+ struct bpf_prog *prog;
|
|
+ const struct bpf_prog_offload_ops *ops;
|
|
+ } verifier;
|
|
+ struct {
|
|
+ struct bpf_prog *prog;
|
|
+ } offload;
|
|
+ struct {
|
|
+ struct bpf_offloaded_map *offmap;
|
|
+ };
|
|
+ struct {
|
|
+ struct xdp_umem *umem;
|
|
+ u16 queue_id;
|
|
+ } xsk;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct xfrmdev_ops {
|
|
+ int (*xdo_dev_state_add)(struct xfrm_state *);
|
|
+ void (*xdo_dev_state_delete)(struct xfrm_state *);
|
|
+ void (*xdo_dev_state_free)(struct xfrm_state *);
|
|
+ bool (*xdo_dev_offload_ok)(struct sk_buff *, struct xfrm_state *);
|
|
+ void (*xdo_dev_state_advance_esn)(struct xfrm_state *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+};
|
|
+
|
|
+enum tls_offload_ctx_dir {
|
|
+ TLS_OFFLOAD_CTX_DIR_RX = 0,
|
|
+ TLS_OFFLOAD_CTX_DIR_TX = 1,
|
|
+};
|
|
+
|
|
+struct tls_crypto_info;
|
|
+
|
|
+struct tls_context;
|
|
+
|
|
+struct tlsdev_ops {
|
|
+ int (*tls_dev_add)(struct net_device *, struct sock *, enum tls_offload_ctx_dir, struct tls_crypto_info *, u32);
|
|
+ void (*tls_dev_del)(struct net_device *, struct tls_context *, enum tls_offload_ctx_dir);
|
|
+ void (*tls_dev_resync_rx)(struct net_device *, struct sock *, u32, u64);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+};
|
|
+
|
|
+struct dev_ifalias {
|
|
+ struct callback_head rcuhead;
|
|
+ char ifalias[0];
|
|
+};
|
|
+
|
|
+struct udp_tunnel_info;
|
|
+
|
|
+struct net_device_ops {
|
|
+ int (*ndo_init)(struct net_device *);
|
|
+ void (*ndo_uninit)(struct net_device *);
|
|
+ int (*ndo_open)(struct net_device *);
|
|
+ int (*ndo_stop)(struct net_device *);
|
|
+ netdev_tx_t (*ndo_start_xmit)(struct sk_buff *, struct net_device *);
|
|
+ netdev_features_t (*ndo_features_check)(struct sk_buff *, struct net_device *, netdev_features_t);
|
|
+ u16 (*ndo_select_queue)(struct net_device *, struct sk_buff *, struct net_device *, select_queue_fallback_t);
|
|
+ void (*ndo_change_rx_flags)(struct net_device *, int);
|
|
+ void (*ndo_set_rx_mode)(struct net_device *);
|
|
+ int (*ndo_set_mac_address)(struct net_device *, void *);
|
|
+ int (*ndo_validate_addr)(struct net_device *);
|
|
+ int (*ndo_do_ioctl)(struct net_device *, struct ifreq *, int);
|
|
+ int (*ndo_set_config)(struct net_device *, struct ifmap *);
|
|
+ int (*ndo_change_mtu)(struct net_device *, int);
|
|
+ int (*ndo_neigh_setup)(struct net_device *, struct neigh_parms *);
|
|
+ void (*ndo_tx_timeout)(struct net_device *);
|
|
+ void (*ndo_get_stats64)(struct net_device *, struct rtnl_link_stats64 *);
|
|
+ bool (*ndo_has_offload_stats)(const struct net_device *, int);
|
|
+ int (*ndo_get_offload_stats)(int, const struct net_device *, void *);
|
|
+ struct net_device_stats * (*ndo_get_stats)(struct net_device *);
|
|
+ int (*ndo_vlan_rx_add_vid)(struct net_device *, __be16, u16);
|
|
+ int (*ndo_vlan_rx_kill_vid)(struct net_device *, __be16, u16);
|
|
+ void (*ndo_poll_controller)(struct net_device *);
|
|
+ int (*ndo_netpoll_setup)(struct net_device *, struct netpoll_info *);
|
|
+ void (*ndo_netpoll_cleanup)(struct net_device *);
|
|
+ int (*ndo_set_vf_mac)(struct net_device *, int, u8 *);
|
|
+ int (*ndo_set_vf_vlan)(struct net_device *, int, u16, u8, __be16);
|
|
+ int (*ndo_set_vf_rate)(struct net_device *, int, int, int);
|
|
+ int (*ndo_set_vf_spoofchk)(struct net_device *, int, bool);
|
|
+ int (*ndo_set_vf_trust)(struct net_device *, int, bool);
|
|
+ int (*ndo_get_vf_config)(struct net_device *, int, struct ifla_vf_info *);
|
|
+ int (*ndo_set_vf_link_state)(struct net_device *, int, int);
|
|
+ int (*ndo_get_vf_stats)(struct net_device *, int, struct ifla_vf_stats *);
|
|
+ int (*ndo_set_vf_port)(struct net_device *, int, struct nlattr **);
|
|
+ int (*ndo_get_vf_port)(struct net_device *, int, struct sk_buff *);
|
|
+ int (*ndo_set_vf_guid)(struct net_device *, int, u64, int);
|
|
+ int (*ndo_set_vf_rss_query_en)(struct net_device *, int, bool);
|
|
+ int (*ndo_setup_tc)(struct net_device *, enum tc_setup_type, void *);
|
|
+ int (*ndo_fcoe_enable)(struct net_device *);
|
|
+ int (*ndo_fcoe_disable)(struct net_device *);
|
|
+ int (*ndo_fcoe_ddp_setup)(struct net_device *, u16, struct scatterlist *, unsigned int);
|
|
+ int (*ndo_fcoe_ddp_done)(struct net_device *, u16);
|
|
+ int (*ndo_fcoe_ddp_target)(struct net_device *, u16, struct scatterlist *, unsigned int);
|
|
+ int (*ndo_fcoe_get_hbainfo)(struct net_device *, struct netdev_fcoe_hbainfo *);
|
|
+ int (*ndo_fcoe_get_wwn)(struct net_device *, u64 *, int);
|
|
+ int (*ndo_rx_flow_steer)(struct net_device *, const struct sk_buff *, u16, u32);
|
|
+ int (*ndo_add_slave)(struct net_device *, struct net_device *, struct netlink_ext_ack *);
|
|
+ int (*ndo_del_slave)(struct net_device *, struct net_device *);
|
|
+ netdev_features_t (*ndo_fix_features)(struct net_device *, netdev_features_t);
|
|
+ int (*ndo_set_features)(struct net_device *, netdev_features_t);
|
|
+ int (*ndo_neigh_construct)(struct net_device *, struct neighbour *);
|
|
+ void (*ndo_neigh_destroy)(struct net_device *, struct neighbour *);
|
|
+ int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16, u16);
|
|
+ int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device *, const unsigned char *, u16);
|
|
+ int (*ndo_fdb_dump)(struct sk_buff *, struct netlink_callback *, struct net_device *, struct net_device *, int *);
|
|
+ int (*ndo_bridge_setlink)(struct net_device *, struct nlmsghdr *, u16);
|
|
+ int (*ndo_bridge_getlink)(struct sk_buff *, u32, u32, struct net_device *, u32, int);
|
|
+ int (*ndo_bridge_dellink)(struct net_device *, struct nlmsghdr *, u16);
|
|
+ int (*ndo_change_carrier)(struct net_device *, bool);
|
|
+ int (*ndo_get_phys_port_id)(struct net_device *, struct netdev_phys_item_id *);
|
|
+ int (*ndo_get_phys_port_name)(struct net_device *, char *, size_t);
|
|
+ void (*ndo_udp_tunnel_add)(struct net_device *, struct udp_tunnel_info *);
|
|
+ void (*ndo_udp_tunnel_del)(struct net_device *, struct udp_tunnel_info *);
|
|
+ void * (*ndo_dfwd_add_station)(struct net_device *, struct net_device *);
|
|
+ void (*ndo_dfwd_del_station)(struct net_device *, void *);
|
|
+ int (*ndo_get_lock_subclass)(struct net_device *);
|
|
+ int (*ndo_set_tx_maxrate)(struct net_device *, int, u32);
|
|
+ int (*ndo_get_iflink)(const struct net_device *);
|
|
+ int (*ndo_change_proto_down)(struct net_device *, bool);
|
|
+ int (*ndo_fill_metadata_dst)(struct net_device *, struct sk_buff *);
|
|
+ void (*ndo_set_rx_headroom)(struct net_device *, int);
|
|
+ int (*ndo_bpf)(struct net_device *, struct netdev_bpf *);
|
|
+ int (*ndo_xdp_xmit)(struct net_device *, int, struct xdp_frame **, u32);
|
|
+ int (*ndo_xsk_async_xmit)(struct net_device *, u32);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long unsigned int kabi_reserved9;
|
|
+ long unsigned int kabi_reserved10;
|
|
+ long unsigned int kabi_reserved11;
|
|
+ long unsigned int kabi_reserved12;
|
|
+ long unsigned int kabi_reserved13;
|
|
+ long unsigned int kabi_reserved14;
|
|
+ long unsigned int kabi_reserved15;
|
|
+ long unsigned int kabi_reserved16;
|
|
+ long unsigned int kabi_reserved17;
|
|
+ long unsigned int kabi_reserved18;
|
|
+ long unsigned int kabi_reserved19;
|
|
+ long unsigned int kabi_reserved20;
|
|
+ long unsigned int kabi_reserved21;
|
|
+ long unsigned int kabi_reserved22;
|
|
+ long unsigned int kabi_reserved23;
|
|
+ long unsigned int kabi_reserved24;
|
|
+ long unsigned int kabi_reserved25;
|
|
+ long unsigned int kabi_reserved26;
|
|
+ long unsigned int kabi_reserved27;
|
|
+ long unsigned int kabi_reserved28;
|
|
+ long unsigned int kabi_reserved29;
|
|
+ long unsigned int kabi_reserved30;
|
|
+ long unsigned int kabi_reserved31;
|
|
+ long unsigned int kabi_reserved32;
|
|
+ long unsigned int kabi_reserved33;
|
|
+ long unsigned int kabi_reserved34;
|
|
+ long unsigned int kabi_reserved35;
|
|
+ long unsigned int kabi_reserved36;
|
|
+ long unsigned int kabi_reserved37;
|
|
+ long unsigned int kabi_reserved38;
|
|
+ long unsigned int kabi_reserved39;
|
|
+ long unsigned int kabi_reserved40;
|
|
+ long unsigned int kabi_reserved41;
|
|
+ long unsigned int kabi_reserved42;
|
|
+ long unsigned int kabi_reserved43;
|
|
+ long unsigned int kabi_reserved44;
|
|
+ long unsigned int kabi_reserved45;
|
|
+ long unsigned int kabi_reserved46;
|
|
+ long unsigned int kabi_reserved47;
|
|
+};
|
|
+
|
|
+struct neigh_parms {
|
|
+ possible_net_t net;
|
|
+ struct net_device *dev;
|
|
+ struct list_head list;
|
|
+ int (*neigh_setup)(struct neighbour *);
|
|
+ void (*neigh_cleanup)(struct neighbour *);
|
|
+ struct neigh_table *tbl;
|
|
+ void *sysctl_table;
|
|
+ int dead;
|
|
+ refcount_t refcnt;
|
|
+ struct callback_head callback_head;
|
|
+ int reachable_time;
|
|
+ int data[13];
|
|
+ long unsigned int data_state[1];
|
|
+};
|
|
+
|
|
+struct pcpu_sw_netstats {
|
|
+ u64 rx_packets;
|
|
+ u64 rx_bytes;
|
|
+ u64 tx_packets;
|
|
+ u64 tx_bytes;
|
|
+ struct u64_stats_sync syncp;
|
|
+};
|
|
+
|
|
+struct switchdev_attr;
|
|
+
|
|
+struct switchdev_trans;
|
|
+
|
|
+struct switchdev_obj;
|
|
+
|
|
+struct switchdev_ops {
|
|
+ int (*switchdev_port_attr_get)(struct net_device *, struct switchdev_attr *);
|
|
+ int (*switchdev_port_attr_set)(struct net_device *, const struct switchdev_attr *, struct switchdev_trans *);
|
|
+ int (*switchdev_port_obj_add)(struct net_device *, const struct switchdev_obj *, struct switchdev_trans *);
|
|
+ int (*switchdev_port_obj_del)(struct net_device *, const struct switchdev_obj *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+};
|
|
+
|
|
+struct l3mdev_ops {
|
|
+ u32 (*l3mdev_fib_table)(const struct net_device *);
|
|
+ struct sk_buff * (*l3mdev_l3_rcv)(struct net_device *, struct sk_buff *, u16);
|
|
+ struct sk_buff * (*l3mdev_l3_out)(struct net_device *, struct sock *, struct sk_buff *, u16);
|
|
+ struct dst_entry * (*l3mdev_link_scope_lookup)(const struct net_device *, struct flowi6 *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+};
|
|
+
|
|
+struct nd_opt_hdr;
|
|
+
|
|
+struct ndisc_options;
|
|
+
|
|
+struct prefix_info;
|
|
+
|
|
+struct ndisc_ops {
|
|
+ int (*is_useropt)(u8);
|
|
+ int (*parse_options)(const struct net_device *, struct nd_opt_hdr *, struct ndisc_options *);
|
|
+ void (*update)(const struct net_device *, struct neighbour *, u32, u8, const struct ndisc_options *);
|
|
+ int (*opt_addr_space)(const struct net_device *, u8, struct neighbour *, u8 *, u8 **);
|
|
+ void (*fill_addr_option)(const struct net_device *, struct sk_buff *, u8, const u8 *);
|
|
+ void (*prefix_rcv_add_addr)(struct net *, struct net_device *, const struct prefix_info *, struct inet6_dev *, struct in6_addr *, int, u32, bool, bool, __u32, u32, bool);
|
|
+};
|
|
+
|
|
+struct ipv6_devstat {
|
|
+ struct proc_dir_entry *proc_dir_entry;
|
|
+ struct ipstats_mib *ipv6;
|
|
+ struct icmpv6_mib_device *icmpv6dev;
|
|
+ struct icmpv6msg_mib_device *icmpv6msgdev;
|
|
+};
|
|
+
|
|
+struct ifmcaddr6;
|
|
+
|
|
+struct ifacaddr6;
|
|
+
|
|
+struct inet6_dev {
|
|
+ struct net_device *dev;
|
|
+ struct list_head addr_list;
|
|
+ struct ifmcaddr6 *mc_list;
|
|
+ struct ifmcaddr6 *mc_tomb;
|
|
+ spinlock_t mc_lock;
|
|
+ unsigned char mc_qrv;
|
|
+ unsigned char mc_gq_running;
|
|
+ unsigned char mc_ifc_count;
|
|
+ unsigned char mc_dad_count;
|
|
+ long unsigned int mc_v1_seen;
|
|
+ long unsigned int mc_qi;
|
|
+ long unsigned int mc_qri;
|
|
+ long unsigned int mc_maxdelay;
|
|
+ struct timer_list mc_gq_timer;
|
|
+ struct timer_list mc_ifc_timer;
|
|
+ struct timer_list mc_dad_timer;
|
|
+ struct ifacaddr6 *ac_list;
|
|
+ rwlock_t lock;
|
|
+ refcount_t refcnt;
|
|
+ __u32 if_flags;
|
|
+ int dead;
|
|
+ u32 desync_factor;
|
|
+ u8 rndid[8];
|
|
+ struct list_head tempaddr_list;
|
|
+ struct in6_addr token;
|
|
+ struct neigh_parms *nd_parms;
|
|
+ struct ipv6_devconf cnf;
|
|
+ struct ipv6_devstat stats;
|
|
+ struct timer_list rs_timer;
|
|
+ __s32 rs_interval;
|
|
+ __u8 rs_probes;
|
|
+ long unsigned int tstamp;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct tcf_proto;
|
|
+
|
|
+struct mini_Qdisc {
|
|
+ struct tcf_proto *filter_list;
|
|
+ struct gnet_stats_basic_cpu *cpu_bstats;
|
|
+ struct gnet_stats_queue *cpu_qstats;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct rtnl_link_ops {
|
|
+ struct list_head list;
|
|
+ const char *kind;
|
|
+ size_t priv_size;
|
|
+ void (*setup)(struct net_device *);
|
|
+ unsigned int maxtype;
|
|
+ const struct nla_policy *policy;
|
|
+ int (*validate)(struct nlattr **, struct nlattr **, struct netlink_ext_ack *);
|
|
+ int (*newlink)(struct net *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *);
|
|
+ int (*changelink)(struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *);
|
|
+ void (*dellink)(struct net_device *, struct list_head *);
|
|
+ size_t (*get_size)(const struct net_device *);
|
|
+ int (*fill_info)(struct sk_buff *, const struct net_device *);
|
|
+ size_t (*get_xstats_size)(const struct net_device *);
|
|
+ int (*fill_xstats)(struct sk_buff *, const struct net_device *);
|
|
+ unsigned int (*get_num_tx_queues)();
|
|
+ unsigned int (*get_num_rx_queues)();
|
|
+ unsigned int slave_maxtype;
|
|
+ const struct nla_policy *slave_policy;
|
|
+ int (*slave_changelink)(struct net_device *, struct net_device *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *);
|
|
+ size_t (*get_slave_size)(const struct net_device *, const struct net_device *);
|
|
+ int (*fill_slave_info)(struct sk_buff *, const struct net_device *, const struct net_device *);
|
|
+ struct net * (*get_link_net)(const struct net_device *);
|
|
+ size_t (*get_linkxstats_size)(const struct net_device *, int);
|
|
+ int (*fill_linkxstats)(struct sk_buff *, const struct net_device *, int *, int);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long unsigned int kabi_reserved9;
|
|
+ long unsigned int kabi_reserved10;
|
|
+ long unsigned int kabi_reserved11;
|
|
+ long unsigned int kabi_reserved12;
|
|
+ long unsigned int kabi_reserved13;
|
|
+ long unsigned int kabi_reserved14;
|
|
+ long unsigned int kabi_reserved15;
|
|
+};
|
|
+
|
|
+struct sd_flow_limit {
|
|
+ u64 count;
|
|
+ unsigned int num_buckets;
|
|
+ unsigned int history_head;
|
|
+ u16 history[128];
|
|
+ u8 buckets[0];
|
|
+};
|
|
+
|
|
+struct softnet_data {
|
|
+ struct list_head poll_list;
|
|
+ struct sk_buff_head process_queue;
|
|
+ unsigned int processed;
|
|
+ unsigned int time_squeeze;
|
|
+ unsigned int received_rps;
|
|
+ struct softnet_data *rps_ipi_list;
|
|
+ struct sd_flow_limit *flow_limit;
|
|
+ struct Qdisc *output_queue;
|
|
+ struct Qdisc **output_queue_tailp;
|
|
+ struct sk_buff *completion_queue;
|
|
+ struct sk_buff_head xfrm_backlog;
|
|
+ struct {
|
|
+ u16 recursion;
|
|
+ u8 more;
|
|
+ } xmit;
|
|
+ int: 32;
|
|
+ unsigned int input_queue_head;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ call_single_data_t csd;
|
|
+ struct softnet_data *rps_ipi_next;
|
|
+ unsigned int cpu;
|
|
+ unsigned int input_queue_tail;
|
|
+ unsigned int dropped;
|
|
+ struct sk_buff_head input_pkt_queue;
|
|
+ struct napi_struct backlog;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ RTAX_UNSPEC = 0,
|
|
+ RTAX_LOCK = 1,
|
|
+ RTAX_MTU = 2,
|
|
+ RTAX_WINDOW = 3,
|
|
+ RTAX_RTT = 4,
|
|
+ RTAX_RTTVAR = 5,
|
|
+ RTAX_SSTHRESH = 6,
|
|
+ RTAX_CWND = 7,
|
|
+ RTAX_ADVMSS = 8,
|
|
+ RTAX_REORDERING = 9,
|
|
+ RTAX_HOPLIMIT = 10,
|
|
+ RTAX_INITCWND = 11,
|
|
+ RTAX_FEATURES = 12,
|
|
+ RTAX_RTO_MIN = 13,
|
|
+ RTAX_INITRWND = 14,
|
|
+ RTAX_QUICKACK = 15,
|
|
+ RTAX_CC_ALGO = 16,
|
|
+ RTAX_FASTOPEN_NO_COOKIE = 17,
|
|
+ __RTAX_MAX = 18,
|
|
+};
|
|
+
|
|
+struct tcmsg {
|
|
+ unsigned char tcm_family;
|
|
+ unsigned char tcm__pad1;
|
|
+ short unsigned int tcm__pad2;
|
|
+ int tcm_ifindex;
|
|
+ __u32 tcm_handle;
|
|
+ __u32 tcm_parent;
|
|
+ __u32 tcm_info;
|
|
+};
|
|
+
|
|
+struct gnet_stats_basic_cpu {
|
|
+ struct gnet_stats_basic_packed bstats;
|
|
+ struct u64_stats_sync syncp;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct gnet_dump {
|
|
+ spinlock_t *lock;
|
|
+ struct sk_buff *skb;
|
|
+ struct nlattr *tail;
|
|
+ int compat_tc_stats;
|
|
+ int compat_xstats;
|
|
+ int padattr;
|
|
+ void *xstats;
|
|
+ int xstats_len;
|
|
+ struct tc_stats tc_stats;
|
|
+};
|
|
+
|
|
+struct nla_policy {
|
|
+ u16 type;
|
|
+ u16 len;
|
|
+ void *validation_data;
|
|
+};
|
|
+
|
|
+typedef int tc_setup_cb_t(enum tc_setup_type, void *, void *);
|
|
+
|
|
+struct qdisc_size_table {
|
|
+ struct callback_head rcu;
|
|
+ struct list_head list;
|
|
+ struct tc_sizespec szopts;
|
|
+ int refcnt;
|
|
+ u16 data[0];
|
|
+};
|
|
+
|
|
+struct Qdisc_class_ops;
|
|
+
|
|
+struct Qdisc_ops {
|
|
+ struct Qdisc_ops *next;
|
|
+ const struct Qdisc_class_ops *cl_ops;
|
|
+ char id[16];
|
|
+ int priv_size;
|
|
+ unsigned int static_flags;
|
|
+ int (*enqueue)(struct sk_buff *, struct Qdisc *, struct sk_buff **);
|
|
+ struct sk_buff * (*dequeue)(struct Qdisc *);
|
|
+ struct sk_buff * (*peek)(struct Qdisc *);
|
|
+ int (*init)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *);
|
|
+ void (*reset)(struct Qdisc *);
|
|
+ void (*destroy)(struct Qdisc *);
|
|
+ int (*change)(struct Qdisc *, struct nlattr *, struct netlink_ext_ack *);
|
|
+ void (*attach)(struct Qdisc *);
|
|
+ int (*change_tx_queue_len)(struct Qdisc *, unsigned int);
|
|
+ int (*dump)(struct Qdisc *, struct sk_buff *);
|
|
+ int (*dump_stats)(struct Qdisc *, struct gnet_dump *);
|
|
+ void (*ingress_block_set)(struct Qdisc *, u32);
|
|
+ void (*egress_block_set)(struct Qdisc *, u32);
|
|
+ u32 (*ingress_block_get)(struct Qdisc *);
|
|
+ u32 (*egress_block_get)(struct Qdisc *);
|
|
+ struct module *owner;
|
|
+};
|
|
+
|
|
+struct qdisc_walker;
|
|
+
|
|
+struct tcf_block;
|
|
+
|
|
+struct Qdisc_class_ops {
|
|
+ struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
|
|
+ int (*graft)(struct Qdisc *, long unsigned int, struct Qdisc *, struct Qdisc **, struct netlink_ext_ack *);
|
|
+ struct Qdisc * (*leaf)(struct Qdisc *, long unsigned int);
|
|
+ void (*qlen_notify)(struct Qdisc *, long unsigned int);
|
|
+ long unsigned int (*find)(struct Qdisc *, u32);
|
|
+ int (*change)(struct Qdisc *, u32, u32, struct nlattr **, long unsigned int *, struct netlink_ext_ack *);
|
|
+ int (*delete)(struct Qdisc *, long unsigned int);
|
|
+ void (*walk)(struct Qdisc *, struct qdisc_walker *);
|
|
+ struct tcf_block * (*tcf_block)(struct Qdisc *, long unsigned int, struct netlink_ext_ack *);
|
|
+ long unsigned int (*bind_tcf)(struct Qdisc *, long unsigned int, u32);
|
|
+ void (*unbind_tcf)(struct Qdisc *, long unsigned int);
|
|
+ int (*dump)(struct Qdisc *, long unsigned int, struct sk_buff *, struct tcmsg *);
|
|
+ int (*dump_stats)(struct Qdisc *, long unsigned int, struct gnet_dump *);
|
|
+};
|
|
+
|
|
+struct tcf_chain;
|
|
+
|
|
+struct tcf_block {
|
|
+ struct list_head chain_list;
|
|
+ u32 index;
|
|
+ unsigned int refcnt;
|
|
+ struct net *net;
|
|
+ struct Qdisc *q;
|
|
+ struct list_head cb_list;
|
|
+ struct list_head owner_list;
|
|
+ bool keep_dst;
|
|
+ unsigned int offloadcnt;
|
|
+ unsigned int nooffloaddevcnt;
|
|
+ struct {
|
|
+ struct tcf_chain *chain;
|
|
+ struct list_head filter_chain_list;
|
|
+ } chain0;
|
|
+};
|
|
+
|
|
+struct tcf_result;
|
|
+
|
|
+struct tcf_proto_ops;
|
|
+
|
|
+struct tcf_proto {
|
|
+ struct tcf_proto *next;
|
|
+ void *root;
|
|
+ int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *);
|
|
+ __be16 protocol;
|
|
+ u32 prio;
|
|
+ void *data;
|
|
+ const struct tcf_proto_ops *ops;
|
|
+ struct tcf_chain *chain;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct tcf_result {
|
|
+ union {
|
|
+ struct {
|
|
+ long unsigned int class;
|
|
+ u32 classid;
|
|
+ };
|
|
+ const struct tcf_proto *goto_tp;
|
|
+ struct {
|
|
+ bool ingress;
|
|
+ struct gnet_stats_queue *qstats;
|
|
+ };
|
|
+ };
|
|
+};
|
|
+
|
|
+struct tcf_walker;
|
|
+
|
|
+struct tcf_proto_ops {
|
|
+ struct list_head head;
|
|
+ char kind[16];
|
|
+ int (*classify)(struct sk_buff *, const struct tcf_proto *, struct tcf_result *);
|
|
+ int (*init)(struct tcf_proto *);
|
|
+ void (*destroy)(struct tcf_proto *, struct netlink_ext_ack *);
|
|
+ void * (*get)(struct tcf_proto *, u32);
|
|
+ int (*change)(struct net *, struct sk_buff *, struct tcf_proto *, long unsigned int, u32, struct nlattr **, void **, bool, struct netlink_ext_ack *);
|
|
+ int (*delete)(struct tcf_proto *, void *, bool *, struct netlink_ext_ack *);
|
|
+ void (*walk)(struct tcf_proto *, struct tcf_walker *);
|
|
+ int (*reoffload)(struct tcf_proto *, bool, tc_setup_cb_t *, void *, struct netlink_ext_ack *);
|
|
+ void (*bind_class)(void *, u32, long unsigned int);
|
|
+ void * (*tmplt_create)(struct net *, struct tcf_chain *, struct nlattr **, struct netlink_ext_ack *);
|
|
+ void (*tmplt_destroy)(void *);
|
|
+ int (*dump)(struct net *, struct tcf_proto *, void *, struct sk_buff *, struct tcmsg *);
|
|
+ int (*tmplt_dump)(struct sk_buff *, struct net *, void *);
|
|
+ struct module *owner;
|
|
+};
|
|
+
|
|
+struct tcf_chain {
|
|
+ struct tcf_proto *filter_chain;
|
|
+ struct list_head list;
|
|
+ struct tcf_block *block;
|
|
+ u32 index;
|
|
+ unsigned int refcnt;
|
|
+ unsigned int action_refcnt;
|
|
+ bool explicitly_created;
|
|
+ const struct tcf_proto_ops *tmplt_ops;
|
|
+ void *tmplt_priv;
|
|
+};
|
|
+
|
|
+struct sock_fprog_kern {
|
|
+ u16 len;
|
|
+ struct sock_filter *filter;
|
|
+};
|
|
+
|
|
+struct sk_filter {
|
|
+ refcount_t refcnt;
|
|
+ struct callback_head rcu;
|
|
+ struct bpf_prog *prog;
|
|
+};
|
|
+
|
|
+struct bpf_map;
|
|
+
|
|
+struct bpf_redirect_info {
|
|
+ u32 ifindex;
|
|
+ u32 flags;
|
|
+ struct bpf_map *map;
|
|
+ struct bpf_map *map_to_flush;
|
|
+ u32 kern_flags;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NEIGH_VAR_MCAST_PROBES = 0,
|
|
+ NEIGH_VAR_UCAST_PROBES = 1,
|
|
+ NEIGH_VAR_APP_PROBES = 2,
|
|
+ NEIGH_VAR_MCAST_REPROBES = 3,
|
|
+ NEIGH_VAR_RETRANS_TIME = 4,
|
|
+ NEIGH_VAR_BASE_REACHABLE_TIME = 5,
|
|
+ NEIGH_VAR_DELAY_PROBE_TIME = 6,
|
|
+ NEIGH_VAR_GC_STALETIME = 7,
|
|
+ NEIGH_VAR_QUEUE_LEN_BYTES = 8,
|
|
+ NEIGH_VAR_PROXY_QLEN = 9,
|
|
+ NEIGH_VAR_ANYCAST_DELAY = 10,
|
|
+ NEIGH_VAR_PROXY_DELAY = 11,
|
|
+ NEIGH_VAR_LOCKTIME = 12,
|
|
+ NEIGH_VAR_QUEUE_LEN = 13,
|
|
+ NEIGH_VAR_RETRANS_TIME_MS = 14,
|
|
+ NEIGH_VAR_BASE_REACHABLE_TIME_MS = 15,
|
|
+ NEIGH_VAR_GC_INTERVAL = 16,
|
|
+ NEIGH_VAR_GC_THRESH1 = 17,
|
|
+ NEIGH_VAR_GC_THRESH2 = 18,
|
|
+ NEIGH_VAR_GC_THRESH3 = 19,
|
|
+ NEIGH_VAR_MAX = 20,
|
|
+};
|
|
+
|
|
+struct pneigh_entry;
|
|
+
|
|
+struct neigh_statistics;
|
|
+
|
|
+struct neigh_hash_table;
|
|
+
|
|
+struct neigh_table {
|
|
+ int family;
|
|
+ unsigned int entry_size;
|
|
+ unsigned int key_len;
|
|
+ __be16 protocol;
|
|
+ __u32 (*hash)(const void *, const struct net_device *, __u32 *);
|
|
+ bool (*key_eq)(const struct neighbour *, const void *);
|
|
+ int (*constructor)(struct neighbour *);
|
|
+ int (*pconstructor)(struct pneigh_entry *);
|
|
+ void (*pdestructor)(struct pneigh_entry *);
|
|
+ void (*proxy_redo)(struct sk_buff *);
|
|
+ char *id;
|
|
+ struct neigh_parms parms;
|
|
+ struct list_head parms_list;
|
|
+ int gc_interval;
|
|
+ int gc_thresh1;
|
|
+ int gc_thresh2;
|
|
+ int gc_thresh3;
|
|
+ long unsigned int last_flush;
|
|
+ struct delayed_work gc_work;
|
|
+ struct timer_list proxy_timer;
|
|
+ struct sk_buff_head proxy_queue;
|
|
+ atomic_t entries;
|
|
+ rwlock_t lock;
|
|
+ long unsigned int last_rand;
|
|
+ struct neigh_statistics *stats;
|
|
+ struct neigh_hash_table *nht;
|
|
+ struct pneigh_entry **phash_buckets;
|
|
+};
|
|
+
|
|
+struct neigh_statistics {
|
|
+ long unsigned int allocs;
|
|
+ long unsigned int destroys;
|
|
+ long unsigned int hash_grows;
|
|
+ long unsigned int res_failed;
|
|
+ long unsigned int lookups;
|
|
+ long unsigned int hits;
|
|
+ long unsigned int rcv_probes_mcast;
|
|
+ long unsigned int rcv_probes_ucast;
|
|
+ long unsigned int periodic_gc_runs;
|
|
+ long unsigned int forced_gc_runs;
|
|
+ long unsigned int unres_discards;
|
|
+ long unsigned int table_fulls;
|
|
+};
|
|
+
|
|
+struct neigh_ops {
|
|
+ int family;
|
|
+ void (*solicit)(struct neighbour *, struct sk_buff *);
|
|
+ void (*error_report)(struct neighbour *, struct sk_buff *);
|
|
+ int (*output)(struct neighbour *, struct sk_buff *);
|
|
+ int (*connected_output)(struct neighbour *, struct sk_buff *);
|
|
+};
|
|
+
|
|
+struct pneigh_entry {
|
|
+ struct pneigh_entry *next;
|
|
+ possible_net_t net;
|
|
+ struct net_device *dev;
|
|
+ u8 flags;
|
|
+ u8 key[0];
|
|
+};
|
|
+
|
|
+struct neigh_hash_table {
|
|
+ struct neighbour **hash_buckets;
|
|
+ unsigned int hash_shift;
|
|
+ __u32 hash_rnd[4];
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct dst_metrics {
|
|
+ u32 metrics[17];
|
|
+ refcount_t refcnt;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCP_ESTABLISHED = 1,
|
|
+ TCP_SYN_SENT = 2,
|
|
+ TCP_SYN_RECV = 3,
|
|
+ TCP_FIN_WAIT1 = 4,
|
|
+ TCP_FIN_WAIT2 = 5,
|
|
+ TCP_TIME_WAIT = 6,
|
|
+ TCP_CLOSE = 7,
|
|
+ TCP_CLOSE_WAIT = 8,
|
|
+ TCP_LAST_ACK = 9,
|
|
+ TCP_LISTEN = 10,
|
|
+ TCP_CLOSING = 11,
|
|
+ TCP_NEW_SYN_RECV = 12,
|
|
+ TCP_MAX_STATES = 13,
|
|
+};
|
|
+
|
|
+struct smc_hashinfo {
|
|
+ rwlock_t lock;
|
|
+ struct hlist_head ht;
|
|
+};
|
|
+
|
|
+struct fib_rule_hdr {
|
|
+ __u8 family;
|
|
+ __u8 dst_len;
|
|
+ __u8 src_len;
|
|
+ __u8 tos;
|
|
+ __u8 table;
|
|
+ __u8 res1;
|
|
+ __u8 res2;
|
|
+ __u8 action;
|
|
+ __u32 flags;
|
|
+};
|
|
+
|
|
+struct fib_rule_port_range {
|
|
+ __u16 start;
|
|
+ __u16 end;
|
|
+};
|
|
+
|
|
+struct fib_kuid_range {
|
|
+ kuid_t start;
|
|
+ kuid_t end;
|
|
+};
|
|
+
|
|
+struct fib_rule {
|
|
+ struct list_head list;
|
|
+ int iifindex;
|
|
+ int oifindex;
|
|
+ u32 mark;
|
|
+ u32 mark_mask;
|
|
+ u32 flags;
|
|
+ u32 table;
|
|
+ u8 action;
|
|
+ u8 l3mdev;
|
|
+ u8 proto;
|
|
+ u8 ip_proto;
|
|
+ u32 target;
|
|
+ __be64 tun_id;
|
|
+ struct fib_rule *ctarget;
|
|
+ struct net *fr_net;
|
|
+ refcount_t refcnt;
|
|
+ u32 pref;
|
|
+ int suppress_ifgroup;
|
|
+ int suppress_prefixlen;
|
|
+ char iifname[16];
|
|
+ char oifname[16];
|
|
+ struct fib_kuid_range uid_range;
|
|
+ struct fib_rule_port_range sport_range;
|
|
+ struct fib_rule_port_range dport_range;
|
|
+ struct callback_head rcu;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+};
|
|
+
|
|
+struct fib_lookup_arg {
|
|
+ void *lookup_ptr;
|
|
+ const void *lookup_data;
|
|
+ void *result;
|
|
+ struct fib_rule *rule;
|
|
+ u32 table;
|
|
+ int flags;
|
|
+};
|
|
+
|
|
+struct request_sock_ops;
|
|
+
|
|
+struct timewait_sock_ops;
|
|
+
|
|
+struct udp_table;
|
|
+
|
|
+struct raw_hashinfo;
|
|
+
|
|
+struct proto {
|
|
+ void (*close)(struct sock *, long int);
|
|
+ int (*pre_connect)(struct sock *, struct sockaddr *, int);
|
|
+ int (*connect)(struct sock *, struct sockaddr *, int);
|
|
+ int (*disconnect)(struct sock *, int);
|
|
+ struct sock * (*accept)(struct sock *, int, int *, bool);
|
|
+ int (*ioctl)(struct sock *, int, long unsigned int);
|
|
+ int (*init)(struct sock *);
|
|
+ void (*destroy)(struct sock *);
|
|
+ void (*shutdown)(struct sock *, int);
|
|
+ int (*setsockopt)(struct sock *, int, int, char *, unsigned int);
|
|
+ int (*getsockopt)(struct sock *, int, int, char *, int *);
|
|
+ void (*keepalive)(struct sock *, int);
|
|
+ int (*compat_setsockopt)(struct sock *, int, int, char *, unsigned int);
|
|
+ int (*compat_getsockopt)(struct sock *, int, int, char *, int *);
|
|
+ int (*compat_ioctl)(struct sock *, unsigned int, long unsigned int);
|
|
+ int (*sendmsg)(struct sock *, struct msghdr *, size_t);
|
|
+ int (*recvmsg)(struct sock *, struct msghdr *, size_t, int, int, int *);
|
|
+ int (*sendpage)(struct sock *, struct page *, int, size_t, int);
|
|
+ int (*bind)(struct sock *, struct sockaddr *, int);
|
|
+ int (*backlog_rcv)(struct sock *, struct sk_buff *);
|
|
+ void (*release_cb)(struct sock *);
|
|
+ int (*hash)(struct sock *);
|
|
+ void (*unhash)(struct sock *);
|
|
+ void (*rehash)(struct sock *);
|
|
+ int (*get_port)(struct sock *, short unsigned int);
|
|
+ unsigned int inuse_idx;
|
|
+ bool (*stream_memory_free)(const struct sock *);
|
|
+ bool (*stream_memory_read)(const struct sock *);
|
|
+ void (*enter_memory_pressure)(struct sock *);
|
|
+ void (*leave_memory_pressure)(struct sock *);
|
|
+ atomic_long_t *memory_allocated;
|
|
+ struct percpu_counter *sockets_allocated;
|
|
+ long unsigned int *memory_pressure;
|
|
+ long int *sysctl_mem;
|
|
+ int *sysctl_wmem;
|
|
+ int *sysctl_rmem;
|
|
+ u32 sysctl_wmem_offset;
|
|
+ u32 sysctl_rmem_offset;
|
|
+ int max_header;
|
|
+ bool no_autobind;
|
|
+ struct kmem_cache *slab;
|
|
+ unsigned int obj_size;
|
|
+ slab_flags_t slab_flags;
|
|
+ unsigned int useroffset;
|
|
+ unsigned int usersize;
|
|
+ struct percpu_counter *orphan_count;
|
|
+ struct request_sock_ops *rsk_prot;
|
|
+ struct timewait_sock_ops *twsk_prot;
|
|
+ union {
|
|
+ struct inet_hashinfo *hashinfo;
|
|
+ struct udp_table *udp_table;
|
|
+ struct raw_hashinfo *raw_hash;
|
|
+ struct smc_hashinfo *smc_hash;
|
|
+ } h;
|
|
+ struct module *owner;
|
|
+ char name[32];
|
|
+ struct list_head node;
|
|
+ int (*diag_destroy)(struct sock *, int);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long unsigned int kabi_reserved9;
|
|
+ long unsigned int kabi_reserved10;
|
|
+ long unsigned int kabi_reserved11;
|
|
+ long unsigned int kabi_reserved12;
|
|
+ long unsigned int kabi_reserved13;
|
|
+ long unsigned int kabi_reserved14;
|
|
+ long unsigned int kabi_reserved15;
|
|
+ long unsigned int kabi_reserved16;
|
|
+};
|
|
+
|
|
+struct request_sock;
|
|
+
|
|
+struct request_sock_ops {
|
|
+ int family;
|
|
+ unsigned int obj_size;
|
|
+ struct kmem_cache *slab;
|
|
+ char *slab_name;
|
|
+ int (*rtx_syn_ack)(const struct sock *, struct request_sock *);
|
|
+ void (*send_ack)(const struct sock *, struct sk_buff *, struct request_sock *);
|
|
+ void (*send_reset)(const struct sock *, struct sk_buff *);
|
|
+ void (*destructor)(struct request_sock *);
|
|
+ void (*syn_ack_timeout)(const struct request_sock *);
|
|
+};
|
|
+
|
|
+struct timewait_sock_ops {
|
|
+ struct kmem_cache *twsk_slab;
|
|
+ char *twsk_slab_name;
|
|
+ unsigned int twsk_obj_size;
|
|
+ int (*twsk_unique)(struct sock *, struct sock *, void *);
|
|
+ void (*twsk_destructor)(struct sock *);
|
|
+};
|
|
+
|
|
+struct request_sock {
|
|
+ struct sock_common __req_common;
|
|
+ struct request_sock *dl_next;
|
|
+ u16 mss;
|
|
+ u8 num_retrans;
|
|
+ u8 cookie_ts: 1;
|
|
+ u8 num_timeout: 7;
|
|
+ u32 ts_recent;
|
|
+ struct timer_list rsk_timer;
|
|
+ const struct request_sock_ops *rsk_ops;
|
|
+ struct sock *sk;
|
|
+ u32 *saved_syn;
|
|
+ u32 secid;
|
|
+ u32 peer_secid;
|
|
+};
|
|
+
|
|
+enum tsq_enum {
|
|
+ TSQ_THROTTLED = 0,
|
|
+ TSQ_QUEUED = 1,
|
|
+ TCP_TSQ_DEFERRED = 2,
|
|
+ TCP_WRITE_TIMER_DEFERRED = 3,
|
|
+ TCP_DELACK_TIMER_DEFERRED = 4,
|
|
+ TCP_MTU_REDUCED_DEFERRED = 5,
|
|
+};
|
|
+
|
|
+struct ip6_sf_list {
|
|
+ struct ip6_sf_list *sf_next;
|
|
+ struct in6_addr sf_addr;
|
|
+ long unsigned int sf_count[2];
|
|
+ unsigned char sf_gsresp;
|
|
+ unsigned char sf_oldin;
|
|
+ unsigned char sf_crcount;
|
|
+};
|
|
+
|
|
+struct ifmcaddr6 {
|
|
+ struct in6_addr mca_addr;
|
|
+ struct inet6_dev *idev;
|
|
+ struct ifmcaddr6 *next;
|
|
+ struct ip6_sf_list *mca_sources;
|
|
+ struct ip6_sf_list *mca_tomb;
|
|
+ unsigned int mca_sfmode;
|
|
+ unsigned char mca_crcount;
|
|
+ long unsigned int mca_sfcount[2];
|
|
+ struct timer_list mca_timer;
|
|
+ unsigned int mca_flags;
|
|
+ int mca_users;
|
|
+ refcount_t mca_refcnt;
|
|
+ spinlock_t mca_lock;
|
|
+ long unsigned int mca_cstamp;
|
|
+ long unsigned int mca_tstamp;
|
|
+};
|
|
+
|
|
+struct ifacaddr6 {
|
|
+ struct in6_addr aca_addr;
|
|
+ struct fib6_info *aca_rt;
|
|
+ struct ifacaddr6 *aca_next;
|
|
+ int aca_users;
|
|
+ refcount_t aca_refcnt;
|
|
+ long unsigned int aca_cstamp;
|
|
+ long unsigned int aca_tstamp;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ __ND_OPT_PREFIX_INFO_END = 0,
|
|
+ ND_OPT_SOURCE_LL_ADDR = 1,
|
|
+ ND_OPT_TARGET_LL_ADDR = 2,
|
|
+ ND_OPT_PREFIX_INFO = 3,
|
|
+ ND_OPT_REDIRECT_HDR = 4,
|
|
+ ND_OPT_MTU = 5,
|
|
+ ND_OPT_NONCE = 14,
|
|
+ __ND_OPT_ARRAY_MAX = 15,
|
|
+ ND_OPT_ROUTE_INFO = 24,
|
|
+ ND_OPT_RDNSS = 25,
|
|
+ ND_OPT_DNSSL = 31,
|
|
+ ND_OPT_6CO = 34,
|
|
+ __ND_OPT_MAX = 35,
|
|
+};
|
|
+
|
|
+struct nd_opt_hdr {
|
|
+ __u8 nd_opt_type;
|
|
+ __u8 nd_opt_len;
|
|
+};
|
|
+
|
|
+struct ndisc_options {
|
|
+ struct nd_opt_hdr *nd_opt_array[15];
|
|
+ struct nd_opt_hdr *nd_opts_ri;
|
|
+ struct nd_opt_hdr *nd_opts_ri_end;
|
|
+ struct nd_opt_hdr *nd_useropts;
|
|
+ struct nd_opt_hdr *nd_useropts_end;
|
|
+ struct nd_opt_hdr *nd_802154_opt_array[3];
|
|
+};
|
|
+
|
|
+struct prefix_info {
|
|
+ __u8 type;
|
|
+ __u8 length;
|
|
+ __u8 prefix_len;
|
|
+ __u8 reserved: 6;
|
|
+ __u8 autoconf: 1;
|
|
+ __u8 onlink: 1;
|
|
+ __be32 valid;
|
|
+ __be32 prefered;
|
|
+ __be32 reserved2;
|
|
+ struct in6_addr prefix;
|
|
+};
|
|
+
|
|
+struct ip6_ra_chain {
|
|
+ struct ip6_ra_chain *next;
|
|
+ struct sock *sk;
|
|
+ int sel;
|
|
+ void (*destructor)(struct sock *);
|
|
+};
|
|
+
|
|
+struct rpc_xprt_iter_ops {
|
|
+ void (*xpi_rewind)(struct rpc_xprt_iter *);
|
|
+ struct rpc_xprt * (*xpi_xprt)(struct rpc_xprt_iter *);
|
|
+ struct rpc_xprt * (*xpi_next)(struct rpc_xprt_iter *);
|
|
+};
|
|
+
|
|
+struct rpc_version {
|
|
+ u32 number;
|
|
+ unsigned int nrprocs;
|
|
+ const struct rpc_procinfo *procs;
|
|
+ unsigned int *counts;
|
|
+};
|
|
+
|
|
+struct nfs_fh {
|
|
+ short unsigned int size;
|
|
+ unsigned char data[128];
|
|
+};
|
|
+
|
|
+enum nfs3_stable_how {
|
|
+ NFS_UNSTABLE = 0,
|
|
+ NFS_DATA_SYNC = 1,
|
|
+ NFS_FILE_SYNC = 2,
|
|
+ NFS_INVALID_STABLE_HOW = -1,
|
|
+};
|
|
+
|
|
+struct nfs4_label {
|
|
+ uint32_t lfs;
|
|
+ uint32_t pi;
|
|
+ u32 len;
|
|
+ char *label;
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ char data[8];
|
|
+} nfs4_verifier;
|
|
+
|
|
+struct nfs4_stateid_struct {
|
|
+ union {
|
|
+ char data[16];
|
|
+ struct {
|
|
+ __be32 seqid;
|
|
+ char other[12];
|
|
+ };
|
|
+ };
|
|
+ enum {
|
|
+ NFS4_INVALID_STATEID_TYPE = 0,
|
|
+ NFS4_SPECIAL_STATEID_TYPE = 1,
|
|
+ NFS4_OPEN_STATEID_TYPE = 2,
|
|
+ NFS4_LOCK_STATEID_TYPE = 3,
|
|
+ NFS4_DELEGATION_STATEID_TYPE = 4,
|
|
+ NFS4_LAYOUT_STATEID_TYPE = 5,
|
|
+ NFS4_PNFS_DS_STATEID_TYPE = 6,
|
|
+ NFS4_REVOKED_STATEID_TYPE = 7,
|
|
+ } type;
|
|
+};
|
|
+
|
|
+typedef struct nfs4_stateid_struct nfs4_stateid;
|
|
+
|
|
+enum nfs_opnum4 {
|
|
+ OP_ACCESS = 3,
|
|
+ OP_CLOSE = 4,
|
|
+ OP_COMMIT = 5,
|
|
+ OP_CREATE = 6,
|
|
+ OP_DELEGPURGE = 7,
|
|
+ OP_DELEGRETURN = 8,
|
|
+ OP_GETATTR = 9,
|
|
+ OP_GETFH = 10,
|
|
+ OP_LINK = 11,
|
|
+ OP_LOCK = 12,
|
|
+ OP_LOCKT = 13,
|
|
+ OP_LOCKU = 14,
|
|
+ OP_LOOKUP = 15,
|
|
+ OP_LOOKUPP = 16,
|
|
+ OP_NVERIFY = 17,
|
|
+ OP_OPEN = 18,
|
|
+ OP_OPENATTR = 19,
|
|
+ OP_OPEN_CONFIRM = 20,
|
|
+ OP_OPEN_DOWNGRADE = 21,
|
|
+ OP_PUTFH = 22,
|
|
+ OP_PUTPUBFH = 23,
|
|
+ OP_PUTROOTFH = 24,
|
|
+ OP_READ = 25,
|
|
+ OP_READDIR = 26,
|
|
+ OP_READLINK = 27,
|
|
+ OP_REMOVE = 28,
|
|
+ OP_RENAME = 29,
|
|
+ OP_RENEW = 30,
|
|
+ OP_RESTOREFH = 31,
|
|
+ OP_SAVEFH = 32,
|
|
+ OP_SECINFO = 33,
|
|
+ OP_SETATTR = 34,
|
|
+ OP_SETCLIENTID = 35,
|
|
+ OP_SETCLIENTID_CONFIRM = 36,
|
|
+ OP_VERIFY = 37,
|
|
+ OP_WRITE = 38,
|
|
+ OP_RELEASE_LOCKOWNER = 39,
|
|
+ OP_BACKCHANNEL_CTL = 40,
|
|
+ OP_BIND_CONN_TO_SESSION = 41,
|
|
+ OP_EXCHANGE_ID = 42,
|
|
+ OP_CREATE_SESSION = 43,
|
|
+ OP_DESTROY_SESSION = 44,
|
|
+ OP_FREE_STATEID = 45,
|
|
+ OP_GET_DIR_DELEGATION = 46,
|
|
+ OP_GETDEVICEINFO = 47,
|
|
+ OP_GETDEVICELIST = 48,
|
|
+ OP_LAYOUTCOMMIT = 49,
|
|
+ OP_LAYOUTGET = 50,
|
|
+ OP_LAYOUTRETURN = 51,
|
|
+ OP_SECINFO_NO_NAME = 52,
|
|
+ OP_SEQUENCE = 53,
|
|
+ OP_SET_SSV = 54,
|
|
+ OP_TEST_STATEID = 55,
|
|
+ OP_WANT_DELEGATION = 56,
|
|
+ OP_DESTROY_CLIENTID = 57,
|
|
+ OP_RECLAIM_COMPLETE = 58,
|
|
+ OP_ALLOCATE = 59,
|
|
+ OP_COPY = 60,
|
|
+ OP_COPY_NOTIFY = 61,
|
|
+ OP_DEALLOCATE = 62,
|
|
+ OP_IO_ADVISE = 63,
|
|
+ OP_LAYOUTERROR = 64,
|
|
+ OP_LAYOUTSTATS = 65,
|
|
+ OP_OFFLOAD_CANCEL = 66,
|
|
+ OP_OFFLOAD_STATUS = 67,
|
|
+ OP_READ_PLUS = 68,
|
|
+ OP_SEEK = 69,
|
|
+ OP_WRITE_SAME = 70,
|
|
+ OP_CLONE = 71,
|
|
+ OP_ILLEGAL = 10044,
|
|
+};
|
|
+
|
|
+struct nfs4_string {
|
|
+ unsigned int len;
|
|
+ char *data;
|
|
+};
|
|
+
|
|
+struct nfs_fsid {
|
|
+ uint64_t major;
|
|
+ uint64_t minor;
|
|
+};
|
|
+
|
|
+struct nfs4_threshold {
|
|
+ __u32 bm;
|
|
+ __u32 l_type;
|
|
+ __u64 rd_sz;
|
|
+ __u64 wr_sz;
|
|
+ __u64 rd_io_sz;
|
|
+ __u64 wr_io_sz;
|
|
+};
|
|
+
|
|
+struct nfs_fattr {
|
|
+ unsigned int valid;
|
|
+ umode_t mode;
|
|
+ __u32 nlink;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ dev_t rdev;
|
|
+ __u64 size;
|
|
+ union {
|
|
+ struct {
|
|
+ __u32 blocksize;
|
|
+ __u32 blocks;
|
|
+ } nfs2;
|
|
+ struct {
|
|
+ __u64 used;
|
|
+ } nfs3;
|
|
+ } du;
|
|
+ struct nfs_fsid fsid;
|
|
+ __u64 fileid;
|
|
+ __u64 mounted_on_fileid;
|
|
+ struct timespec atime;
|
|
+ struct timespec mtime;
|
|
+ struct timespec ctime;
|
|
+ __u64 change_attr;
|
|
+ __u64 pre_change_attr;
|
|
+ __u64 pre_size;
|
|
+ struct timespec pre_mtime;
|
|
+ struct timespec pre_ctime;
|
|
+ long unsigned int time_start;
|
|
+ long unsigned int gencount;
|
|
+ struct nfs4_string *owner_name;
|
|
+ struct nfs4_string *group_name;
|
|
+ struct nfs4_threshold *mdsthreshold;
|
|
+};
|
|
+
|
|
+struct nfs_fsinfo {
|
|
+ struct nfs_fattr *fattr;
|
|
+ __u32 rtmax;
|
|
+ __u32 rtpref;
|
|
+ __u32 rtmult;
|
|
+ __u32 wtmax;
|
|
+ __u32 wtpref;
|
|
+ __u32 wtmult;
|
|
+ __u32 dtpref;
|
|
+ __u64 maxfilesize;
|
|
+ struct timespec time_delta;
|
|
+ __u32 lease_time;
|
|
+ __u32 nlayouttypes;
|
|
+ __u32 layouttype[8];
|
|
+ __u32 blksize;
|
|
+ __u32 clone_blksize;
|
|
+};
|
|
+
|
|
+struct nfs_fsstat {
|
|
+ struct nfs_fattr *fattr;
|
|
+ __u64 tbytes;
|
|
+ __u64 fbytes;
|
|
+ __u64 abytes;
|
|
+ __u64 tfiles;
|
|
+ __u64 ffiles;
|
|
+ __u64 afiles;
|
|
+};
|
|
+
|
|
+struct nfs_pathconf {
|
|
+ struct nfs_fattr *fattr;
|
|
+ __u32 max_link;
|
|
+ __u32 max_namelen;
|
|
+};
|
|
+
|
|
+struct nfs4_change_info {
|
|
+ u32 atomic;
|
|
+ u64 before;
|
|
+ u64 after;
|
|
+};
|
|
+
|
|
+struct nfs4_slot;
|
|
+
|
|
+struct nfs4_sequence_args {
|
|
+ struct nfs4_slot *sa_slot;
|
|
+ u8 sa_cache_this: 1;
|
|
+ u8 sa_privileged: 1;
|
|
+};
|
|
+
|
|
+struct nfs4_sequence_res {
|
|
+ struct nfs4_slot *sr_slot;
|
|
+ long unsigned int sr_timestamp;
|
|
+ int sr_status;
|
|
+ u32 sr_status_flags;
|
|
+ u32 sr_highest_slotid;
|
|
+ u32 sr_target_highest_slotid;
|
|
+};
|
|
+
|
|
+struct nfs_open_context;
|
|
+
|
|
+struct nfs_lock_context {
|
|
+ refcount_t count;
|
|
+ struct list_head list;
|
|
+ struct nfs_open_context *open_context;
|
|
+ fl_owner_t lockowner;
|
|
+ atomic_t io_count;
|
|
+};
|
|
+
|
|
+struct nfs4_state;
|
|
+
|
|
+struct nfs_open_context {
|
|
+ struct nfs_lock_context lock_context;
|
|
+ fl_owner_t flock_owner;
|
|
+ struct dentry *dentry;
|
|
+ struct rpc_cred *cred;
|
|
+ struct nfs4_state *state;
|
|
+ fmode_t mode;
|
|
+ long unsigned int flags;
|
|
+ int error;
|
|
+ struct list_head list;
|
|
+ struct nfs4_threshold *mdsthreshold;
|
|
+};
|
|
+
|
|
+struct nlm_host;
|
|
+
|
|
+struct nfs_iostats;
|
|
+
|
|
+struct nfs_auth_info {
|
|
+ unsigned int flavor_len;
|
|
+ rpc_authflavor_t flavors[12];
|
|
+};
|
|
+
|
|
+struct nfs_fscache_key;
|
|
+
|
|
+struct fscache_cookie;
|
|
+
|
|
+struct pnfs_layoutdriver_type;
|
|
+
|
|
+struct nfs_client;
|
|
+
|
|
+struct nfs_server {
|
|
+ struct nfs_client *nfs_client;
|
|
+ struct list_head client_link;
|
|
+ struct list_head master_link;
|
|
+ struct rpc_clnt *client;
|
|
+ struct rpc_clnt *client_acl;
|
|
+ struct nlm_host *nlm_host;
|
|
+ struct nfs_iostats *io_stats;
|
|
+ atomic_long_t writeback;
|
|
+ int flags;
|
|
+ unsigned int caps;
|
|
+ unsigned int rsize;
|
|
+ unsigned int rpages;
|
|
+ unsigned int wsize;
|
|
+ unsigned int wpages;
|
|
+ unsigned int wtmult;
|
|
+ unsigned int dtsize;
|
|
+ short unsigned int port;
|
|
+ unsigned int bsize;
|
|
+ unsigned int acregmin;
|
|
+ unsigned int acregmax;
|
|
+ unsigned int acdirmin;
|
|
+ unsigned int acdirmax;
|
|
+ unsigned int namelen;
|
|
+ unsigned int options;
|
|
+ unsigned int clone_blksize;
|
|
+ struct nfs_fsid fsid;
|
|
+ __u64 maxfilesize;
|
|
+ struct timespec time_delta;
|
|
+ long unsigned int mount_time;
|
|
+ struct super_block *super;
|
|
+ dev_t s_dev;
|
|
+ struct nfs_auth_info auth_info;
|
|
+ struct nfs_fscache_key *fscache_key;
|
|
+ struct fscache_cookie *fscache;
|
|
+ u32 pnfs_blksize;
|
|
+ u32 attr_bitmask[3];
|
|
+ u32 attr_bitmask_nl[3];
|
|
+ u32 exclcreat_bitmask[3];
|
|
+ u32 cache_consistency_bitmask[3];
|
|
+ u32 acl_bitmask;
|
|
+ u32 fh_expire_type;
|
|
+ struct pnfs_layoutdriver_type *pnfs_curr_ld;
|
|
+ struct rpc_wait_queue roc_rpcwaitq;
|
|
+ void *pnfs_ld_data;
|
|
+ struct rb_root state_owners;
|
|
+ struct ida openowner_id;
|
|
+ struct ida lockowner_id;
|
|
+ struct list_head state_owners_lru;
|
|
+ struct list_head layouts;
|
|
+ struct list_head delegations;
|
|
+ struct list_head ss_copies;
|
|
+ long unsigned int mig_gen;
|
|
+ long unsigned int mig_status;
|
|
+ void (*destroy)(struct nfs_server *);
|
|
+ atomic_t active;
|
|
+ int: 32;
|
|
+ struct __kernel_sockaddr_storage mountd_address;
|
|
+ size_t mountd_addrlen;
|
|
+ u32 mountd_version;
|
|
+ short unsigned int mountd_port;
|
|
+ short unsigned int mountd_protocol;
|
|
+ struct rpc_wait_queue uoc_rpcwaitq;
|
|
+};
|
|
+
|
|
+struct nfs_subversion;
|
|
+
|
|
+struct idmap;
|
|
+
|
|
+struct nfs4_minor_version_ops;
|
|
+
|
|
+struct nfs4_slot_table;
|
|
+
|
|
+struct nfs4_session;
|
|
+
|
|
+struct nfs_rpc_ops;
|
|
+
|
|
+struct nfs41_server_owner;
|
|
+
|
|
+struct nfs41_server_scope;
|
|
+
|
|
+struct nfs41_impl_id;
|
|
+
|
|
+struct nfs_client {
|
|
+ refcount_t cl_count;
|
|
+ atomic_t cl_mds_count;
|
|
+ int cl_cons_state;
|
|
+ long unsigned int cl_res_state;
|
|
+ long unsigned int cl_flags;
|
|
+ struct __kernel_sockaddr_storage cl_addr;
|
|
+ size_t cl_addrlen;
|
|
+ char *cl_hostname;
|
|
+ char *cl_acceptor;
|
|
+ struct list_head cl_share_link;
|
|
+ struct list_head cl_superblocks;
|
|
+ struct rpc_clnt *cl_rpcclient;
|
|
+ const struct nfs_rpc_ops *rpc_ops;
|
|
+ int cl_proto;
|
|
+ struct nfs_subversion *cl_nfs_mod;
|
|
+ u32 cl_minorversion;
|
|
+ struct rpc_cred *cl_machine_cred;
|
|
+ struct list_head cl_ds_clients;
|
|
+ u64 cl_clientid;
|
|
+ nfs4_verifier cl_confirm;
|
|
+ long unsigned int cl_state;
|
|
+ spinlock_t cl_lock;
|
|
+ long unsigned int cl_lease_time;
|
|
+ long unsigned int cl_last_renewal;
|
|
+ struct delayed_work cl_renewd;
|
|
+ struct rpc_wait_queue cl_rpcwaitq;
|
|
+ struct idmap *cl_idmap;
|
|
+ const char *cl_owner_id;
|
|
+ u32 cl_cb_ident;
|
|
+ const struct nfs4_minor_version_ops *cl_mvops;
|
|
+ long unsigned int cl_mig_gen;
|
|
+ struct nfs4_slot_table *cl_slot_tbl;
|
|
+ u32 cl_seqid;
|
|
+ u32 cl_exchange_flags;
|
|
+ struct nfs4_session *cl_session;
|
|
+ bool cl_preserve_clid;
|
|
+ struct nfs41_server_owner *cl_serverowner;
|
|
+ struct nfs41_server_scope *cl_serverscope;
|
|
+ struct nfs41_impl_id *cl_implid;
|
|
+ long unsigned int cl_sp4_flags;
|
|
+ wait_queue_head_t cl_lock_waitq;
|
|
+ char cl_ipaddr[48];
|
|
+ struct fscache_cookie *fscache;
|
|
+ struct net *cl_net;
|
|
+ struct list_head pending_cb_stateids;
|
|
+};
|
|
+
|
|
+struct nfs_write_verifier {
|
|
+ char data[8];
|
|
+};
|
|
+
|
|
+struct nfs_writeverf {
|
|
+ struct nfs_write_verifier verifier;
|
|
+ enum nfs3_stable_how committed;
|
|
+};
|
|
+
|
|
+struct nfs_pgio_args {
|
|
+ struct nfs4_sequence_args seq_args;
|
|
+ struct nfs_fh *fh;
|
|
+ struct nfs_open_context *context;
|
|
+ struct nfs_lock_context *lock_context;
|
|
+ nfs4_stateid stateid;
|
|
+ __u64 offset;
|
|
+ __u32 count;
|
|
+ unsigned int pgbase;
|
|
+ struct page **pages;
|
|
+ const u32 *bitmask;
|
|
+ enum nfs3_stable_how stable;
|
|
+};
|
|
+
|
|
+struct nfs_pgio_res {
|
|
+ struct nfs4_sequence_res seq_res;
|
|
+ struct nfs_fattr *fattr;
|
|
+ __u32 count;
|
|
+ __u32 op_status;
|
|
+ int eof;
|
|
+ struct nfs_writeverf *verf;
|
|
+ const struct nfs_server *server;
|
|
+};
|
|
+
|
|
+struct nfs_commitargs {
|
|
+ struct nfs4_sequence_args seq_args;
|
|
+ struct nfs_fh *fh;
|
|
+ __u64 offset;
|
|
+ __u32 count;
|
|
+ const u32 *bitmask;
|
|
+};
|
|
+
|
|
+struct nfs_commitres {
|
|
+ struct nfs4_sequence_res seq_res;
|
|
+ __u32 op_status;
|
|
+ struct nfs_fattr *fattr;
|
|
+ struct nfs_writeverf *verf;
|
|
+ const struct nfs_server *server;
|
|
+};
|
|
+
|
|
+struct nfs_removeargs {
|
|
+ struct nfs4_sequence_args seq_args;
|
|
+ const struct nfs_fh *fh;
|
|
+ struct qstr name;
|
|
+};
|
|
+
|
|
+struct nfs_removeres {
|
|
+ struct nfs4_sequence_res seq_res;
|
|
+ struct nfs_server *server;
|
|
+ struct nfs_fattr *dir_attr;
|
|
+ struct nfs4_change_info cinfo;
|
|
+};
|
|
+
|
|
+struct nfs_renameargs {
|
|
+ struct nfs4_sequence_args seq_args;
|
|
+ const struct nfs_fh *old_dir;
|
|
+ const struct nfs_fh *new_dir;
|
|
+ const struct qstr *old_name;
|
|
+ const struct qstr *new_name;
|
|
+};
|
|
+
|
|
+struct nfs_renameres {
|
|
+ struct nfs4_sequence_res seq_res;
|
|
+ struct nfs_server *server;
|
|
+ struct nfs4_change_info old_cinfo;
|
|
+ struct nfs_fattr *old_fattr;
|
|
+ struct nfs4_change_info new_cinfo;
|
|
+ struct nfs_fattr *new_fattr;
|
|
+};
|
|
+
|
|
+struct nfs_entry {
|
|
+ __u64 ino;
|
|
+ __u64 cookie;
|
|
+ __u64 prev_cookie;
|
|
+ const char *name;
|
|
+ unsigned int len;
|
|
+ int eof;
|
|
+ struct nfs_fh *fh;
|
|
+ struct nfs_fattr *fattr;
|
|
+ struct nfs4_label *label;
|
|
+ unsigned char d_type;
|
|
+ struct nfs_server *server;
|
|
+};
|
|
+
|
|
+struct nfstime4 {
|
|
+ u64 seconds;
|
|
+ u32 nseconds;
|
|
+};
|
|
+
|
|
+struct pnfs_layout_segment;
|
|
+
|
|
+struct pnfs_commit_bucket {
|
|
+ struct list_head written;
|
|
+ struct list_head committing;
|
|
+ struct pnfs_layout_segment *wlseg;
|
|
+ struct pnfs_layout_segment *clseg;
|
|
+ struct nfs_writeverf direct_verf;
|
|
+};
|
|
+
|
|
+struct pnfs_ds_commit_info {
|
|
+ int nwritten;
|
|
+ int ncommitting;
|
|
+ int nbuckets;
|
|
+ struct pnfs_commit_bucket *buckets;
|
|
+};
|
|
+
|
|
+struct nfs41_server_owner {
|
|
+ uint64_t minor_id;
|
|
+ uint32_t major_id_sz;
|
|
+ char major_id[1024];
|
|
+};
|
|
+
|
|
+struct nfs41_server_scope {
|
|
+ uint32_t server_scope_sz;
|
|
+ char server_scope[1024];
|
|
+};
|
|
+
|
|
+struct nfs41_impl_id {
|
|
+ char domain[1025];
|
|
+ char name[1025];
|
|
+ struct nfstime4 date;
|
|
+};
|
|
+
|
|
+struct nfs_page_array {
|
|
+ struct page **pagevec;
|
|
+ unsigned int npages;
|
|
+ struct page *page_array[8];
|
|
+};
|
|
+
|
|
+struct nfs_page;
|
|
+
|
|
+struct nfs_rw_ops;
|
|
+
|
|
+struct nfs_io_completion;
|
|
+
|
|
+struct nfs_direct_req;
|
|
+
|
|
+struct nfs_pgio_completion_ops;
|
|
+
|
|
+struct nfs_pgio_header {
|
|
+ struct inode *inode;
|
|
+ struct rpc_cred *cred;
|
|
+ struct list_head pages;
|
|
+ struct nfs_page *req;
|
|
+ struct nfs_writeverf verf;
|
|
+ fmode_t rw_mode;
|
|
+ struct pnfs_layout_segment *lseg;
|
|
+ loff_t io_start;
|
|
+ const struct rpc_call_ops *mds_ops;
|
|
+ void (*release)(struct nfs_pgio_header *);
|
|
+ const struct nfs_pgio_completion_ops *completion_ops;
|
|
+ const struct nfs_rw_ops *rw_ops;
|
|
+ struct nfs_io_completion *io_completion;
|
|
+ struct nfs_direct_req *dreq;
|
|
+ spinlock_t lock;
|
|
+ int pnfs_error;
|
|
+ int error;
|
|
+ long unsigned int good_bytes;
|
|
+ long unsigned int flags;
|
|
+ struct rpc_task task;
|
|
+ struct nfs_fattr fattr;
|
|
+ struct nfs_pgio_args args;
|
|
+ struct nfs_pgio_res res;
|
|
+ long unsigned int timestamp;
|
|
+ int (*pgio_done_cb)(struct rpc_task *, struct nfs_pgio_header *);
|
|
+ __u64 mds_offset;
|
|
+ struct nfs_page_array page_array;
|
|
+ struct nfs_client *ds_clp;
|
|
+ int ds_commit_idx;
|
|
+ int pgio_mirror_idx;
|
|
+};
|
|
+
|
|
+struct nfs_pgio_completion_ops {
|
|
+ void (*error_cleanup)(struct list_head *, int);
|
|
+ void (*init_hdr)(struct nfs_pgio_header *);
|
|
+ void (*completion)(struct nfs_pgio_header *);
|
|
+ void (*reschedule_io)(struct nfs_pgio_header *);
|
|
+};
|
|
+
|
|
+struct nfs_mds_commit_info {
|
|
+ atomic_t rpcs_out;
|
|
+ atomic_long_t ncommit;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct nfs_commit_data;
|
|
+
|
|
+struct nfs_commit_info;
|
|
+
|
|
+struct nfs_commit_completion_ops {
|
|
+ void (*completion)(struct nfs_commit_data *);
|
|
+ void (*resched_write)(struct nfs_commit_info *, struct nfs_page *);
|
|
+};
|
|
+
|
|
+struct nfs_commit_data {
|
|
+ struct rpc_task task;
|
|
+ struct inode *inode;
|
|
+ struct rpc_cred *cred;
|
|
+ struct nfs_fattr fattr;
|
|
+ struct nfs_writeverf verf;
|
|
+ struct list_head pages;
|
|
+ struct list_head list;
|
|
+ struct nfs_direct_req *dreq;
|
|
+ struct nfs_commitargs args;
|
|
+ struct nfs_commitres res;
|
|
+ struct nfs_open_context *context;
|
|
+ struct pnfs_layout_segment *lseg;
|
|
+ struct nfs_client *ds_clp;
|
|
+ int ds_commit_index;
|
|
+ loff_t lwb;
|
|
+ const struct rpc_call_ops *mds_ops;
|
|
+ const struct nfs_commit_completion_ops *completion_ops;
|
|
+ int (*commit_done_cb)(struct rpc_task *, struct nfs_commit_data *);
|
|
+ long unsigned int flags;
|
|
+};
|
|
+
|
|
+struct nfs_commit_info {
|
|
+ struct inode *inode;
|
|
+ struct nfs_mds_commit_info *mds;
|
|
+ struct pnfs_ds_commit_info *ds;
|
|
+ struct nfs_direct_req *dreq;
|
|
+ const struct nfs_commit_completion_ops *completion_ops;
|
|
+};
|
|
+
|
|
+struct nfs_unlinkdata {
|
|
+ struct nfs_removeargs args;
|
|
+ struct nfs_removeres res;
|
|
+ struct dentry *dentry;
|
|
+ wait_queue_head_t wq;
|
|
+ struct rpc_cred *cred;
|
|
+ struct nfs_fattr dir_attr;
|
|
+ long int timeout;
|
|
+};
|
|
+
|
|
+struct nfs_renamedata {
|
|
+ struct nfs_renameargs args;
|
|
+ struct nfs_renameres res;
|
|
+ struct rpc_cred *cred;
|
|
+ struct inode *old_dir;
|
|
+ struct dentry *old_dentry;
|
|
+ struct nfs_fattr old_fattr;
|
|
+ struct inode *new_dir;
|
|
+ struct dentry *new_dentry;
|
|
+ struct nfs_fattr new_fattr;
|
|
+ void (*complete)(struct rpc_task *, struct nfs_renamedata *);
|
|
+ long int timeout;
|
|
+ bool cancelled;
|
|
+};
|
|
+
|
|
+struct nlmclnt_operations;
|
|
+
|
|
+struct nfs_mount_info;
|
|
+
|
|
+struct nfs_client_initdata;
|
|
+
|
|
+struct nfs_access_entry;
|
|
+
|
|
+struct nfs_rpc_ops {
|
|
+ u32 version;
|
|
+ const struct dentry_operations *dentry_ops;
|
|
+ const struct inode_operations *dir_inode_ops;
|
|
+ const struct inode_operations *file_inode_ops;
|
|
+ const struct file_operations *file_ops;
|
|
+ const struct nlmclnt_operations *nlmclnt_ops;
|
|
+ int (*getroot)(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
|
|
+ struct vfsmount * (*submount)(struct nfs_server *, struct dentry *, struct nfs_fh *, struct nfs_fattr *);
|
|
+ struct dentry * (*try_mount)(int, const char *, struct nfs_mount_info *, struct nfs_subversion *);
|
|
+ int (*getattr)(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *, struct inode *);
|
|
+ int (*setattr)(struct dentry *, struct nfs_fattr *, struct iattr *);
|
|
+ int (*lookup)(struct inode *, const struct qstr *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *);
|
|
+ int (*lookupp)(struct inode *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *);
|
|
+ int (*access)(struct inode *, struct nfs_access_entry *);
|
|
+ int (*readlink)(struct inode *, struct page *, unsigned int, unsigned int);
|
|
+ int (*create)(struct inode *, struct dentry *, struct iattr *, int);
|
|
+ int (*remove)(struct inode *, struct dentry *);
|
|
+ void (*unlink_setup)(struct rpc_message *, struct dentry *, struct inode *);
|
|
+ void (*unlink_rpc_prepare)(struct rpc_task *, struct nfs_unlinkdata *);
|
|
+ int (*unlink_done)(struct rpc_task *, struct inode *);
|
|
+ void (*rename_setup)(struct rpc_message *, struct dentry *, struct dentry *);
|
|
+ void (*rename_rpc_prepare)(struct rpc_task *, struct nfs_renamedata *);
|
|
+ int (*rename_done)(struct rpc_task *, struct inode *, struct inode *);
|
|
+ int (*link)(struct inode *, struct inode *, const struct qstr *);
|
|
+ int (*symlink)(struct inode *, struct dentry *, struct page *, unsigned int, struct iattr *);
|
|
+ int (*mkdir)(struct inode *, struct dentry *, struct iattr *);
|
|
+ int (*rmdir)(struct inode *, const struct qstr *);
|
|
+ int (*readdir)(struct dentry *, struct rpc_cred *, u64, struct page **, unsigned int, bool);
|
|
+ int (*mknod)(struct inode *, struct dentry *, struct iattr *, dev_t);
|
|
+ int (*statfs)(struct nfs_server *, struct nfs_fh *, struct nfs_fsstat *);
|
|
+ int (*fsinfo)(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
|
|
+ int (*pathconf)(struct nfs_server *, struct nfs_fh *, struct nfs_pathconf *);
|
|
+ int (*set_capabilities)(struct nfs_server *, struct nfs_fh *);
|
|
+ int (*decode_dirent)(struct xdr_stream *, struct nfs_entry *, bool);
|
|
+ int (*pgio_rpc_prepare)(struct rpc_task *, struct nfs_pgio_header *);
|
|
+ void (*read_setup)(struct nfs_pgio_header *, struct rpc_message *);
|
|
+ int (*read_done)(struct rpc_task *, struct nfs_pgio_header *);
|
|
+ void (*write_setup)(struct nfs_pgio_header *, struct rpc_message *, struct rpc_clnt **);
|
|
+ int (*write_done)(struct rpc_task *, struct nfs_pgio_header *);
|
|
+ void (*commit_setup)(struct nfs_commit_data *, struct rpc_message *, struct rpc_clnt **);
|
|
+ void (*commit_rpc_prepare)(struct rpc_task *, struct nfs_commit_data *);
|
|
+ int (*commit_done)(struct rpc_task *, struct nfs_commit_data *);
|
|
+ int (*lock)(struct file *, int, struct file_lock *);
|
|
+ int (*lock_check_bounds)(const struct file_lock *);
|
|
+ void (*clear_acl_cache)(struct inode *);
|
|
+ void (*close_context)(struct nfs_open_context *, int);
|
|
+ struct inode * (*open_context)(struct inode *, struct nfs_open_context *, int, struct iattr *, int *);
|
|
+ int (*have_delegation)(struct inode *, fmode_t);
|
|
+ struct nfs_client * (*alloc_client)(const struct nfs_client_initdata *);
|
|
+ struct nfs_client * (*init_client)(struct nfs_client *, const struct nfs_client_initdata *);
|
|
+ void (*free_client)(struct nfs_client *);
|
|
+ struct nfs_server * (*create_server)(struct nfs_mount_info *, struct nfs_subversion *);
|
|
+ struct nfs_server * (*clone_server)(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, rpc_authflavor_t);
|
|
+};
|
|
+
|
|
+struct nfs_access_entry {
|
|
+ struct rb_node rb_node;
|
|
+ struct list_head lru;
|
|
+ struct rpc_cred *cred;
|
|
+ __u32 mask;
|
|
+ struct callback_head callback_head;
|
|
+};
|
|
+
|
|
+enum blkg_rwstat_type {
|
|
+ BLKG_RWSTAT_READ = 0,
|
|
+ BLKG_RWSTAT_WRITE = 1,
|
|
+ BLKG_RWSTAT_SYNC = 2,
|
|
+ BLKG_RWSTAT_ASYNC = 3,
|
|
+ BLKG_RWSTAT_DISCARD = 4,
|
|
+ BLKG_RWSTAT_NR = 5,
|
|
+ BLKG_RWSTAT_TOTAL = 5,
|
|
+};
|
|
+
|
|
+struct blkcg_policy_data;
|
|
+
|
|
+struct blkcg {
|
|
+ struct cgroup_subsys_state css;
|
|
+ spinlock_t lock;
|
|
+ struct radix_tree_root blkg_tree;
|
|
+ struct blkcg_gq *blkg_hint;
|
|
+ struct hlist_head blkg_list;
|
|
+ struct blkcg_policy_data *cpd[5];
|
|
+ struct list_head all_blkcgs_node;
|
|
+ struct list_head cgwb_list;
|
|
+ refcount_t cgwb_refcnt;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct blkcg_policy_data {
|
|
+ struct blkcg *blkcg;
|
|
+ int plid;
|
|
+};
|
|
+
|
|
+struct blkg_policy_data {
|
|
+ struct blkcg_gq *blkg;
|
|
+ int plid;
|
|
+};
|
|
+
|
|
+enum perf_sw_ids {
|
|
+ PERF_COUNT_SW_CPU_CLOCK = 0,
|
|
+ PERF_COUNT_SW_TASK_CLOCK = 1,
|
|
+ PERF_COUNT_SW_PAGE_FAULTS = 2,
|
|
+ PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
|
|
+ PERF_COUNT_SW_CPU_MIGRATIONS = 4,
|
|
+ PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
|
|
+ PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
|
|
+ PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
|
|
+ PERF_COUNT_SW_EMULATION_FAULTS = 8,
|
|
+ PERF_COUNT_SW_DUMMY = 9,
|
|
+ PERF_COUNT_SW_BPF_OUTPUT = 10,
|
|
+ PERF_COUNT_SW_MAX = 11,
|
|
+};
|
|
+
|
|
+enum perf_branch_sample_type_shift {
|
|
+ PERF_SAMPLE_BRANCH_USER_SHIFT = 0,
|
|
+ PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1,
|
|
+ PERF_SAMPLE_BRANCH_HV_SHIFT = 2,
|
|
+ PERF_SAMPLE_BRANCH_ANY_SHIFT = 3,
|
|
+ PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4,
|
|
+ PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5,
|
|
+ PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6,
|
|
+ PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7,
|
|
+ PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8,
|
|
+ PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9,
|
|
+ PERF_SAMPLE_BRANCH_COND_SHIFT = 10,
|
|
+ PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11,
|
|
+ PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12,
|
|
+ PERF_SAMPLE_BRANCH_CALL_SHIFT = 13,
|
|
+ PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14,
|
|
+ PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15,
|
|
+ PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16,
|
|
+ PERF_SAMPLE_BRANCH_MAX_SHIFT = 17,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TSK_TRACE_FL_TRACE_BIT = 0,
|
|
+ TSK_TRACE_FL_GRAPH_BIT = 1,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TRACE_EVENT_FL_FILTERED_BIT = 0,
|
|
+ TRACE_EVENT_FL_CAP_ANY_BIT = 1,
|
|
+ TRACE_EVENT_FL_NO_SET_FILTER_BIT = 2,
|
|
+ TRACE_EVENT_FL_IGNORE_ENABLE_BIT = 3,
|
|
+ TRACE_EVENT_FL_TRACEPOINT_BIT = 4,
|
|
+ TRACE_EVENT_FL_KPROBE_BIT = 5,
|
|
+ TRACE_EVENT_FL_UPROBE_BIT = 6,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ EVENT_FILE_FL_ENABLED_BIT = 0,
|
|
+ EVENT_FILE_FL_RECORDED_CMD_BIT = 1,
|
|
+ EVENT_FILE_FL_RECORDED_TGID_BIT = 2,
|
|
+ EVENT_FILE_FL_FILTERED_BIT = 3,
|
|
+ EVENT_FILE_FL_NO_SET_FILTER_BIT = 4,
|
|
+ EVENT_FILE_FL_SOFT_MODE_BIT = 5,
|
|
+ EVENT_FILE_FL_SOFT_DISABLED_BIT = 6,
|
|
+ EVENT_FILE_FL_TRIGGER_MODE_BIT = 7,
|
|
+ EVENT_FILE_FL_TRIGGER_COND_BIT = 8,
|
|
+ EVENT_FILE_FL_PID_FILTER_BIT = 9,
|
|
+ EVENT_FILE_FL_WAS_ENABLED_BIT = 10,
|
|
+};
|
|
+
|
|
+struct uuidcmp {
|
|
+ const char *uuid;
|
|
+ int len;
|
|
+};
|
|
+
|
|
+typedef long unsigned int pao_T__;
|
|
+
|
|
+struct subprocess_info {
|
|
+ struct work_struct work;
|
|
+ struct completion *complete;
|
|
+ const char *path;
|
|
+ char **argv;
|
|
+ char **envp;
|
|
+ struct file *file;
|
|
+ int wait;
|
|
+ int retval;
|
|
+ pid_t pid;
|
|
+ int (*init)(struct subprocess_info *, struct cred *);
|
|
+ void (*cleanup)(struct subprocess_info *);
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TASKSTATS_CMD_UNSPEC = 0,
|
|
+ TASKSTATS_CMD_GET = 1,
|
|
+ TASKSTATS_CMD_NEW = 2,
|
|
+ __TASKSTATS_CMD_MAX = 3,
|
|
+};
|
|
+
|
|
+enum ucount_type {
|
|
+ UCOUNT_USER_NAMESPACES = 0,
|
|
+ UCOUNT_PID_NAMESPACES = 1,
|
|
+ UCOUNT_UTS_NAMESPACES = 2,
|
|
+ UCOUNT_IPC_NAMESPACES = 3,
|
|
+ UCOUNT_NET_NAMESPACES = 4,
|
|
+ UCOUNT_MNT_NAMESPACES = 5,
|
|
+ UCOUNT_CGROUP_NAMESPACES = 6,
|
|
+ UCOUNT_INOTIFY_INSTANCES = 7,
|
|
+ UCOUNT_INOTIFY_WATCHES = 8,
|
|
+ UCOUNT_COUNTS = 9,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ HI_SOFTIRQ = 0,
|
|
+ TIMER_SOFTIRQ = 1,
|
|
+ NET_TX_SOFTIRQ = 2,
|
|
+ NET_RX_SOFTIRQ = 3,
|
|
+ BLOCK_SOFTIRQ = 4,
|
|
+ IRQ_POLL_SOFTIRQ = 5,
|
|
+ TASKLET_SOFTIRQ = 6,
|
|
+ SCHED_SOFTIRQ = 7,
|
|
+ HRTIMER_SOFTIRQ = 8,
|
|
+ RCU_SOFTIRQ = 9,
|
|
+ NR_SOFTIRQS = 10,
|
|
+};
|
|
+
|
|
+enum cpu_usage_stat {
|
|
+ CPUTIME_USER = 0,
|
|
+ CPUTIME_NICE = 1,
|
|
+ CPUTIME_SYSTEM = 2,
|
|
+ CPUTIME_SOFTIRQ = 3,
|
|
+ CPUTIME_IRQ = 4,
|
|
+ CPUTIME_IDLE = 5,
|
|
+ CPUTIME_IOWAIT = 6,
|
|
+ CPUTIME_STEAL = 7,
|
|
+ CPUTIME_GUEST = 8,
|
|
+ CPUTIME_GUEST_NICE = 9,
|
|
+ CPUTIME_SOFTIRQ_IDLE = 10,
|
|
+ CPUTIME_IRQ_IDLE = 11,
|
|
+ NR_STATS = 12,
|
|
+};
|
|
+
|
|
+enum cgroup_subsys_id {
|
|
+ cpuset_cgrp_id = 0,
|
|
+ cpu_cgrp_id = 1,
|
|
+ cpuacct_cgrp_id = 2,
|
|
+ io_cgrp_id = 3,
|
|
+ memory_cgrp_id = 4,
|
|
+ devices_cgrp_id = 5,
|
|
+ freezer_cgrp_id = 6,
|
|
+ net_cls_cgrp_id = 7,
|
|
+ perf_event_cgrp_id = 8,
|
|
+ net_prio_cgrp_id = 9,
|
|
+ hugetlb_cgrp_id = 10,
|
|
+ pids_cgrp_id = 11,
|
|
+ rdma_cgrp_id = 12,
|
|
+ files_cgrp_id = 13,
|
|
+ CGROUP_SUBSYS_COUNT = 14,
|
|
+};
|
|
+
|
|
+struct mdu_array_info_s {
|
|
+ int major_version;
|
|
+ int minor_version;
|
|
+ int patch_version;
|
|
+ unsigned int ctime;
|
|
+ int level;
|
|
+ int size;
|
|
+ int nr_disks;
|
|
+ int raid_disks;
|
|
+ int md_minor;
|
|
+ int not_persistent;
|
|
+ unsigned int utime;
|
|
+ int state;
|
|
+ int active_disks;
|
|
+ int working_disks;
|
|
+ int failed_disks;
|
|
+ int spare_disks;
|
|
+ int layout;
|
|
+ int chunk_size;
|
|
+};
|
|
+
|
|
+typedef struct mdu_array_info_s mdu_array_info_t;
|
|
+
|
|
+struct mdu_disk_info_s {
|
|
+ int number;
|
|
+ int major;
|
|
+ int minor;
|
|
+ int raid_disk;
|
|
+ int state;
|
|
+};
|
|
+
|
|
+typedef struct mdu_disk_info_s mdu_disk_info_t;
|
|
+
|
|
+struct hash {
|
|
+ int ino;
|
|
+ int minor;
|
|
+ int major;
|
|
+ umode_t mode;
|
|
+ struct hash *next;
|
|
+ char name[4098];
|
|
+};
|
|
+
|
|
+struct dir_entry {
|
|
+ struct list_head list;
|
|
+ char *name;
|
|
+ time64_t mtime;
|
|
+};
|
|
+
|
|
+enum state {
|
|
+ Start = 0,
|
|
+ Collect = 1,
|
|
+ GotHeader = 2,
|
|
+ SkipIt = 3,
|
|
+ GotName = 4,
|
|
+ CopyFile = 5,
|
|
+ GotSymlink = 6,
|
|
+ Reset = 7,
|
|
+};
|
|
+
|
|
+typedef int (*decompress_fn)(unsigned char *, long int, long int (*)(void *, long unsigned int), long int (*)(void *, long unsigned int), unsigned char *, long int *, void (*)(char *));
|
|
+
|
|
+typedef u32 note_buf_t[92];
|
|
+
|
|
+struct kimage_arch {
|
|
+ p4d_t *p4d;
|
|
+ pud_t *pud;
|
|
+ pmd_t *pmd;
|
|
+ pte_t *pte;
|
|
+ long unsigned int backup_src_start;
|
|
+ long unsigned int backup_src_sz;
|
|
+ long unsigned int backup_load_addr;
|
|
+ void *elf_headers;
|
|
+ long unsigned int elf_headers_sz;
|
|
+ long unsigned int elf_load_addr;
|
|
+};
|
|
+
|
|
+typedef void crash_vmclear_fn();
|
|
+
|
|
+typedef long unsigned int kimage_entry_t;
|
|
+
|
|
+struct kexec_segment {
|
|
+ union {
|
|
+ void *buf;
|
|
+ void *kbuf;
|
|
+ };
|
|
+ size_t bufsz;
|
|
+ long unsigned int mem;
|
|
+ size_t memsz;
|
|
+};
|
|
+
|
|
+struct purgatory_info {
|
|
+ const Elf64_Ehdr *ehdr;
|
|
+ Elf64_Shdr *sechdrs;
|
|
+ void *purgatory_buf;
|
|
+};
|
|
+
|
|
+typedef int kexec_probe_t(const char *, long unsigned int);
|
|
+
|
|
+struct kimage;
|
|
+
|
|
+typedef void *kexec_load_t(struct kimage *, char *, long unsigned int, char *, long unsigned int, char *, long unsigned int);
|
|
+
|
|
+struct kexec_file_ops;
|
|
+
|
|
+struct kimage {
|
|
+ kimage_entry_t head;
|
|
+ kimage_entry_t *entry;
|
|
+ kimage_entry_t *last_entry;
|
|
+ long unsigned int start;
|
|
+ struct page *control_code_page;
|
|
+ struct page *swap_page;
|
|
+ void *vmcoreinfo_data_copy;
|
|
+ long unsigned int nr_segments;
|
|
+ struct kexec_segment segment[16];
|
|
+ struct list_head control_pages;
|
|
+ struct list_head dest_pages;
|
|
+ struct list_head unusable_pages;
|
|
+ long unsigned int control_page;
|
|
+ unsigned int type: 2;
|
|
+ unsigned int preserve_context: 1;
|
|
+ unsigned int file_mode: 1;
|
|
+ struct kimage_arch arch;
|
|
+ void *kernel_buf;
|
|
+ long unsigned int kernel_buf_len;
|
|
+ void *initrd_buf;
|
|
+ long unsigned int initrd_buf_len;
|
|
+ char *cmdline_buf;
|
|
+ long unsigned int cmdline_buf_len;
|
|
+ const struct kexec_file_ops *fops;
|
|
+ void *image_loader_data;
|
|
+ struct purgatory_info purgatory_info;
|
|
+};
|
|
+
|
|
+typedef int kexec_cleanup_t(void *);
|
|
+
|
|
+typedef int kexec_verify_sig_t(const char *, long unsigned int);
|
|
+
|
|
+struct kexec_file_ops {
|
|
+ kexec_probe_t *probe;
|
|
+ kexec_load_t *load;
|
|
+ kexec_cleanup_t *cleanup;
|
|
+ kexec_verify_sig_t *verify_sig;
|
|
+};
|
|
+
|
|
+enum flow_dissector_key_id {
|
|
+ FLOW_DISSECTOR_KEY_CONTROL = 0,
|
|
+ FLOW_DISSECTOR_KEY_BASIC = 1,
|
|
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS = 2,
|
|
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS = 3,
|
|
+ FLOW_DISSECTOR_KEY_PORTS = 4,
|
|
+ FLOW_DISSECTOR_KEY_ICMP = 5,
|
|
+ FLOW_DISSECTOR_KEY_ETH_ADDRS = 6,
|
|
+ FLOW_DISSECTOR_KEY_TIPC = 7,
|
|
+ FLOW_DISSECTOR_KEY_ARP = 8,
|
|
+ FLOW_DISSECTOR_KEY_VLAN = 9,
|
|
+ FLOW_DISSECTOR_KEY_FLOW_LABEL = 10,
|
|
+ FLOW_DISSECTOR_KEY_GRE_KEYID = 11,
|
|
+ FLOW_DISSECTOR_KEY_MPLS_ENTROPY = 12,
|
|
+ FLOW_DISSECTOR_KEY_ENC_KEYID = 13,
|
|
+ FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS = 14,
|
|
+ FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS = 15,
|
|
+ FLOW_DISSECTOR_KEY_ENC_CONTROL = 16,
|
|
+ FLOW_DISSECTOR_KEY_ENC_PORTS = 17,
|
|
+ FLOW_DISSECTOR_KEY_MPLS = 18,
|
|
+ FLOW_DISSECTOR_KEY_TCP = 19,
|
|
+ FLOW_DISSECTOR_KEY_IP = 20,
|
|
+ FLOW_DISSECTOR_KEY_CVLAN = 21,
|
|
+ FLOW_DISSECTOR_KEY_ENC_IP = 22,
|
|
+ FLOW_DISSECTOR_KEY_ENC_OPTS = 23,
|
|
+ FLOW_DISSECTOR_KEY_MAX = 24,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IPSTATS_MIB_NUM = 0,
|
|
+ IPSTATS_MIB_INPKTS = 1,
|
|
+ IPSTATS_MIB_INOCTETS = 2,
|
|
+ IPSTATS_MIB_INDELIVERS = 3,
|
|
+ IPSTATS_MIB_OUTFORWDATAGRAMS = 4,
|
|
+ IPSTATS_MIB_OUTPKTS = 5,
|
|
+ IPSTATS_MIB_OUTOCTETS = 6,
|
|
+ IPSTATS_MIB_INHDRERRORS = 7,
|
|
+ IPSTATS_MIB_INTOOBIGERRORS = 8,
|
|
+ IPSTATS_MIB_INNOROUTES = 9,
|
|
+ IPSTATS_MIB_INADDRERRORS = 10,
|
|
+ IPSTATS_MIB_INUNKNOWNPROTOS = 11,
|
|
+ IPSTATS_MIB_INTRUNCATEDPKTS = 12,
|
|
+ IPSTATS_MIB_INDISCARDS = 13,
|
|
+ IPSTATS_MIB_OUTDISCARDS = 14,
|
|
+ IPSTATS_MIB_OUTNOROUTES = 15,
|
|
+ IPSTATS_MIB_REASMTIMEOUT = 16,
|
|
+ IPSTATS_MIB_REASMREQDS = 17,
|
|
+ IPSTATS_MIB_REASMOKS = 18,
|
|
+ IPSTATS_MIB_REASMFAILS = 19,
|
|
+ IPSTATS_MIB_FRAGOKS = 20,
|
|
+ IPSTATS_MIB_FRAGFAILS = 21,
|
|
+ IPSTATS_MIB_FRAGCREATES = 22,
|
|
+ IPSTATS_MIB_INMCASTPKTS = 23,
|
|
+ IPSTATS_MIB_OUTMCASTPKTS = 24,
|
|
+ IPSTATS_MIB_INBCASTPKTS = 25,
|
|
+ IPSTATS_MIB_OUTBCASTPKTS = 26,
|
|
+ IPSTATS_MIB_INMCASTOCTETS = 27,
|
|
+ IPSTATS_MIB_OUTMCASTOCTETS = 28,
|
|
+ IPSTATS_MIB_INBCASTOCTETS = 29,
|
|
+ IPSTATS_MIB_OUTBCASTOCTETS = 30,
|
|
+ IPSTATS_MIB_CSUMERRORS = 31,
|
|
+ IPSTATS_MIB_NOECTPKTS = 32,
|
|
+ IPSTATS_MIB_ECT1PKTS = 33,
|
|
+ IPSTATS_MIB_ECT0PKTS = 34,
|
|
+ IPSTATS_MIB_CEPKTS = 35,
|
|
+ IPSTATS_MIB_REASM_OVERLAPS = 36,
|
|
+ __IPSTATS_MIB_MAX = 37,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ ICMP_MIB_NUM = 0,
|
|
+ ICMP_MIB_INMSGS = 1,
|
|
+ ICMP_MIB_INERRORS = 2,
|
|
+ ICMP_MIB_INDESTUNREACHS = 3,
|
|
+ ICMP_MIB_INTIMEEXCDS = 4,
|
|
+ ICMP_MIB_INPARMPROBS = 5,
|
|
+ ICMP_MIB_INSRCQUENCHS = 6,
|
|
+ ICMP_MIB_INREDIRECTS = 7,
|
|
+ ICMP_MIB_INECHOS = 8,
|
|
+ ICMP_MIB_INECHOREPS = 9,
|
|
+ ICMP_MIB_INTIMESTAMPS = 10,
|
|
+ ICMP_MIB_INTIMESTAMPREPS = 11,
|
|
+ ICMP_MIB_INADDRMASKS = 12,
|
|
+ ICMP_MIB_INADDRMASKREPS = 13,
|
|
+ ICMP_MIB_OUTMSGS = 14,
|
|
+ ICMP_MIB_OUTERRORS = 15,
|
|
+ ICMP_MIB_OUTDESTUNREACHS = 16,
|
|
+ ICMP_MIB_OUTTIMEEXCDS = 17,
|
|
+ ICMP_MIB_OUTPARMPROBS = 18,
|
|
+ ICMP_MIB_OUTSRCQUENCHS = 19,
|
|
+ ICMP_MIB_OUTREDIRECTS = 20,
|
|
+ ICMP_MIB_OUTECHOS = 21,
|
|
+ ICMP_MIB_OUTECHOREPS = 22,
|
|
+ ICMP_MIB_OUTTIMESTAMPS = 23,
|
|
+ ICMP_MIB_OUTTIMESTAMPREPS = 24,
|
|
+ ICMP_MIB_OUTADDRMASKS = 25,
|
|
+ ICMP_MIB_OUTADDRMASKREPS = 26,
|
|
+ ICMP_MIB_CSUMERRORS = 27,
|
|
+ __ICMP_MIB_MAX = 28,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ ICMP6_MIB_NUM = 0,
|
|
+ ICMP6_MIB_INMSGS = 1,
|
|
+ ICMP6_MIB_INERRORS = 2,
|
|
+ ICMP6_MIB_OUTMSGS = 3,
|
|
+ ICMP6_MIB_OUTERRORS = 4,
|
|
+ ICMP6_MIB_CSUMERRORS = 5,
|
|
+ __ICMP6_MIB_MAX = 6,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCP_MIB_NUM = 0,
|
|
+ TCP_MIB_RTOALGORITHM = 1,
|
|
+ TCP_MIB_RTOMIN = 2,
|
|
+ TCP_MIB_RTOMAX = 3,
|
|
+ TCP_MIB_MAXCONN = 4,
|
|
+ TCP_MIB_ACTIVEOPENS = 5,
|
|
+ TCP_MIB_PASSIVEOPENS = 6,
|
|
+ TCP_MIB_ATTEMPTFAILS = 7,
|
|
+ TCP_MIB_ESTABRESETS = 8,
|
|
+ TCP_MIB_CURRESTAB = 9,
|
|
+ TCP_MIB_INSEGS = 10,
|
|
+ TCP_MIB_OUTSEGS = 11,
|
|
+ TCP_MIB_RETRANSSEGS = 12,
|
|
+ TCP_MIB_INERRS = 13,
|
|
+ TCP_MIB_OUTRSTS = 14,
|
|
+ TCP_MIB_CSUMERRORS = 15,
|
|
+ __TCP_MIB_MAX = 16,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ UDP_MIB_NUM = 0,
|
|
+ UDP_MIB_INDATAGRAMS = 1,
|
|
+ UDP_MIB_NOPORTS = 2,
|
|
+ UDP_MIB_INERRORS = 3,
|
|
+ UDP_MIB_OUTDATAGRAMS = 4,
|
|
+ UDP_MIB_RCVBUFERRORS = 5,
|
|
+ UDP_MIB_SNDBUFERRORS = 6,
|
|
+ UDP_MIB_CSUMERRORS = 7,
|
|
+ UDP_MIB_IGNOREDMULTI = 8,
|
|
+ __UDP_MIB_MAX = 9,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ LINUX_MIB_NUM = 0,
|
|
+ LINUX_MIB_SYNCOOKIESSENT = 1,
|
|
+ LINUX_MIB_SYNCOOKIESRECV = 2,
|
|
+ LINUX_MIB_SYNCOOKIESFAILED = 3,
|
|
+ LINUX_MIB_EMBRYONICRSTS = 4,
|
|
+ LINUX_MIB_PRUNECALLED = 5,
|
|
+ LINUX_MIB_RCVPRUNED = 6,
|
|
+ LINUX_MIB_OFOPRUNED = 7,
|
|
+ LINUX_MIB_OUTOFWINDOWICMPS = 8,
|
|
+ LINUX_MIB_LOCKDROPPEDICMPS = 9,
|
|
+ LINUX_MIB_ARPFILTER = 10,
|
|
+ LINUX_MIB_TIMEWAITED = 11,
|
|
+ LINUX_MIB_TIMEWAITRECYCLED = 12,
|
|
+ LINUX_MIB_TIMEWAITKILLED = 13,
|
|
+ LINUX_MIB_PAWSACTIVEREJECTED = 14,
|
|
+ LINUX_MIB_PAWSESTABREJECTED = 15,
|
|
+ LINUX_MIB_DELAYEDACKS = 16,
|
|
+ LINUX_MIB_DELAYEDACKLOCKED = 17,
|
|
+ LINUX_MIB_DELAYEDACKLOST = 18,
|
|
+ LINUX_MIB_LISTENOVERFLOWS = 19,
|
|
+ LINUX_MIB_LISTENDROPS = 20,
|
|
+ LINUX_MIB_TCPHPHITS = 21,
|
|
+ LINUX_MIB_TCPPUREACKS = 22,
|
|
+ LINUX_MIB_TCPHPACKS = 23,
|
|
+ LINUX_MIB_TCPRENORECOVERY = 24,
|
|
+ LINUX_MIB_TCPSACKRECOVERY = 25,
|
|
+ LINUX_MIB_TCPSACKRENEGING = 26,
|
|
+ LINUX_MIB_TCPSACKREORDER = 27,
|
|
+ LINUX_MIB_TCPRENOREORDER = 28,
|
|
+ LINUX_MIB_TCPTSREORDER = 29,
|
|
+ LINUX_MIB_TCPFULLUNDO = 30,
|
|
+ LINUX_MIB_TCPPARTIALUNDO = 31,
|
|
+ LINUX_MIB_TCPDSACKUNDO = 32,
|
|
+ LINUX_MIB_TCPLOSSUNDO = 33,
|
|
+ LINUX_MIB_TCPLOSTRETRANSMIT = 34,
|
|
+ LINUX_MIB_TCPRENOFAILURES = 35,
|
|
+ LINUX_MIB_TCPSACKFAILURES = 36,
|
|
+ LINUX_MIB_TCPLOSSFAILURES = 37,
|
|
+ LINUX_MIB_TCPFASTRETRANS = 38,
|
|
+ LINUX_MIB_TCPSLOWSTARTRETRANS = 39,
|
|
+ LINUX_MIB_TCPTIMEOUTS = 40,
|
|
+ LINUX_MIB_TCPLOSSPROBES = 41,
|
|
+ LINUX_MIB_TCPLOSSPROBERECOVERY = 42,
|
|
+ LINUX_MIB_TCPRENORECOVERYFAIL = 43,
|
|
+ LINUX_MIB_TCPSACKRECOVERYFAIL = 44,
|
|
+ LINUX_MIB_TCPRCVCOLLAPSED = 45,
|
|
+ LINUX_MIB_TCPDSACKOLDSENT = 46,
|
|
+ LINUX_MIB_TCPDSACKOFOSENT = 47,
|
|
+ LINUX_MIB_TCPDSACKRECV = 48,
|
|
+ LINUX_MIB_TCPDSACKOFORECV = 49,
|
|
+ LINUX_MIB_TCPABORTONDATA = 50,
|
|
+ LINUX_MIB_TCPABORTONCLOSE = 51,
|
|
+ LINUX_MIB_TCPABORTONMEMORY = 52,
|
|
+ LINUX_MIB_TCPABORTONTIMEOUT = 53,
|
|
+ LINUX_MIB_TCPABORTONLINGER = 54,
|
|
+ LINUX_MIB_TCPABORTFAILED = 55,
|
|
+ LINUX_MIB_TCPMEMORYPRESSURES = 56,
|
|
+ LINUX_MIB_TCPMEMORYPRESSURESCHRONO = 57,
|
|
+ LINUX_MIB_TCPSACKDISCARD = 58,
|
|
+ LINUX_MIB_TCPDSACKIGNOREDOLD = 59,
|
|
+ LINUX_MIB_TCPDSACKIGNOREDNOUNDO = 60,
|
|
+ LINUX_MIB_TCPSPURIOUSRTOS = 61,
|
|
+ LINUX_MIB_TCPMD5NOTFOUND = 62,
|
|
+ LINUX_MIB_TCPMD5UNEXPECTED = 63,
|
|
+ LINUX_MIB_TCPMD5FAILURE = 64,
|
|
+ LINUX_MIB_SACKSHIFTED = 65,
|
|
+ LINUX_MIB_SACKMERGED = 66,
|
|
+ LINUX_MIB_SACKSHIFTFALLBACK = 67,
|
|
+ LINUX_MIB_TCPBACKLOGDROP = 68,
|
|
+ LINUX_MIB_PFMEMALLOCDROP = 69,
|
|
+ LINUX_MIB_TCPMINTTLDROP = 70,
|
|
+ LINUX_MIB_TCPDEFERACCEPTDROP = 71,
|
|
+ LINUX_MIB_IPRPFILTER = 72,
|
|
+ LINUX_MIB_TCPTIMEWAITOVERFLOW = 73,
|
|
+ LINUX_MIB_TCPREQQFULLDOCOOKIES = 74,
|
|
+ LINUX_MIB_TCPREQQFULLDROP = 75,
|
|
+ LINUX_MIB_TCPRETRANSFAIL = 76,
|
|
+ LINUX_MIB_TCPRCVCOALESCE = 77,
|
|
+ LINUX_MIB_TCPBACKLOGCOALESCE = 78,
|
|
+ LINUX_MIB_TCPOFOQUEUE = 79,
|
|
+ LINUX_MIB_TCPOFODROP = 80,
|
|
+ LINUX_MIB_TCPOFOMERGE = 81,
|
|
+ LINUX_MIB_TCPCHALLENGEACK = 82,
|
|
+ LINUX_MIB_TCPSYNCHALLENGE = 83,
|
|
+ LINUX_MIB_TCPFASTOPENACTIVE = 84,
|
|
+ LINUX_MIB_TCPFASTOPENACTIVEFAIL = 85,
|
|
+ LINUX_MIB_TCPFASTOPENPASSIVE = 86,
|
|
+ LINUX_MIB_TCPFASTOPENPASSIVEFAIL = 87,
|
|
+ LINUX_MIB_TCPFASTOPENLISTENOVERFLOW = 88,
|
|
+ LINUX_MIB_TCPFASTOPENCOOKIEREQD = 89,
|
|
+ LINUX_MIB_TCPFASTOPENBLACKHOLE = 90,
|
|
+ LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES = 91,
|
|
+ LINUX_MIB_BUSYPOLLRXPACKETS = 92,
|
|
+ LINUX_MIB_TCPAUTOCORKING = 93,
|
|
+ LINUX_MIB_TCPFROMZEROWINDOWADV = 94,
|
|
+ LINUX_MIB_TCPTOZEROWINDOWADV = 95,
|
|
+ LINUX_MIB_TCPWANTZEROWINDOWADV = 96,
|
|
+ LINUX_MIB_TCPSYNRETRANS = 97,
|
|
+ LINUX_MIB_TCPORIGDATASENT = 98,
|
|
+ LINUX_MIB_TCPHYSTARTTRAINDETECT = 99,
|
|
+ LINUX_MIB_TCPHYSTARTTRAINCWND = 100,
|
|
+ LINUX_MIB_TCPHYSTARTDELAYDETECT = 101,
|
|
+ LINUX_MIB_TCPHYSTARTDELAYCWND = 102,
|
|
+ LINUX_MIB_TCPACKSKIPPEDSYNRECV = 103,
|
|
+ LINUX_MIB_TCPACKSKIPPEDPAWS = 104,
|
|
+ LINUX_MIB_TCPACKSKIPPEDSEQ = 105,
|
|
+ LINUX_MIB_TCPACKSKIPPEDFINWAIT2 = 106,
|
|
+ LINUX_MIB_TCPACKSKIPPEDTIMEWAIT = 107,
|
|
+ LINUX_MIB_TCPACKSKIPPEDCHALLENGE = 108,
|
|
+ LINUX_MIB_TCPWINPROBE = 109,
|
|
+ LINUX_MIB_TCPKEEPALIVE = 110,
|
|
+ LINUX_MIB_TCPMTUPFAIL = 111,
|
|
+ LINUX_MIB_TCPMTUPSUCCESS = 112,
|
|
+ LINUX_MIB_TCPDELIVERED = 113,
|
|
+ LINUX_MIB_TCPDELIVEREDCE = 114,
|
|
+ LINUX_MIB_TCPACKCOMPRESSED = 115,
|
|
+ LINUX_MIB_TCPZEROWINDOWDROP = 116,
|
|
+ LINUX_MIB_TCPRCVQDROP = 117,
|
|
+ LINUX_MIB_TCPWQUEUETOOBIG = 118,
|
|
+ __LINUX_MIB_MAX = 119,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ LINUX_MIB_XFRMNUM = 0,
|
|
+ LINUX_MIB_XFRMINERROR = 1,
|
|
+ LINUX_MIB_XFRMINBUFFERERROR = 2,
|
|
+ LINUX_MIB_XFRMINHDRERROR = 3,
|
|
+ LINUX_MIB_XFRMINNOSTATES = 4,
|
|
+ LINUX_MIB_XFRMINSTATEPROTOERROR = 5,
|
|
+ LINUX_MIB_XFRMINSTATEMODEERROR = 6,
|
|
+ LINUX_MIB_XFRMINSTATESEQERROR = 7,
|
|
+ LINUX_MIB_XFRMINSTATEEXPIRED = 8,
|
|
+ LINUX_MIB_XFRMINSTATEMISMATCH = 9,
|
|
+ LINUX_MIB_XFRMINSTATEINVALID = 10,
|
|
+ LINUX_MIB_XFRMINTMPLMISMATCH = 11,
|
|
+ LINUX_MIB_XFRMINNOPOLS = 12,
|
|
+ LINUX_MIB_XFRMINPOLBLOCK = 13,
|
|
+ LINUX_MIB_XFRMINPOLERROR = 14,
|
|
+ LINUX_MIB_XFRMOUTERROR = 15,
|
|
+ LINUX_MIB_XFRMOUTBUNDLEGENERROR = 16,
|
|
+ LINUX_MIB_XFRMOUTBUNDLECHECKERROR = 17,
|
|
+ LINUX_MIB_XFRMOUTNOSTATES = 18,
|
|
+ LINUX_MIB_XFRMOUTSTATEPROTOERROR = 19,
|
|
+ LINUX_MIB_XFRMOUTSTATEMODEERROR = 20,
|
|
+ LINUX_MIB_XFRMOUTSTATESEQERROR = 21,
|
|
+ LINUX_MIB_XFRMOUTSTATEEXPIRED = 22,
|
|
+ LINUX_MIB_XFRMOUTPOLBLOCK = 23,
|
|
+ LINUX_MIB_XFRMOUTPOLDEAD = 24,
|
|
+ LINUX_MIB_XFRMOUTPOLERROR = 25,
|
|
+ LINUX_MIB_XFRMFWDHDRERROR = 26,
|
|
+ LINUX_MIB_XFRMOUTSTATEINVALID = 27,
|
|
+ LINUX_MIB_XFRMACQUIREERROR = 28,
|
|
+ __LINUX_MIB_XFRMMAX = 29,
|
|
+};
|
|
+
|
|
+enum nf_inet_hooks {
|
|
+ NF_INET_PRE_ROUTING = 0,
|
|
+ NF_INET_LOCAL_IN = 1,
|
|
+ NF_INET_FORWARD = 2,
|
|
+ NF_INET_LOCAL_OUT = 3,
|
|
+ NF_INET_POST_ROUTING = 4,
|
|
+ NF_INET_NUMHOOKS = 5,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NFPROTO_UNSPEC = 0,
|
|
+ NFPROTO_INET = 1,
|
|
+ NFPROTO_IPV4 = 2,
|
|
+ NFPROTO_ARP = 3,
|
|
+ NFPROTO_NETDEV = 5,
|
|
+ NFPROTO_BRIDGE = 7,
|
|
+ NFPROTO_IPV6 = 10,
|
|
+ NFPROTO_DECNET = 12,
|
|
+ NFPROTO_NUMPROTO = 13,
|
|
+};
|
|
+
|
|
+enum tcp_conntrack {
|
|
+ TCP_CONNTRACK_NONE = 0,
|
|
+ TCP_CONNTRACK_SYN_SENT = 1,
|
|
+ TCP_CONNTRACK_SYN_RECV = 2,
|
|
+ TCP_CONNTRACK_ESTABLISHED = 3,
|
|
+ TCP_CONNTRACK_FIN_WAIT = 4,
|
|
+ TCP_CONNTRACK_CLOSE_WAIT = 5,
|
|
+ TCP_CONNTRACK_LAST_ACK = 6,
|
|
+ TCP_CONNTRACK_TIME_WAIT = 7,
|
|
+ TCP_CONNTRACK_CLOSE = 8,
|
|
+ TCP_CONNTRACK_LISTEN = 9,
|
|
+ TCP_CONNTRACK_MAX = 10,
|
|
+ TCP_CONNTRACK_IGNORE = 11,
|
|
+ TCP_CONNTRACK_RETRANS = 12,
|
|
+ TCP_CONNTRACK_UNACK = 13,
|
|
+ TCP_CONNTRACK_TIMEOUT_MAX = 14,
|
|
+};
|
|
+
|
|
+enum ct_dccp_states {
|
|
+ CT_DCCP_NONE = 0,
|
|
+ CT_DCCP_REQUEST = 1,
|
|
+ CT_DCCP_RESPOND = 2,
|
|
+ CT_DCCP_PARTOPEN = 3,
|
|
+ CT_DCCP_OPEN = 4,
|
|
+ CT_DCCP_CLOSEREQ = 5,
|
|
+ CT_DCCP_CLOSING = 6,
|
|
+ CT_DCCP_TIMEWAIT = 7,
|
|
+ CT_DCCP_IGNORE = 8,
|
|
+ CT_DCCP_INVALID = 9,
|
|
+ __CT_DCCP_MAX = 10,
|
|
+};
|
|
+
|
|
+enum ip_conntrack_dir {
|
|
+ IP_CT_DIR_ORIGINAL = 0,
|
|
+ IP_CT_DIR_REPLY = 1,
|
|
+ IP_CT_DIR_MAX = 2,
|
|
+};
|
|
+
|
|
+enum sctp_conntrack {
|
|
+ SCTP_CONNTRACK_NONE = 0,
|
|
+ SCTP_CONNTRACK_CLOSED = 1,
|
|
+ SCTP_CONNTRACK_COOKIE_WAIT = 2,
|
|
+ SCTP_CONNTRACK_COOKIE_ECHOED = 3,
|
|
+ SCTP_CONNTRACK_ESTABLISHED = 4,
|
|
+ SCTP_CONNTRACK_SHUTDOWN_SENT = 5,
|
|
+ SCTP_CONNTRACK_SHUTDOWN_RECD = 6,
|
|
+ SCTP_CONNTRACK_SHUTDOWN_ACK_SENT = 7,
|
|
+ SCTP_CONNTRACK_HEARTBEAT_SENT = 8,
|
|
+ SCTP_CONNTRACK_HEARTBEAT_ACKED = 9,
|
|
+ SCTP_CONNTRACK_MAX = 10,
|
|
+};
|
|
+
|
|
+enum udp_conntrack {
|
|
+ UDP_CT_UNREPLIED = 0,
|
|
+ UDP_CT_REPLIED = 1,
|
|
+ UDP_CT_MAX = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ XFRM_POLICY_IN = 0,
|
|
+ XFRM_POLICY_OUT = 1,
|
|
+ XFRM_POLICY_FWD = 2,
|
|
+ XFRM_POLICY_MASK = 3,
|
|
+ XFRM_POLICY_MAX = 3,
|
|
+};
|
|
+
|
|
+enum sched_tunable_scaling {
|
|
+ SCHED_TUNABLESCALING_NONE = 0,
|
|
+ SCHED_TUNABLESCALING_LOG = 1,
|
|
+ SCHED_TUNABLESCALING_LINEAR = 2,
|
|
+ SCHED_TUNABLESCALING_END = 3,
|
|
+};
|
|
+
|
|
+typedef long int (*sys_call_ptr_t)(const struct pt_regs *);
|
|
+
|
|
+struct seccomp_data {
|
|
+ int nr;
|
|
+ __u32 arch;
|
|
+ __u64 instruction_pointer;
|
|
+ __u64 args[6];
|
|
+};
|
|
+
|
|
+struct ksignal {
|
|
+ struct k_sigaction ka;
|
|
+ siginfo_t info;
|
|
+ int sig;
|
|
+};
|
|
+
|
|
+struct __large_struct {
|
|
+ long unsigned int buf[100];
|
|
+};
|
|
+
|
|
+typedef u8 kprobe_opcode_t;
|
|
+
|
|
+struct arch_specific_insn {
|
|
+ kprobe_opcode_t *insn;
|
|
+ bool boostable;
|
|
+ bool if_modifier;
|
|
+};
|
|
+
|
|
+struct kprobe;
|
|
+
|
|
+struct prev_kprobe {
|
|
+ struct kprobe *kp;
|
|
+ long unsigned int status;
|
|
+ long unsigned int old_flags;
|
|
+ long unsigned int saved_flags;
|
|
+};
|
|
+
|
|
+typedef int (*kprobe_pre_handler_t)(struct kprobe *, struct pt_regs *);
|
|
+
|
|
+typedef void (*kprobe_post_handler_t)(struct kprobe *, struct pt_regs *, long unsigned int);
|
|
+
|
|
+typedef int (*kprobe_fault_handler_t)(struct kprobe *, struct pt_regs *, int);
|
|
+
|
|
+struct kprobe {
|
|
+ struct hlist_node hlist;
|
|
+ struct list_head list;
|
|
+ long unsigned int nmissed;
|
|
+ kprobe_opcode_t *addr;
|
|
+ const char *symbol_name;
|
|
+ unsigned int offset;
|
|
+ kprobe_pre_handler_t pre_handler;
|
|
+ kprobe_post_handler_t post_handler;
|
|
+ kprobe_fault_handler_t fault_handler;
|
|
+ kprobe_opcode_t opcode;
|
|
+ struct arch_specific_insn ainsn;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+struct kprobe_ctlblk {
|
|
+ long unsigned int kprobe_status;
|
|
+ long unsigned int kprobe_old_flags;
|
|
+ long unsigned int kprobe_saved_flags;
|
|
+ struct prev_kprobe prev_kprobe;
|
|
+};
|
|
+
|
|
+struct kretprobe_blackpoint {
|
|
+ const char *name;
|
|
+ void *addr;
|
|
+};
|
|
+
|
|
+struct kprobe_insn_cache {
|
|
+ struct mutex mutex;
|
|
+ void * (*alloc)();
|
|
+ void (*free)(void *);
|
|
+ struct list_head pages;
|
|
+ size_t insn_size;
|
|
+ int nr_garbage;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sys_enter {
|
|
+ struct trace_entry ent;
|
|
+ long int id;
|
|
+ long unsigned int args[6];
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sys_exit {
|
|
+ struct trace_entry ent;
|
|
+ long int id;
|
|
+ long int ret;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_sys_enter {};
|
|
+
|
|
+struct trace_event_data_offsets_sys_exit {};
|
|
+
|
|
+struct alt_instr {
|
|
+ s32 instr_offset;
|
|
+ s32 repl_offset;
|
|
+ u16 cpuid;
|
|
+ u8 instrlen;
|
|
+ u8 replacementlen;
|
|
+ u8 padlen;
|
|
+} __attribute__((packed));
|
|
+
|
|
+enum {
|
|
+ DESC_TSS = 9,
|
|
+ DESC_LDT = 2,
|
|
+ DESCTYPE_S = 16,
|
|
+};
|
|
+
|
|
+struct arch_clocksource_data {
|
|
+ int vclock_mode;
|
|
+};
|
|
+
|
|
+struct clocksource {
|
|
+ u64 (*read)(struct clocksource *);
|
|
+ u64 mask;
|
|
+ u32 mult;
|
|
+ u32 shift;
|
|
+ u64 max_idle_ns;
|
|
+ u32 maxadj;
|
|
+ struct arch_clocksource_data archdata;
|
|
+ u64 max_cycles;
|
|
+ const char *name;
|
|
+ struct list_head list;
|
|
+ int rating;
|
|
+ int (*enable)(struct clocksource *);
|
|
+ void (*disable)(struct clocksource *);
|
|
+ long unsigned int flags;
|
|
+ void (*suspend)(struct clocksource *);
|
|
+ void (*resume)(struct clocksource *);
|
|
+ void (*mark_unstable)(struct clocksource *);
|
|
+ void (*tick_stable)(struct clocksource *);
|
|
+ struct list_head wd_list;
|
|
+ u64 cs_last;
|
|
+ u64 wd_last;
|
|
+ struct module *owner;
|
|
+};
|
|
+
|
|
+struct vm_special_mapping {
|
|
+ const char *name;
|
|
+ struct page **pages;
|
|
+ vm_fault_t (*fault)(const struct vm_special_mapping *, struct vm_area_struct *, struct vm_fault *);
|
|
+ int (*mremap)(const struct vm_special_mapping *, struct vm_area_struct *);
|
|
+};
|
|
+
|
|
+struct pvclock_vcpu_time_info {
|
|
+ u32 version;
|
|
+ u32 pad0;
|
|
+ u64 tsc_timestamp;
|
|
+ u64 system_time;
|
|
+ u32 tsc_to_system_mul;
|
|
+ s8 tsc_shift;
|
|
+ u8 flags;
|
|
+ u8 pad[2];
|
|
+};
|
|
+
|
|
+struct pvclock_vsyscall_time_info {
|
|
+ struct pvclock_vcpu_time_info pvti;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+typedef long unsigned int gtod_long_t;
|
|
+
|
|
+struct vsyscall_gtod_data {
|
|
+ unsigned int seq;
|
|
+ int vclock_mode;
|
|
+ u64 cycle_last;
|
|
+ u64 mask;
|
|
+ u32 mult;
|
|
+ u32 shift;
|
|
+ u64 wall_time_snsec;
|
|
+ gtod_long_t wall_time_sec;
|
|
+ gtod_long_t monotonic_time_sec;
|
|
+ u64 monotonic_time_snsec;
|
|
+ gtod_long_t wall_time_coarse_sec;
|
|
+ gtod_long_t wall_time_coarse_nsec;
|
|
+ gtod_long_t monotonic_time_coarse_sec;
|
|
+ gtod_long_t monotonic_time_coarse_nsec;
|
|
+ int tz_minuteswest;
|
|
+ int tz_dsttime;
|
|
+};
|
|
+
|
|
+struct ms_hyperv_tsc_page {
|
|
+ volatile u32 tsc_sequence;
|
|
+ u32 reserved1;
|
|
+ volatile u64 tsc_scale;
|
|
+ volatile s64 tsc_offset;
|
|
+ u64 reserved2[509];
|
|
+};
|
|
+
|
|
+struct hv_vp_assist_page {
|
|
+ __u32 apic_assist;
|
|
+ __u32 reserved;
|
|
+ __u64 vtl_control[2];
|
|
+ __u64 nested_enlightenments_control[2];
|
|
+ __u32 enlighten_vmentry;
|
|
+ __u64 current_nested_vmcs;
|
|
+};
|
|
+
|
|
+struct ms_hyperv_info {
|
|
+ u32 features;
|
|
+ u32 misc_features;
|
|
+ u32 hints;
|
|
+ u32 nested_features;
|
|
+ u32 max_vp_index;
|
|
+ u32 max_lp_index;
|
|
+};
|
|
+
|
|
+enum migratetype {
|
|
+ MIGRATE_UNMOVABLE = 0,
|
|
+ MIGRATE_MOVABLE = 1,
|
|
+ MIGRATE_RECLAIMABLE = 2,
|
|
+ MIGRATE_PCPTYPES = 3,
|
|
+ MIGRATE_HIGHATOMIC = 3,
|
|
+ MIGRATE_ISOLATE = 4,
|
|
+ MIGRATE_TYPES = 5,
|
|
+};
|
|
+
|
|
+enum zone_watermarks {
|
|
+ WMARK_MIN = 0,
|
|
+ WMARK_LOW = 1,
|
|
+ WMARK_HIGH = 2,
|
|
+ NR_WMARK = 3,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ ZONELIST_FALLBACK = 0,
|
|
+ ZONELIST_NOFALLBACK = 1,
|
|
+ MAX_ZONELISTS = 2,
|
|
+};
|
|
+
|
|
+struct tk_read_base {
|
|
+ struct clocksource *clock;
|
|
+ u64 mask;
|
|
+ u64 cycle_last;
|
|
+ u32 mult;
|
|
+ u32 shift;
|
|
+ u64 xtime_nsec;
|
|
+ ktime_t base;
|
|
+ u64 base_real;
|
|
+};
|
|
+
|
|
+struct timekeeper {
|
|
+ struct tk_read_base tkr_mono;
|
|
+ struct tk_read_base tkr_raw;
|
|
+ u64 xtime_sec;
|
|
+ long unsigned int ktime_sec;
|
|
+ struct timespec64 wall_to_monotonic;
|
|
+ ktime_t offs_real;
|
|
+ ktime_t offs_boot;
|
|
+ ktime_t offs_tai;
|
|
+ s32 tai_offset;
|
|
+ unsigned int clock_was_set_seq;
|
|
+ u8 cs_was_changed_seq;
|
|
+ ktime_t next_leap_ktime;
|
|
+ u64 raw_sec;
|
|
+ u64 cycle_interval;
|
|
+ u64 xtime_interval;
|
|
+ s64 xtime_remainder;
|
|
+ u64 raw_interval;
|
|
+ u64 ntp_tick;
|
|
+ s64 ntp_error;
|
|
+ u32 ntp_error_shift;
|
|
+ u32 ntp_err_mult;
|
|
+ u32 skip_second_overflow;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ X86_TRAP_DE = 0,
|
|
+ X86_TRAP_DB = 1,
|
|
+ X86_TRAP_NMI = 2,
|
|
+ X86_TRAP_BP = 3,
|
|
+ X86_TRAP_OF = 4,
|
|
+ X86_TRAP_BR = 5,
|
|
+ X86_TRAP_UD = 6,
|
|
+ X86_TRAP_NM = 7,
|
|
+ X86_TRAP_DF = 8,
|
|
+ X86_TRAP_OLD_MF = 9,
|
|
+ X86_TRAP_TS = 10,
|
|
+ X86_TRAP_NP = 11,
|
|
+ X86_TRAP_SS = 12,
|
|
+ X86_TRAP_GP = 13,
|
|
+ X86_TRAP_PF = 14,
|
|
+ X86_TRAP_SPURIOUS = 15,
|
|
+ X86_TRAP_MF = 16,
|
|
+ X86_TRAP_AC = 17,
|
|
+ X86_TRAP_MC = 18,
|
|
+ X86_TRAP_XF = 19,
|
|
+ X86_TRAP_IRET = 32,
|
|
+};
|
|
+
|
|
+struct trace_event_raw_emulate_vsyscall {
|
|
+ struct trace_entry ent;
|
|
+ int nr;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_emulate_vsyscall {};
|
|
+
|
|
+enum {
|
|
+ EMULATE = 0,
|
|
+ NONE = 1,
|
|
+};
|
|
+
|
|
+enum perf_type_id {
|
|
+ PERF_TYPE_HARDWARE = 0,
|
|
+ PERF_TYPE_SOFTWARE = 1,
|
|
+ PERF_TYPE_TRACEPOINT = 2,
|
|
+ PERF_TYPE_HW_CACHE = 3,
|
|
+ PERF_TYPE_RAW = 4,
|
|
+ PERF_TYPE_BREAKPOINT = 5,
|
|
+ PERF_TYPE_MAX = 6,
|
|
+};
|
|
+
|
|
+enum perf_hw_id {
|
|
+ PERF_COUNT_HW_CPU_CYCLES = 0,
|
|
+ PERF_COUNT_HW_INSTRUCTIONS = 1,
|
|
+ PERF_COUNT_HW_CACHE_REFERENCES = 2,
|
|
+ PERF_COUNT_HW_CACHE_MISSES = 3,
|
|
+ PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
|
|
+ PERF_COUNT_HW_BRANCH_MISSES = 5,
|
|
+ PERF_COUNT_HW_BUS_CYCLES = 6,
|
|
+ PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
|
|
+ PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
|
|
+ PERF_COUNT_HW_REF_CPU_CYCLES = 9,
|
|
+ PERF_COUNT_HW_MAX = 10,
|
|
+};
|
|
+
|
|
+enum perf_hw_cache_id {
|
|
+ PERF_COUNT_HW_CACHE_L1D = 0,
|
|
+ PERF_COUNT_HW_CACHE_L1I = 1,
|
|
+ PERF_COUNT_HW_CACHE_LL = 2,
|
|
+ PERF_COUNT_HW_CACHE_DTLB = 3,
|
|
+ PERF_COUNT_HW_CACHE_ITLB = 4,
|
|
+ PERF_COUNT_HW_CACHE_BPU = 5,
|
|
+ PERF_COUNT_HW_CACHE_NODE = 6,
|
|
+ PERF_COUNT_HW_CACHE_MAX = 7,
|
|
+};
|
|
+
|
|
+enum perf_hw_cache_op_id {
|
|
+ PERF_COUNT_HW_CACHE_OP_READ = 0,
|
|
+ PERF_COUNT_HW_CACHE_OP_WRITE = 1,
|
|
+ PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
|
|
+ PERF_COUNT_HW_CACHE_OP_MAX = 3,
|
|
+};
|
|
+
|
|
+enum perf_hw_cache_op_result_id {
|
|
+ PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
|
|
+ PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
|
|
+ PERF_COUNT_HW_CACHE_RESULT_MAX = 2,
|
|
+};
|
|
+
|
|
+enum perf_event_sample_format {
|
|
+ PERF_SAMPLE_IP = 1,
|
|
+ PERF_SAMPLE_TID = 2,
|
|
+ PERF_SAMPLE_TIME = 4,
|
|
+ PERF_SAMPLE_ADDR = 8,
|
|
+ PERF_SAMPLE_READ = 16,
|
|
+ PERF_SAMPLE_CALLCHAIN = 32,
|
|
+ PERF_SAMPLE_ID = 64,
|
|
+ PERF_SAMPLE_CPU = 128,
|
|
+ PERF_SAMPLE_PERIOD = 256,
|
|
+ PERF_SAMPLE_STREAM_ID = 512,
|
|
+ PERF_SAMPLE_RAW = 1024,
|
|
+ PERF_SAMPLE_BRANCH_STACK = 2048,
|
|
+ PERF_SAMPLE_REGS_USER = 4096,
|
|
+ PERF_SAMPLE_STACK_USER = 8192,
|
|
+ PERF_SAMPLE_WEIGHT = 16384,
|
|
+ PERF_SAMPLE_DATA_SRC = 32768,
|
|
+ PERF_SAMPLE_IDENTIFIER = 65536,
|
|
+ PERF_SAMPLE_TRANSACTION = 131072,
|
|
+ PERF_SAMPLE_REGS_INTR = 262144,
|
|
+ PERF_SAMPLE_PHYS_ADDR = 524288,
|
|
+ PERF_SAMPLE_MAX = 1048576,
|
|
+ __PERF_SAMPLE_CALLCHAIN_EARLY = 0,
|
|
+};
|
|
+
|
|
+enum perf_branch_sample_type {
|
|
+ PERF_SAMPLE_BRANCH_USER = 1,
|
|
+ PERF_SAMPLE_BRANCH_KERNEL = 2,
|
|
+ PERF_SAMPLE_BRANCH_HV = 4,
|
|
+ PERF_SAMPLE_BRANCH_ANY = 8,
|
|
+ PERF_SAMPLE_BRANCH_ANY_CALL = 16,
|
|
+ PERF_SAMPLE_BRANCH_ANY_RETURN = 32,
|
|
+ PERF_SAMPLE_BRANCH_IND_CALL = 64,
|
|
+ PERF_SAMPLE_BRANCH_ABORT_TX = 128,
|
|
+ PERF_SAMPLE_BRANCH_IN_TX = 256,
|
|
+ PERF_SAMPLE_BRANCH_NO_TX = 512,
|
|
+ PERF_SAMPLE_BRANCH_COND = 1024,
|
|
+ PERF_SAMPLE_BRANCH_CALL_STACK = 2048,
|
|
+ PERF_SAMPLE_BRANCH_IND_JUMP = 4096,
|
|
+ PERF_SAMPLE_BRANCH_CALL = 8192,
|
|
+ PERF_SAMPLE_BRANCH_NO_FLAGS = 16384,
|
|
+ PERF_SAMPLE_BRANCH_NO_CYCLES = 32768,
|
|
+ PERF_SAMPLE_BRANCH_TYPE_SAVE = 65536,
|
|
+ PERF_SAMPLE_BRANCH_MAX = 131072,
|
|
+};
|
|
+
|
|
+struct perf_event_mmap_page {
|
|
+ __u32 version;
|
|
+ __u32 compat_version;
|
|
+ __u32 lock;
|
|
+ __u32 index;
|
|
+ __s64 offset;
|
|
+ __u64 time_enabled;
|
|
+ __u64 time_running;
|
|
+ union {
|
|
+ __u64 capabilities;
|
|
+ struct {
|
|
+ __u64 cap_bit0: 1;
|
|
+ __u64 cap_bit0_is_deprecated: 1;
|
|
+ __u64 cap_user_rdpmc: 1;
|
|
+ __u64 cap_user_time: 1;
|
|
+ __u64 cap_user_time_zero: 1;
|
|
+ __u64 cap_____res: 59;
|
|
+ };
|
|
+ };
|
|
+ __u16 pmc_width;
|
|
+ __u16 time_shift;
|
|
+ __u32 time_mult;
|
|
+ __u64 time_offset;
|
|
+ __u64 time_zero;
|
|
+ __u32 size;
|
|
+ __u8 __reserved[948];
|
|
+ __u64 data_head;
|
|
+ __u64 data_tail;
|
|
+ __u64 data_offset;
|
|
+ __u64 data_size;
|
|
+ __u64 aux_head;
|
|
+ __u64 aux_tail;
|
|
+ __u64 aux_offset;
|
|
+ __u64 aux_size;
|
|
+};
|
|
+
|
|
+struct ldt_struct {
|
|
+ struct desc_struct *entries;
|
|
+ unsigned int nr_entries;
|
|
+ int slot;
|
|
+};
|
|
+
|
|
+struct x86_pmu_capability {
|
|
+ int version;
|
|
+ int num_counters_gp;
|
|
+ int num_counters_fixed;
|
|
+ int bit_width_gp;
|
|
+ int bit_width_fixed;
|
|
+ unsigned int events_mask;
|
|
+ int events_mask_len;
|
|
+};
|
|
+
|
|
+enum stack_type {
|
|
+ STACK_TYPE_UNKNOWN = 0,
|
|
+ STACK_TYPE_TASK = 1,
|
|
+ STACK_TYPE_IRQ = 2,
|
|
+ STACK_TYPE_SOFTIRQ = 3,
|
|
+ STACK_TYPE_ENTRY = 4,
|
|
+ STACK_TYPE_EXCEPTION = 5,
|
|
+ STACK_TYPE_EXCEPTION_LAST = 8,
|
|
+};
|
|
+
|
|
+struct stack_info {
|
|
+ enum stack_type type;
|
|
+ long unsigned int *begin;
|
|
+ long unsigned int *end;
|
|
+ long unsigned int *next_sp;
|
|
+};
|
|
+
|
|
+struct stack_frame {
|
|
+ struct stack_frame *next_frame;
|
|
+ long unsigned int return_address;
|
|
+};
|
|
+
|
|
+struct stack_frame_ia32 {
|
|
+ u32 next_frame;
|
|
+ u32 return_address;
|
|
+};
|
|
+
|
|
+struct perf_guest_switch_msr {
|
|
+ unsigned int msr;
|
|
+ u64 host;
|
|
+ u64 guest;
|
|
+};
|
|
+
|
|
+struct device_attribute {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct device *, struct device_attribute *, char *);
|
|
+ ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t);
|
|
+};
|
|
+
|
|
+enum perf_event_x86_regs {
|
|
+ PERF_REG_X86_AX = 0,
|
|
+ PERF_REG_X86_BX = 1,
|
|
+ PERF_REG_X86_CX = 2,
|
|
+ PERF_REG_X86_DX = 3,
|
|
+ PERF_REG_X86_SI = 4,
|
|
+ PERF_REG_X86_DI = 5,
|
|
+ PERF_REG_X86_BP = 6,
|
|
+ PERF_REG_X86_SP = 7,
|
|
+ PERF_REG_X86_IP = 8,
|
|
+ PERF_REG_X86_FLAGS = 9,
|
|
+ PERF_REG_X86_CS = 10,
|
|
+ PERF_REG_X86_SS = 11,
|
|
+ PERF_REG_X86_DS = 12,
|
|
+ PERF_REG_X86_ES = 13,
|
|
+ PERF_REG_X86_FS = 14,
|
|
+ PERF_REG_X86_GS = 15,
|
|
+ PERF_REG_X86_R8 = 16,
|
|
+ PERF_REG_X86_R9 = 17,
|
|
+ PERF_REG_X86_R10 = 18,
|
|
+ PERF_REG_X86_R11 = 19,
|
|
+ PERF_REG_X86_R12 = 20,
|
|
+ PERF_REG_X86_R13 = 21,
|
|
+ PERF_REG_X86_R14 = 22,
|
|
+ PERF_REG_X86_R15 = 23,
|
|
+ PERF_REG_X86_32_MAX = 16,
|
|
+ PERF_REG_X86_64_MAX = 24,
|
|
+ PERF_REG_X86_XMM0 = 32,
|
|
+ PERF_REG_X86_XMM1 = 34,
|
|
+ PERF_REG_X86_XMM2 = 36,
|
|
+ PERF_REG_X86_XMM3 = 38,
|
|
+ PERF_REG_X86_XMM4 = 40,
|
|
+ PERF_REG_X86_XMM5 = 42,
|
|
+ PERF_REG_X86_XMM6 = 44,
|
|
+ PERF_REG_X86_XMM7 = 46,
|
|
+ PERF_REG_X86_XMM8 = 48,
|
|
+ PERF_REG_X86_XMM9 = 50,
|
|
+ PERF_REG_X86_XMM10 = 52,
|
|
+ PERF_REG_X86_XMM11 = 54,
|
|
+ PERF_REG_X86_XMM12 = 56,
|
|
+ PERF_REG_X86_XMM13 = 58,
|
|
+ PERF_REG_X86_XMM14 = 60,
|
|
+ PERF_REG_X86_XMM15 = 62,
|
|
+ PERF_REG_X86_XMM_MAX = 64,
|
|
+};
|
|
+
|
|
+struct perf_callchain_entry_ctx {
|
|
+ struct perf_callchain_entry *entry;
|
|
+ u32 max_stack;
|
|
+ u32 nr;
|
|
+ short int contexts;
|
|
+ bool contexts_maxed;
|
|
+};
|
|
+
|
|
+struct perf_pmu_events_attr {
|
|
+ struct device_attribute attr;
|
|
+ u64 id;
|
|
+ const char *event_str;
|
|
+};
|
|
+
|
|
+struct perf_pmu_events_ht_attr {
|
|
+ struct device_attribute attr;
|
|
+ u64 id;
|
|
+ const char *event_str_ht;
|
|
+ const char *event_str_noht;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NMI_LOCAL = 0,
|
|
+ NMI_UNKNOWN = 1,
|
|
+ NMI_SERR = 2,
|
|
+ NMI_IO_CHECK = 3,
|
|
+ NMI_MAX = 4,
|
|
+};
|
|
+
|
|
+typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);
|
|
+
|
|
+struct nmiaction {
|
|
+ struct list_head list;
|
|
+ nmi_handler_t handler;
|
|
+ u64 max_duration;
|
|
+ struct irq_work irq_work;
|
|
+ long unsigned int flags;
|
|
+ const char *name;
|
|
+};
|
|
+
|
|
+struct cyc2ns_data {
|
|
+ u32 cyc2ns_mul;
|
|
+ u32 cyc2ns_shift;
|
|
+ u64 cyc2ns_offset;
|
|
+};
|
|
+
|
|
+struct unwind_state {
|
|
+ struct stack_info stack_info;
|
|
+ long unsigned int stack_mask;
|
|
+ struct task_struct *task;
|
|
+ int graph_idx;
|
|
+ bool error;
|
|
+ bool signal;
|
|
+ bool full_regs;
|
|
+ long unsigned int sp;
|
|
+ long unsigned int bp;
|
|
+ long unsigned int ip;
|
|
+ struct pt_regs *regs;
|
|
+};
|
|
+
|
|
+enum extra_reg_type {
|
|
+ EXTRA_REG_NONE = -1,
|
|
+ EXTRA_REG_RSP_0 = 0,
|
|
+ EXTRA_REG_RSP_1 = 1,
|
|
+ EXTRA_REG_LBR = 2,
|
|
+ EXTRA_REG_LDLAT = 3,
|
|
+ EXTRA_REG_FE = 4,
|
|
+ EXTRA_REG_MAX = 5,
|
|
+};
|
|
+
|
|
+struct event_constraint {
|
|
+ union {
|
|
+ long unsigned int idxmsk[1];
|
|
+ u64 idxmsk64;
|
|
+ };
|
|
+ u64 code;
|
|
+ u64 cmask;
|
|
+ int weight;
|
|
+ int overlap;
|
|
+ int flags;
|
|
+ unsigned int size;
|
|
+};
|
|
+
|
|
+struct amd_nb {
|
|
+ int nb_id;
|
|
+ int refcnt;
|
|
+ struct perf_event *owners[64];
|
|
+ struct event_constraint event_constraints[64];
|
|
+};
|
|
+
|
|
+struct er_account {
|
|
+ raw_spinlock_t lock;
|
|
+ u64 config;
|
|
+ u64 reg;
|
|
+ atomic_t ref;
|
|
+};
|
|
+
|
|
+struct intel_shared_regs {
|
|
+ struct er_account regs[5];
|
|
+ int refcnt;
|
|
+ unsigned int core_id;
|
|
+};
|
|
+
|
|
+enum intel_excl_state_type {
|
|
+ INTEL_EXCL_UNUSED = 0,
|
|
+ INTEL_EXCL_SHARED = 1,
|
|
+ INTEL_EXCL_EXCLUSIVE = 2,
|
|
+};
|
|
+
|
|
+struct intel_excl_states {
|
|
+ enum intel_excl_state_type state[64];
|
|
+ bool sched_started;
|
|
+};
|
|
+
|
|
+struct intel_excl_cntrs {
|
|
+ raw_spinlock_t lock;
|
|
+ struct intel_excl_states states[2];
|
|
+ union {
|
|
+ u16 has_exclusive[2];
|
|
+ u32 exclusive_present;
|
|
+ };
|
|
+ int refcnt;
|
|
+ unsigned int core_id;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ X86_PERF_KFREE_SHARED = 0,
|
|
+ X86_PERF_KFREE_EXCL = 1,
|
|
+ X86_PERF_KFREE_MAX = 2,
|
|
+};
|
|
+
|
|
+struct x86_perf_task_context;
|
|
+
|
|
+struct cpu_hw_events {
|
|
+ struct perf_event *events[64];
|
|
+ long unsigned int active_mask[1];
|
|
+ long unsigned int running[1];
|
|
+ int enabled;
|
|
+ int n_events;
|
|
+ int n_added;
|
|
+ int n_txn;
|
|
+ int n_txn_metric;
|
|
+ int assign[64];
|
|
+ u64 tags[64];
|
|
+ struct perf_event *event_list[64];
|
|
+ struct event_constraint *event_constraint[64];
|
|
+ int n_excl;
|
|
+ unsigned int txn_flags;
|
|
+ int is_fake;
|
|
+ struct debug_store *ds;
|
|
+ void *ds_pebs_vaddr;
|
|
+ void *ds_bts_vaddr;
|
|
+ u64 pebs_enabled;
|
|
+ int n_pebs;
|
|
+ int n_large_pebs;
|
|
+ u64 pebs_data_cfg;
|
|
+ u64 active_pebs_data_cfg;
|
|
+ int pebs_record_size;
|
|
+ int lbr_users;
|
|
+ int lbr_pebs_users;
|
|
+ struct perf_branch_stack lbr_stack;
|
|
+ struct perf_branch_entry lbr_entries[32];
|
|
+ struct er_account *lbr_sel;
|
|
+ u64 br_sel;
|
|
+ struct x86_perf_task_context *last_task_ctx;
|
|
+ int last_log_id;
|
|
+ int lbr_select;
|
|
+ u64 intel_ctrl_guest_mask;
|
|
+ u64 intel_ctrl_host_mask;
|
|
+ struct perf_guest_switch_msr guest_switch_msrs[64];
|
|
+ u64 intel_cp_status;
|
|
+ struct intel_shared_regs *shared_regs;
|
|
+ struct event_constraint *constraint_list;
|
|
+ struct intel_excl_cntrs *excl_cntrs;
|
|
+ int excl_thread_id;
|
|
+ u64 tfa_shadow;
|
|
+ int n_metric;
|
|
+ struct amd_nb *amd_nb;
|
|
+ u64 perf_ctr_virt_mask;
|
|
+ int n_pair;
|
|
+ void *kfree_on_online[2];
|
|
+};
|
|
+
|
|
+struct x86_perf_task_context {
|
|
+ u64 lbr_from[32];
|
|
+ u64 lbr_to[32];
|
|
+ u64 lbr_info[32];
|
|
+ u64 lbr_sel;
|
|
+ int tos;
|
|
+ int valid_lbrs;
|
|
+ int lbr_callstack_users;
|
|
+ int lbr_stack_state;
|
|
+ int log_id;
|
|
+};
|
|
+
|
|
+struct extra_reg {
|
|
+ unsigned int event;
|
|
+ unsigned int msr;
|
|
+ u64 config_mask;
|
|
+ u64 valid_mask;
|
|
+ int idx;
|
|
+ bool extra_msr_access;
|
|
+};
|
|
+
|
|
+union perf_capabilities {
|
|
+ struct {
|
|
+ u64 lbr_format: 6;
|
|
+ u64 pebs_trap: 1;
|
|
+ u64 pebs_arch_reg: 1;
|
|
+ u64 pebs_format: 4;
|
|
+ u64 smm_freeze: 1;
|
|
+ u64 full_width_write: 1;
|
|
+ u64 pebs_baseline: 1;
|
|
+ u64 perf_metrics: 1;
|
|
+ };
|
|
+ u64 capabilities;
|
|
+};
|
|
+
|
|
+struct x86_pmu_quirk {
|
|
+ struct x86_pmu_quirk *next;
|
|
+ void (*func)();
|
|
+};
|
|
+
|
|
+enum {
|
|
+ x86_lbr_exclusive_lbr = 0,
|
|
+ x86_lbr_exclusive_bts = 1,
|
|
+ x86_lbr_exclusive_pt = 2,
|
|
+ x86_lbr_exclusive_max = 3,
|
|
+};
|
|
+
|
|
+struct x86_pmu {
|
|
+ const char *name;
|
|
+ int version;
|
|
+ int (*handle_irq)(struct pt_regs *);
|
|
+ void (*disable_all)();
|
|
+ void (*enable_all)(int);
|
|
+ void (*enable)(struct perf_event *);
|
|
+ void (*disable)(struct perf_event *);
|
|
+ void (*add)(struct perf_event *);
|
|
+ void (*del)(struct perf_event *);
|
|
+ void (*read)(struct perf_event *);
|
|
+ int (*hw_config)(struct perf_event *);
|
|
+ int (*schedule_events)(struct cpu_hw_events *, int, int *);
|
|
+ unsigned int eventsel;
|
|
+ unsigned int perfctr;
|
|
+ int (*addr_offset)(int, bool);
|
|
+ int (*rdpmc_index)(int);
|
|
+ u64 (*event_map)(int);
|
|
+ int max_events;
|
|
+ int num_counters;
|
|
+ int num_counters_fixed;
|
|
+ int cntval_bits;
|
|
+ u64 cntval_mask;
|
|
+ union {
|
|
+ long unsigned int events_maskl;
|
|
+ long unsigned int events_mask[1];
|
|
+ };
|
|
+ int events_mask_len;
|
|
+ int apic;
|
|
+ u64 max_period;
|
|
+ struct event_constraint * (*get_event_constraints)(struct cpu_hw_events *, int, struct perf_event *);
|
|
+ void (*put_event_constraints)(struct cpu_hw_events *, struct perf_event *);
|
|
+ void (*start_scheduling)(struct cpu_hw_events *);
|
|
+ void (*commit_scheduling)(struct cpu_hw_events *, int, int);
|
|
+ void (*stop_scheduling)(struct cpu_hw_events *);
|
|
+ struct event_constraint *event_constraints;
|
|
+ struct x86_pmu_quirk *quirks;
|
|
+ int perfctr_second_write;
|
|
+ u64 (*limit_period)(struct perf_event *, u64);
|
|
+ unsigned int late_ack: 1;
|
|
+ unsigned int enabled_ack: 1;
|
|
+ unsigned int counter_freezing: 1;
|
|
+ int attr_rdpmc_broken;
|
|
+ int attr_rdpmc;
|
|
+ struct attribute **format_attrs;
|
|
+ ssize_t (*events_sysfs_show)(char *, u64);
|
|
+ const struct attribute_group **attr_update;
|
|
+ long unsigned int attr_freeze_on_smi;
|
|
+ int (*cpu_prepare)(int);
|
|
+ void (*cpu_starting)(int);
|
|
+ void (*cpu_dying)(int);
|
|
+ void (*cpu_dead)(int);
|
|
+ void (*check_microcode)();
|
|
+ void (*sched_task)(struct perf_event_context *, bool);
|
|
+ u64 intel_ctrl;
|
|
+ union perf_capabilities intel_cap;
|
|
+ unsigned int bts: 1;
|
|
+ unsigned int bts_active: 1;
|
|
+ unsigned int pebs: 1;
|
|
+ unsigned int pebs_active: 1;
|
|
+ unsigned int pebs_broken: 1;
|
|
+ unsigned int pebs_prec_dist: 1;
|
|
+ unsigned int pebs_no_tlb: 1;
|
|
+ int pebs_record_size;
|
|
+ int pebs_buffer_size;
|
|
+ int max_pebs_events;
|
|
+ void (*drain_pebs)(struct pt_regs *);
|
|
+ struct event_constraint *pebs_constraints;
|
|
+ void (*pebs_aliases)(struct perf_event *);
|
|
+ long unsigned int large_pebs_flags;
|
|
+ u64 rtm_abort_event;
|
|
+ unsigned int lbr_tos;
|
|
+ unsigned int lbr_from;
|
|
+ unsigned int lbr_to;
|
|
+ unsigned int lbr_nr;
|
|
+ u64 lbr_sel_mask;
|
|
+ const int *lbr_sel_map;
|
|
+ bool lbr_double_abort;
|
|
+ bool lbr_pt_coexist;
|
|
+ atomic_t lbr_exclusive[3];
|
|
+ u64 (*update_topdown_event)(struct perf_event *);
|
|
+ int (*set_topdown_event_period)(struct perf_event *);
|
|
+ unsigned int amd_nb_constraints: 1;
|
|
+ u64 perf_ctr_pair_en;
|
|
+ struct extra_reg *extra_regs;
|
|
+ unsigned int flags;
|
|
+ struct perf_guest_switch_msr * (*guest_get_msrs)(int *);
|
|
+ int (*check_period)(struct perf_event *, u64);
|
|
+};
|
|
+
|
|
+struct sched_state {
|
|
+ int weight;
|
|
+ int event;
|
|
+ int counter;
|
|
+ int unassigned;
|
|
+ int nr_gp;
|
|
+ u64 used;
|
|
+};
|
|
+
|
|
+struct perf_sched {
|
|
+ int max_weight;
|
|
+ int max_events;
|
|
+ int max_gp;
|
|
+ int saved_states;
|
|
+ struct event_constraint **constraints;
|
|
+ struct sched_state state;
|
|
+ struct sched_state saved[2];
|
|
+};
|
|
+
|
|
+typedef int pao_T_____2;
|
|
+
|
|
+typedef int pto_T_____2;
|
|
+
|
|
+typedef unsigned int pao_T_____3;
|
|
+
|
|
+struct amd_uncore {
|
|
+ int id;
|
|
+ int refcnt;
|
|
+ int cpu;
|
|
+ int num_counters;
|
|
+ int rdpmc_base;
|
|
+ u32 msr_base;
|
|
+ cpumask_t *active_mask;
|
|
+ struct pmu *pmu;
|
|
+ struct perf_event *events[6];
|
|
+ struct hlist_node node;
|
|
+};
|
|
+
|
|
+typedef int pci_power_t;
|
|
+
|
|
+typedef unsigned int pci_channel_state_t;
|
|
+
|
|
+typedef short unsigned int pci_dev_flags_t;
|
|
+
|
|
+struct pci_bus;
|
|
+
|
|
+struct pci_slot;
|
|
+
|
|
+struct aer_stats;
|
|
+
|
|
+struct pci_driver;
|
|
+
|
|
+struct pcie_link_state;
|
|
+
|
|
+struct pci_vpd;
|
|
+
|
|
+struct pci_sriov;
|
|
+
|
|
+struct pci_dev {
|
|
+ struct list_head bus_list;
|
|
+ struct pci_bus *bus;
|
|
+ struct pci_bus *subordinate;
|
|
+ void *sysdata;
|
|
+ struct proc_dir_entry *procent;
|
|
+ struct pci_slot *slot;
|
|
+ unsigned int devfn;
|
|
+ short unsigned int vendor;
|
|
+ short unsigned int device;
|
|
+ short unsigned int subsystem_vendor;
|
|
+ short unsigned int subsystem_device;
|
|
+ unsigned int class;
|
|
+ u8 revision;
|
|
+ u8 hdr_type;
|
|
+ u16 aer_cap;
|
|
+ struct aer_stats *aer_stats;
|
|
+ u8 pcie_cap;
|
|
+ u8 msi_cap;
|
|
+ u8 msix_cap;
|
|
+ u8 pcie_mpss: 3;
|
|
+ u8 rom_base_reg;
|
|
+ u8 pin;
|
|
+ u16 pcie_flags_reg;
|
|
+ long unsigned int *dma_alias_mask;
|
|
+ struct pci_driver *driver;
|
|
+ u64 dma_mask;
|
|
+ struct device_dma_parameters dma_parms;
|
|
+ pci_power_t current_state;
|
|
+ u8 pm_cap;
|
|
+ unsigned int pme_support: 5;
|
|
+ unsigned int pme_poll: 1;
|
|
+ unsigned int d1_support: 1;
|
|
+ unsigned int d2_support: 1;
|
|
+ unsigned int no_d1d2: 1;
|
|
+ unsigned int no_d3cold: 1;
|
|
+ unsigned int bridge_d3: 1;
|
|
+ unsigned int d3cold_allowed: 1;
|
|
+ unsigned int mmio_always_on: 1;
|
|
+ unsigned int wakeup_prepared: 1;
|
|
+ unsigned int runtime_d3cold: 1;
|
|
+ unsigned int skip_bus_pm: 1;
|
|
+ unsigned int ignore_hotplug: 1;
|
|
+ unsigned int hotplug_user_indicators: 1;
|
|
+ unsigned int clear_retrain_link: 1;
|
|
+ unsigned int d3_delay;
|
|
+ unsigned int d3cold_delay;
|
|
+ struct pcie_link_state *link_state;
|
|
+ unsigned int ltr_path: 1;
|
|
+ unsigned int eetlp_prefix_path: 1;
|
|
+ pci_channel_state_t error_state;
|
|
+ struct device dev;
|
|
+ int cfg_size;
|
|
+ unsigned int irq;
|
|
+ struct resource resource[17];
|
|
+ bool match_driver;
|
|
+ unsigned int transparent: 1;
|
|
+ unsigned int io_window: 1;
|
|
+ unsigned int pref_window: 1;
|
|
+ unsigned int pref_64_window: 1;
|
|
+ unsigned int multifunction: 1;
|
|
+ unsigned int is_busmaster: 1;
|
|
+ unsigned int no_msi: 1;
|
|
+ unsigned int no_64bit_msi: 1;
|
|
+ unsigned int block_cfg_access: 1;
|
|
+ unsigned int broken_parity_status: 1;
|
|
+ unsigned int irq_reroute_variant: 2;
|
|
+ unsigned int msi_enabled: 1;
|
|
+ unsigned int msix_enabled: 1;
|
|
+ unsigned int ari_enabled: 1;
|
|
+ unsigned int ats_enabled: 1;
|
|
+ unsigned int pasid_enabled: 1;
|
|
+ unsigned int pri_enabled: 1;
|
|
+ unsigned int is_managed: 1;
|
|
+ unsigned int needs_freset: 1;
|
|
+ unsigned int state_saved: 1;
|
|
+ unsigned int is_physfn: 1;
|
|
+ unsigned int is_virtfn: 1;
|
|
+ unsigned int reset_fn: 1;
|
|
+ unsigned int is_hotplug_bridge: 1;
|
|
+ unsigned int shpc_managed: 1;
|
|
+ unsigned int is_thunderbolt: 1;
|
|
+ unsigned int __aer_firmware_first_valid: 1;
|
|
+ unsigned int __aer_firmware_first: 1;
|
|
+ unsigned int broken_intx_masking: 1;
|
|
+ unsigned int io_window_1k: 1;
|
|
+ unsigned int irq_managed: 1;
|
|
+ unsigned int has_secondary_link: 1;
|
|
+ unsigned int non_compliant_bars: 1;
|
|
+ unsigned int is_probed: 1;
|
|
+ pci_dev_flags_t dev_flags;
|
|
+ atomic_t enable_cnt;
|
|
+ u32 saved_config_space[16];
|
|
+ struct hlist_head saved_cap_space;
|
|
+ struct bin_attribute *rom_attr;
|
|
+ int rom_attr_enabled;
|
|
+ struct bin_attribute *res_attr[17];
|
|
+ struct bin_attribute *res_attr_wc[17];
|
|
+ unsigned int broken_cmd_compl: 1;
|
|
+ const struct attribute_group **msi_irq_groups;
|
|
+ struct pci_vpd *vpd;
|
|
+ union {
|
|
+ struct pci_sriov *sriov;
|
|
+ struct pci_dev *physfn;
|
|
+ };
|
|
+ u16 ats_cap;
|
|
+ u8 ats_stu;
|
|
+ atomic_t ats_ref_cnt;
|
|
+ u32 pri_reqs_alloc;
|
|
+ u16 pasid_features;
|
|
+ phys_addr_t rom;
|
|
+ size_t romlen;
|
|
+ char *driver_override;
|
|
+ long unsigned int priv_flags;
|
|
+ long unsigned int slot_being_removed_rescanned;
|
|
+ struct pci_dev *rpdev;
|
|
+ union {
|
|
+ struct {
|
|
+ unsigned int imm_ready: 1;
|
|
+ unsigned int link_active_reporting: 1;
|
|
+ };
|
|
+ long unsigned int padding;
|
|
+ };
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long unsigned int kabi_reserved9;
|
|
+ long unsigned int kabi_reserved10;
|
|
+ long unsigned int kabi_reserved11;
|
|
+ long unsigned int kabi_reserved12;
|
|
+ long unsigned int kabi_reserved13;
|
|
+ long unsigned int kabi_reserved14;
|
|
+ long unsigned int kabi_reserved15;
|
|
+};
|
|
+
|
|
+struct pci_device_id {
|
|
+ __u32 vendor;
|
|
+ __u32 device;
|
|
+ __u32 subvendor;
|
|
+ __u32 subdevice;
|
|
+ __u32 class;
|
|
+ __u32 class_mask;
|
|
+ kernel_ulong_t driver_data;
|
|
+};
|
|
+
|
|
+struct hotplug_slot;
|
|
+
|
|
+struct pci_slot {
|
|
+ struct pci_bus *bus;
|
|
+ struct list_head list;
|
|
+ struct hotplug_slot *hotplug;
|
|
+ unsigned char number;
|
|
+ struct kobject kobj;
|
|
+};
|
|
+
|
|
+typedef short unsigned int pci_bus_flags_t;
|
|
+
|
|
+struct pci_ops;
|
|
+
|
|
+struct msi_controller;
|
|
+
|
|
+struct pci_bus {
|
|
+ struct list_head node;
|
|
+ struct pci_bus *parent;
|
|
+ struct list_head children;
|
|
+ struct list_head devices;
|
|
+ struct pci_dev *self;
|
|
+ struct list_head slots;
|
|
+ struct resource *resource[4];
|
|
+ struct list_head resources;
|
|
+ struct resource busn_res;
|
|
+ struct pci_ops *ops;
|
|
+ struct pci_ops *backup_ops;
|
|
+ struct msi_controller *msi;
|
|
+ void *sysdata;
|
|
+ struct proc_dir_entry *procdir;
|
|
+ unsigned char number;
|
|
+ unsigned char primary;
|
|
+ unsigned char max_bus_speed;
|
|
+ unsigned char cur_bus_speed;
|
|
+ char name[48];
|
|
+ short unsigned int bridge_ctl;
|
|
+ pci_bus_flags_t bus_flags;
|
|
+ struct device *bridge;
|
|
+ struct device dev;
|
|
+ struct bin_attribute *legacy_io;
|
|
+ struct bin_attribute *legacy_mem;
|
|
+ unsigned int is_added: 1;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ PCI_STD_RESOURCES = 0,
|
|
+ PCI_STD_RESOURCE_END = 5,
|
|
+ PCI_ROM_RESOURCE = 6,
|
|
+ PCI_IOV_RESOURCES = 7,
|
|
+ PCI_IOV_RESOURCE_END = 12,
|
|
+ PCI_BRIDGE_RESOURCES = 13,
|
|
+ PCI_BRIDGE_RESOURCE_END = 16,
|
|
+ PCI_NUM_RESOURCES = 17,
|
|
+ DEVICE_COUNT_RESOURCE = 17,
|
|
+};
|
|
+
|
|
+enum pci_channel_state {
|
|
+ pci_channel_io_normal = 1,
|
|
+ pci_channel_io_frozen = 2,
|
|
+ pci_channel_io_perm_failure = 3,
|
|
+};
|
|
+
|
|
+typedef unsigned int pcie_reset_state_t;
|
|
+
|
|
+struct pci_dynids {
|
|
+ spinlock_t lock;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct pci_error_handlers;
|
|
+
|
|
+struct pci_driver {
|
|
+ struct list_head node;
|
|
+ const char *name;
|
|
+ const struct pci_device_id *id_table;
|
|
+ int (*probe)(struct pci_dev *, const struct pci_device_id *);
|
|
+ void (*remove)(struct pci_dev *);
|
|
+ int (*suspend)(struct pci_dev *, pm_message_t);
|
|
+ int (*suspend_late)(struct pci_dev *, pm_message_t);
|
|
+ int (*resume_early)(struct pci_dev *);
|
|
+ int (*resume)(struct pci_dev *);
|
|
+ void (*shutdown)(struct pci_dev *);
|
|
+ int (*sriov_configure)(struct pci_dev *, int);
|
|
+ const struct pci_error_handlers *err_handler;
|
|
+ const struct attribute_group **groups;
|
|
+ struct device_driver driver;
|
|
+ struct pci_dynids dynids;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+};
|
|
+
|
|
+struct pci_ops {
|
|
+ int (*add_bus)(struct pci_bus *);
|
|
+ void (*remove_bus)(struct pci_bus *);
|
|
+ void * (*map_bus)(struct pci_bus *, unsigned int, int);
|
|
+ int (*read)(struct pci_bus *, unsigned int, int, int, u32 *);
|
|
+ int (*write)(struct pci_bus *, unsigned int, int, int, u32);
|
|
+};
|
|
+
|
|
+typedef unsigned int pci_ers_result_t;
|
|
+
|
|
+struct pci_error_handlers {
|
|
+ pci_ers_result_t (*error_detected)(struct pci_dev *, enum pci_channel_state);
|
|
+ pci_ers_result_t (*mmio_enabled)(struct pci_dev *);
|
|
+ pci_ers_result_t (*slot_reset)(struct pci_dev *);
|
|
+ void (*reset_prepare)(struct pci_dev *);
|
|
+ void (*reset_done)(struct pci_dev *);
|
|
+ void (*resume)(struct pci_dev *);
|
|
+};
|
|
+
|
|
+enum pcie_bus_config_types {
|
|
+ PCIE_BUS_TUNE_OFF = 0,
|
|
+ PCIE_BUS_DEFAULT = 1,
|
|
+ PCIE_BUS_SAFE = 2,
|
|
+ PCIE_BUS_PERFORMANCE = 3,
|
|
+ PCIE_BUS_PEER2PEER = 4,
|
|
+};
|
|
+
|
|
+struct syscore_ops {
|
|
+ struct list_head node;
|
|
+ int (*suspend)();
|
|
+ void (*resume)();
|
|
+ void (*shutdown)();
|
|
+};
|
|
+
|
|
+enum ibs_states {
|
|
+ IBS_ENABLED = 0,
|
|
+ IBS_STARTED = 1,
|
|
+ IBS_STOPPING = 2,
|
|
+ IBS_STOPPED = 3,
|
|
+ IBS_MAX_STATES = 4,
|
|
+};
|
|
+
|
|
+struct cpu_perf_ibs {
|
|
+ struct perf_event *event;
|
|
+ long unsigned int state[1];
|
|
+};
|
|
+
|
|
+struct perf_ibs {
|
|
+ struct pmu pmu;
|
|
+ unsigned int msr;
|
|
+ u64 config_mask;
|
|
+ u64 cnt_mask;
|
|
+ u64 enable_mask;
|
|
+ u64 valid_mask;
|
|
+ u64 max_period;
|
|
+ long unsigned int offset_mask[1];
|
|
+ int offset_max;
|
|
+ struct cpu_perf_ibs *pcpu;
|
|
+ struct attribute **format_attrs;
|
|
+ struct attribute_group format_group;
|
|
+ const struct attribute_group *attr_groups[2];
|
|
+ u64 (*get_count)(u64);
|
|
+};
|
|
+
|
|
+struct perf_ibs_data {
|
|
+ u32 size;
|
|
+ union {
|
|
+ u32 data[0];
|
|
+ u32 caps;
|
|
+ };
|
|
+ u64 regs[8];
|
|
+};
|
|
+
|
|
+struct amd_iommu;
|
|
+
|
|
+struct perf_amd_iommu {
|
|
+ struct list_head list;
|
|
+ struct pmu pmu;
|
|
+ struct amd_iommu *iommu;
|
|
+ char name[16];
|
|
+ u8 max_banks;
|
|
+ u8 max_counters;
|
|
+ u64 cntr_assign_mask;
|
|
+ raw_spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct amd_iommu_event_desc {
|
|
+ struct device_attribute attr;
|
|
+ const char *event;
|
|
+};
|
|
+
|
|
+enum perf_msr_id {
|
|
+ PERF_MSR_TSC = 0,
|
|
+ PERF_MSR_APERF = 1,
|
|
+ PERF_MSR_MPERF = 2,
|
|
+ PERF_MSR_PPERF = 3,
|
|
+ PERF_MSR_SMI = 4,
|
|
+ PERF_MSR_PTSC = 5,
|
|
+ PERF_MSR_IRPERF = 6,
|
|
+ PERF_MSR_THERM = 7,
|
|
+ PERF_MSR_THERM_SNAP = 8,
|
|
+ PERF_MSR_THERM_UNIT = 9,
|
|
+ PERF_MSR_EVENT_MAX = 10,
|
|
+};
|
|
+
|
|
+struct perf_msr {
|
|
+ u64 msr;
|
|
+ struct perf_pmu_events_attr *attr;
|
|
+ bool (*test)(int);
|
|
+};
|
|
+
|
|
+union cpuid10_eax {
|
|
+ struct {
|
|
+ unsigned int version_id: 8;
|
|
+ unsigned int num_counters: 8;
|
|
+ unsigned int bit_width: 8;
|
|
+ unsigned int mask_length: 8;
|
|
+ } split;
|
|
+ unsigned int full;
|
|
+};
|
|
+
|
|
+union cpuid10_ebx {
|
|
+ struct {
|
|
+ unsigned int no_unhalted_core_cycles: 1;
|
|
+ unsigned int no_instructions_retired: 1;
|
|
+ unsigned int no_unhalted_reference_cycles: 1;
|
|
+ unsigned int no_llc_reference: 1;
|
|
+ unsigned int no_llc_misses: 1;
|
|
+ unsigned int no_branch_instruction_retired: 1;
|
|
+ unsigned int no_branch_misses_retired: 1;
|
|
+ } split;
|
|
+ unsigned int full;
|
|
+};
|
|
+
|
|
+union cpuid10_edx {
|
|
+ struct {
|
|
+ unsigned int num_counters_fixed: 5;
|
|
+ unsigned int bit_width_fixed: 8;
|
|
+ unsigned int reserved: 19;
|
|
+ } split;
|
|
+ unsigned int full;
|
|
+};
|
|
+
|
|
+struct dev_ext_attribute {
|
|
+ struct device_attribute attr;
|
|
+ void *var;
|
|
+};
|
|
+
|
|
+union x86_pmu_config {
|
|
+ struct {
|
|
+ u64 event: 8;
|
|
+ u64 umask: 8;
|
|
+ u64 usr: 1;
|
|
+ u64 os: 1;
|
|
+ u64 edge: 1;
|
|
+ u64 pc: 1;
|
|
+ u64 interrupt: 1;
|
|
+ u64 __reserved1: 1;
|
|
+ u64 en: 1;
|
|
+ u64 inv: 1;
|
|
+ u64 cmask: 8;
|
|
+ u64 event2: 4;
|
|
+ u64 __reserved2: 4;
|
|
+ u64 go: 1;
|
|
+ u64 ho: 1;
|
|
+ } bits;
|
|
+ u64 value;
|
|
+};
|
|
+
|
|
+struct perf_output_handle {
|
|
+ struct perf_event *event;
|
|
+ struct ring_buffer *rb;
|
|
+ long unsigned int wakeup;
|
|
+ long unsigned int size;
|
|
+ u64 aux_flags;
|
|
+ union {
|
|
+ void *addr;
|
|
+ long unsigned int head;
|
|
+ };
|
|
+ int page;
|
|
+};
|
|
+
|
|
+struct bts_ctx {
|
|
+ struct perf_output_handle handle;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct debug_store ds_back;
|
|
+ int state;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ BTS_STATE_STOPPED = 0,
|
|
+ BTS_STATE_INACTIVE = 1,
|
|
+ BTS_STATE_ACTIVE = 2,
|
|
+};
|
|
+
|
|
+struct bts_phys {
|
|
+ struct page *page;
|
|
+ long unsigned int size;
|
|
+ long unsigned int offset;
|
|
+ long unsigned int displacement;
|
|
+};
|
|
+
|
|
+struct bts_buffer {
|
|
+ size_t real_size;
|
|
+ unsigned int nr_pages;
|
|
+ unsigned int nr_bufs;
|
|
+ unsigned int cur_buf;
|
|
+ bool snapshot;
|
|
+ local_t data_size;
|
|
+ local_t head;
|
|
+ long unsigned int end;
|
|
+ void **data_pages;
|
|
+ struct bts_phys buf[0];
|
|
+};
|
|
+
|
|
+struct pebs_basic {
|
|
+ u64 format_size;
|
|
+ u64 ip;
|
|
+ u64 applicable_counters;
|
|
+ u64 tsc;
|
|
+};
|
|
+
|
|
+struct pebs_meminfo {
|
|
+ u64 address;
|
|
+ u64 aux;
|
|
+ u64 latency;
|
|
+ u64 tsx_tuning;
|
|
+};
|
|
+
|
|
+struct pebs_gprs {
|
|
+ u64 flags;
|
|
+ u64 ip;
|
|
+ u64 ax;
|
|
+ u64 cx;
|
|
+ u64 dx;
|
|
+ u64 bx;
|
|
+ u64 sp;
|
|
+ u64 bp;
|
|
+ u64 si;
|
|
+ u64 di;
|
|
+ u64 r8;
|
|
+ u64 r9;
|
|
+ u64 r10;
|
|
+ u64 r11;
|
|
+ u64 r12;
|
|
+ u64 r13;
|
|
+ u64 r14;
|
|
+ u64 r15;
|
|
+};
|
|
+
|
|
+struct pebs_xmm {
|
|
+ u64 xmm[32];
|
|
+};
|
|
+
|
|
+struct pebs_lbr_entry {
|
|
+ u64 from;
|
|
+ u64 to;
|
|
+ u64 info;
|
|
+};
|
|
+
|
|
+struct pebs_lbr {
|
|
+ struct pebs_lbr_entry lbr[0];
|
|
+};
|
|
+
|
|
+struct x86_perf_regs {
|
|
+ struct pt_regs regs;
|
|
+ u64 *xmm_regs;
|
|
+};
|
|
+
|
|
+typedef unsigned int insn_attr_t;
|
|
+
|
|
+typedef unsigned char insn_byte_t;
|
|
+
|
|
+typedef int insn_value_t;
|
|
+
|
|
+struct insn_field {
|
|
+ union {
|
|
+ insn_value_t value;
|
|
+ insn_byte_t bytes[4];
|
|
+ };
|
|
+ unsigned char got;
|
|
+ unsigned char nbytes;
|
|
+};
|
|
+
|
|
+struct insn {
|
|
+ struct insn_field prefixes;
|
|
+ struct insn_field rex_prefix;
|
|
+ struct insn_field vex_prefix;
|
|
+ struct insn_field opcode;
|
|
+ struct insn_field modrm;
|
|
+ struct insn_field sib;
|
|
+ struct insn_field displacement;
|
|
+ union {
|
|
+ struct insn_field immediate;
|
|
+ struct insn_field moffset1;
|
|
+ struct insn_field immediate1;
|
|
+ };
|
|
+ union {
|
|
+ struct insn_field moffset2;
|
|
+ struct insn_field immediate2;
|
|
+ };
|
|
+ insn_attr_t attr;
|
|
+ unsigned char opnd_bytes;
|
|
+ unsigned char addr_bytes;
|
|
+ unsigned char length;
|
|
+ unsigned char x86_64;
|
|
+ const insn_byte_t *kaddr;
|
|
+ const insn_byte_t *end_kaddr;
|
|
+ const insn_byte_t *next_byte;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ PERF_TXN_ELISION = 1,
|
|
+ PERF_TXN_TRANSACTION = 2,
|
|
+ PERF_TXN_SYNC = 4,
|
|
+ PERF_TXN_ASYNC = 8,
|
|
+ PERF_TXN_RETRY = 16,
|
|
+ PERF_TXN_CONFLICT = 32,
|
|
+ PERF_TXN_CAPACITY_WRITE = 64,
|
|
+ PERF_TXN_CAPACITY_READ = 128,
|
|
+ PERF_TXN_MAX = 256,
|
|
+ PERF_TXN_ABORT_MASK = 0,
|
|
+ PERF_TXN_ABORT_SHIFT = 32,
|
|
+};
|
|
+
|
|
+struct perf_event_header {
|
|
+ __u32 type;
|
|
+ __u16 misc;
|
|
+ __u16 size;
|
|
+};
|
|
+
|
|
+union intel_x86_pebs_dse {
|
|
+ u64 val;
|
|
+ struct {
|
|
+ unsigned int ld_dse: 4;
|
|
+ unsigned int ld_stlb_miss: 1;
|
|
+ unsigned int ld_locked: 1;
|
|
+ unsigned int ld_reserved: 26;
|
|
+ };
|
|
+ struct {
|
|
+ unsigned int st_l1d_hit: 1;
|
|
+ unsigned int st_reserved1: 3;
|
|
+ unsigned int st_stlb_miss: 1;
|
|
+ unsigned int st_locked: 1;
|
|
+ unsigned int st_reserved2: 26;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct pebs_record_core {
|
|
+ u64 flags;
|
|
+ u64 ip;
|
|
+ u64 ax;
|
|
+ u64 bx;
|
|
+ u64 cx;
|
|
+ u64 dx;
|
|
+ u64 si;
|
|
+ u64 di;
|
|
+ u64 bp;
|
|
+ u64 sp;
|
|
+ u64 r8;
|
|
+ u64 r9;
|
|
+ u64 r10;
|
|
+ u64 r11;
|
|
+ u64 r12;
|
|
+ u64 r13;
|
|
+ u64 r14;
|
|
+ u64 r15;
|
|
+};
|
|
+
|
|
+struct pebs_record_nhm {
|
|
+ u64 flags;
|
|
+ u64 ip;
|
|
+ u64 ax;
|
|
+ u64 bx;
|
|
+ u64 cx;
|
|
+ u64 dx;
|
|
+ u64 si;
|
|
+ u64 di;
|
|
+ u64 bp;
|
|
+ u64 sp;
|
|
+ u64 r8;
|
|
+ u64 r9;
|
|
+ u64 r10;
|
|
+ u64 r11;
|
|
+ u64 r12;
|
|
+ u64 r13;
|
|
+ u64 r14;
|
|
+ u64 r15;
|
|
+ u64 status;
|
|
+ u64 dla;
|
|
+ u64 dse;
|
|
+ u64 lat;
|
|
+};
|
|
+
|
|
+union hsw_tsx_tuning {
|
|
+ struct {
|
|
+ u32 cycles_last_block: 32;
|
|
+ u32 hle_abort: 1;
|
|
+ u32 rtm_abort: 1;
|
|
+ u32 instruction_abort: 1;
|
|
+ u32 non_instruction_abort: 1;
|
|
+ u32 retry: 1;
|
|
+ u32 data_conflict: 1;
|
|
+ u32 capacity_writes: 1;
|
|
+ u32 capacity_reads: 1;
|
|
+ };
|
|
+ u64 value;
|
|
+};
|
|
+
|
|
+struct pebs_record_skl {
|
|
+ u64 flags;
|
|
+ u64 ip;
|
|
+ u64 ax;
|
|
+ u64 bx;
|
|
+ u64 cx;
|
|
+ u64 dx;
|
|
+ u64 si;
|
|
+ u64 di;
|
|
+ u64 bp;
|
|
+ u64 sp;
|
|
+ u64 r8;
|
|
+ u64 r9;
|
|
+ u64 r10;
|
|
+ u64 r11;
|
|
+ u64 r12;
|
|
+ u64 r13;
|
|
+ u64 r14;
|
|
+ u64 r15;
|
|
+ u64 status;
|
|
+ u64 dla;
|
|
+ u64 dse;
|
|
+ u64 lat;
|
|
+ u64 real_ip;
|
|
+ u64 tsx_tuning;
|
|
+ u64 tsc;
|
|
+};
|
|
+
|
|
+struct bts_record {
|
|
+ u64 from;
|
|
+ u64 to;
|
|
+ u64 flags;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ PERF_BR_UNKNOWN = 0,
|
|
+ PERF_BR_COND = 1,
|
|
+ PERF_BR_UNCOND = 2,
|
|
+ PERF_BR_IND = 3,
|
|
+ PERF_BR_CALL = 4,
|
|
+ PERF_BR_IND_CALL = 5,
|
|
+ PERF_BR_RET = 6,
|
|
+ PERF_BR_SYSCALL = 7,
|
|
+ PERF_BR_SYSRET = 8,
|
|
+ PERF_BR_COND_CALL = 9,
|
|
+ PERF_BR_COND_RET = 10,
|
|
+ PERF_BR_MAX = 11,
|
|
+};
|
|
+
|
|
+struct x86_pmu_lbr {
|
|
+ unsigned int nr;
|
|
+ unsigned int from;
|
|
+ unsigned int to;
|
|
+ unsigned int info;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ LBR_FORMAT_32 = 0,
|
|
+ LBR_FORMAT_LIP = 1,
|
|
+ LBR_FORMAT_EIP = 2,
|
|
+ LBR_FORMAT_EIP_FLAGS = 3,
|
|
+ LBR_FORMAT_EIP_FLAGS2 = 4,
|
|
+ LBR_FORMAT_INFO = 5,
|
|
+ LBR_FORMAT_TIME = 6,
|
|
+ LBR_FORMAT_MAX_KNOWN = 6,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ X86_BR_NONE = 0,
|
|
+ X86_BR_USER = 1,
|
|
+ X86_BR_KERNEL = 2,
|
|
+ X86_BR_CALL = 4,
|
|
+ X86_BR_RET = 8,
|
|
+ X86_BR_SYSCALL = 16,
|
|
+ X86_BR_SYSRET = 32,
|
|
+ X86_BR_INT = 64,
|
|
+ X86_BR_IRET = 128,
|
|
+ X86_BR_JCC = 256,
|
|
+ X86_BR_JMP = 512,
|
|
+ X86_BR_IRQ = 1024,
|
|
+ X86_BR_IND_CALL = 2048,
|
|
+ X86_BR_ABORT = 4096,
|
|
+ X86_BR_IN_TX = 8192,
|
|
+ X86_BR_NO_TX = 16384,
|
|
+ X86_BR_ZERO_CALL = 32768,
|
|
+ X86_BR_CALL_STACK = 65536,
|
|
+ X86_BR_IND_JMP = 131072,
|
|
+ X86_BR_TYPE_SAVE = 262144,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ LBR_NONE = 0,
|
|
+ LBR_VALID = 1,
|
|
+};
|
|
+
|
|
+enum P4_EVENTS {
|
|
+ P4_EVENT_TC_DELIVER_MODE = 0,
|
|
+ P4_EVENT_BPU_FETCH_REQUEST = 1,
|
|
+ P4_EVENT_ITLB_REFERENCE = 2,
|
|
+ P4_EVENT_MEMORY_CANCEL = 3,
|
|
+ P4_EVENT_MEMORY_COMPLETE = 4,
|
|
+ P4_EVENT_LOAD_PORT_REPLAY = 5,
|
|
+ P4_EVENT_STORE_PORT_REPLAY = 6,
|
|
+ P4_EVENT_MOB_LOAD_REPLAY = 7,
|
|
+ P4_EVENT_PAGE_WALK_TYPE = 8,
|
|
+ P4_EVENT_BSQ_CACHE_REFERENCE = 9,
|
|
+ P4_EVENT_IOQ_ALLOCATION = 10,
|
|
+ P4_EVENT_IOQ_ACTIVE_ENTRIES = 11,
|
|
+ P4_EVENT_FSB_DATA_ACTIVITY = 12,
|
|
+ P4_EVENT_BSQ_ALLOCATION = 13,
|
|
+ P4_EVENT_BSQ_ACTIVE_ENTRIES = 14,
|
|
+ P4_EVENT_SSE_INPUT_ASSIST = 15,
|
|
+ P4_EVENT_PACKED_SP_UOP = 16,
|
|
+ P4_EVENT_PACKED_DP_UOP = 17,
|
|
+ P4_EVENT_SCALAR_SP_UOP = 18,
|
|
+ P4_EVENT_SCALAR_DP_UOP = 19,
|
|
+ P4_EVENT_64BIT_MMX_UOP = 20,
|
|
+ P4_EVENT_128BIT_MMX_UOP = 21,
|
|
+ P4_EVENT_X87_FP_UOP = 22,
|
|
+ P4_EVENT_TC_MISC = 23,
|
|
+ P4_EVENT_GLOBAL_POWER_EVENTS = 24,
|
|
+ P4_EVENT_TC_MS_XFER = 25,
|
|
+ P4_EVENT_UOP_QUEUE_WRITES = 26,
|
|
+ P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE = 27,
|
|
+ P4_EVENT_RETIRED_BRANCH_TYPE = 28,
|
|
+ P4_EVENT_RESOURCE_STALL = 29,
|
|
+ P4_EVENT_WC_BUFFER = 30,
|
|
+ P4_EVENT_B2B_CYCLES = 31,
|
|
+ P4_EVENT_BNR = 32,
|
|
+ P4_EVENT_SNOOP = 33,
|
|
+ P4_EVENT_RESPONSE = 34,
|
|
+ P4_EVENT_FRONT_END_EVENT = 35,
|
|
+ P4_EVENT_EXECUTION_EVENT = 36,
|
|
+ P4_EVENT_REPLAY_EVENT = 37,
|
|
+ P4_EVENT_INSTR_RETIRED = 38,
|
|
+ P4_EVENT_UOPS_RETIRED = 39,
|
|
+ P4_EVENT_UOP_TYPE = 40,
|
|
+ P4_EVENT_BRANCH_RETIRED = 41,
|
|
+ P4_EVENT_MISPRED_BRANCH_RETIRED = 42,
|
|
+ P4_EVENT_X87_ASSIST = 43,
|
|
+ P4_EVENT_MACHINE_CLEAR = 44,
|
|
+ P4_EVENT_INSTR_COMPLETED = 45,
|
|
+};
|
|
+
|
|
+enum P4_EVENT_OPCODES {
|
|
+ P4_EVENT_TC_DELIVER_MODE_OPCODE = 257,
|
|
+ P4_EVENT_BPU_FETCH_REQUEST_OPCODE = 768,
|
|
+ P4_EVENT_ITLB_REFERENCE_OPCODE = 6147,
|
|
+ P4_EVENT_MEMORY_CANCEL_OPCODE = 517,
|
|
+ P4_EVENT_MEMORY_COMPLETE_OPCODE = 2050,
|
|
+ P4_EVENT_LOAD_PORT_REPLAY_OPCODE = 1026,
|
|
+ P4_EVENT_STORE_PORT_REPLAY_OPCODE = 1282,
|
|
+ P4_EVENT_MOB_LOAD_REPLAY_OPCODE = 770,
|
|
+ P4_EVENT_PAGE_WALK_TYPE_OPCODE = 260,
|
|
+ P4_EVENT_BSQ_CACHE_REFERENCE_OPCODE = 3079,
|
|
+ P4_EVENT_IOQ_ALLOCATION_OPCODE = 774,
|
|
+ P4_EVENT_IOQ_ACTIVE_ENTRIES_OPCODE = 6662,
|
|
+ P4_EVENT_FSB_DATA_ACTIVITY_OPCODE = 5894,
|
|
+ P4_EVENT_BSQ_ALLOCATION_OPCODE = 1287,
|
|
+ P4_EVENT_BSQ_ACTIVE_ENTRIES_OPCODE = 1543,
|
|
+ P4_EVENT_SSE_INPUT_ASSIST_OPCODE = 13313,
|
|
+ P4_EVENT_PACKED_SP_UOP_OPCODE = 2049,
|
|
+ P4_EVENT_PACKED_DP_UOP_OPCODE = 3073,
|
|
+ P4_EVENT_SCALAR_SP_UOP_OPCODE = 2561,
|
|
+ P4_EVENT_SCALAR_DP_UOP_OPCODE = 3585,
|
|
+ P4_EVENT_64BIT_MMX_UOP_OPCODE = 513,
|
|
+ P4_EVENT_128BIT_MMX_UOP_OPCODE = 6657,
|
|
+ P4_EVENT_X87_FP_UOP_OPCODE = 1025,
|
|
+ P4_EVENT_TC_MISC_OPCODE = 1537,
|
|
+ P4_EVENT_GLOBAL_POWER_EVENTS_OPCODE = 4870,
|
|
+ P4_EVENT_TC_MS_XFER_OPCODE = 1280,
|
|
+ P4_EVENT_UOP_QUEUE_WRITES_OPCODE = 2304,
|
|
+ P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE_OPCODE = 1282,
|
|
+ P4_EVENT_RETIRED_BRANCH_TYPE_OPCODE = 1026,
|
|
+ P4_EVENT_RESOURCE_STALL_OPCODE = 257,
|
|
+ P4_EVENT_WC_BUFFER_OPCODE = 1285,
|
|
+ P4_EVENT_B2B_CYCLES_OPCODE = 5635,
|
|
+ P4_EVENT_BNR_OPCODE = 2051,
|
|
+ P4_EVENT_SNOOP_OPCODE = 1539,
|
|
+ P4_EVENT_RESPONSE_OPCODE = 1027,
|
|
+ P4_EVENT_FRONT_END_EVENT_OPCODE = 2053,
|
|
+ P4_EVENT_EXECUTION_EVENT_OPCODE = 3077,
|
|
+ P4_EVENT_REPLAY_EVENT_OPCODE = 2309,
|
|
+ P4_EVENT_INSTR_RETIRED_OPCODE = 516,
|
|
+ P4_EVENT_UOPS_RETIRED_OPCODE = 260,
|
|
+ P4_EVENT_UOP_TYPE_OPCODE = 514,
|
|
+ P4_EVENT_BRANCH_RETIRED_OPCODE = 1541,
|
|
+ P4_EVENT_MISPRED_BRANCH_RETIRED_OPCODE = 772,
|
|
+ P4_EVENT_X87_ASSIST_OPCODE = 773,
|
|
+ P4_EVENT_MACHINE_CLEAR_OPCODE = 517,
|
|
+ P4_EVENT_INSTR_COMPLETED_OPCODE = 1796,
|
|
+};
|
|
+
|
|
+enum P4_ESCR_EMASKS {
|
|
+ P4_EVENT_TC_DELIVER_MODE__DD = 512,
|
|
+ P4_EVENT_TC_DELIVER_MODE__DB = 1024,
|
|
+ P4_EVENT_TC_DELIVER_MODE__DI = 2048,
|
|
+ P4_EVENT_TC_DELIVER_MODE__BD = 4096,
|
|
+ P4_EVENT_TC_DELIVER_MODE__BB = 8192,
|
|
+ P4_EVENT_TC_DELIVER_MODE__BI = 16384,
|
|
+ P4_EVENT_TC_DELIVER_MODE__ID = 32768,
|
|
+ P4_EVENT_BPU_FETCH_REQUEST__TCMISS = 512,
|
|
+ P4_EVENT_ITLB_REFERENCE__HIT = 512,
|
|
+ P4_EVENT_ITLB_REFERENCE__MISS = 1024,
|
|
+ P4_EVENT_ITLB_REFERENCE__HIT_UK = 2048,
|
|
+ P4_EVENT_MEMORY_CANCEL__ST_RB_FULL = 2048,
|
|
+ P4_EVENT_MEMORY_CANCEL__64K_CONF = 4096,
|
|
+ P4_EVENT_MEMORY_COMPLETE__LSC = 512,
|
|
+ P4_EVENT_MEMORY_COMPLETE__SSC = 1024,
|
|
+ P4_EVENT_LOAD_PORT_REPLAY__SPLIT_LD = 1024,
|
|
+ P4_EVENT_STORE_PORT_REPLAY__SPLIT_ST = 1024,
|
|
+ P4_EVENT_MOB_LOAD_REPLAY__NO_STA = 1024,
|
|
+ P4_EVENT_MOB_LOAD_REPLAY__NO_STD = 4096,
|
|
+ P4_EVENT_MOB_LOAD_REPLAY__PARTIAL_DATA = 8192,
|
|
+ P4_EVENT_MOB_LOAD_REPLAY__UNALGN_ADDR = 16384,
|
|
+ P4_EVENT_PAGE_WALK_TYPE__DTMISS = 512,
|
|
+ P4_EVENT_PAGE_WALK_TYPE__ITMISS = 1024,
|
|
+ P4_EVENT_BSQ_CACHE_REFERENCE__RD_2ndL_HITS = 512,
|
|
+ P4_EVENT_BSQ_CACHE_REFERENCE__RD_2ndL_HITE = 1024,
|
|
+ P4_EVENT_BSQ_CACHE_REFERENCE__RD_2ndL_HITM = 2048,
|
|
+ P4_EVENT_BSQ_CACHE_REFERENCE__RD_3rdL_HITS = 4096,
|
|
+ P4_EVENT_BSQ_CACHE_REFERENCE__RD_3rdL_HITE = 8192,
|
|
+ P4_EVENT_BSQ_CACHE_REFERENCE__RD_3rdL_HITM = 16384,
|
|
+ P4_EVENT_BSQ_CACHE_REFERENCE__RD_2ndL_MISS = 131072,
|
|
+ P4_EVENT_BSQ_CACHE_REFERENCE__RD_3rdL_MISS = 262144,
|
|
+ P4_EVENT_BSQ_CACHE_REFERENCE__WR_2ndL_MISS = 524288,
|
|
+ P4_EVENT_IOQ_ALLOCATION__DEFAULT = 512,
|
|
+ P4_EVENT_IOQ_ALLOCATION__ALL_READ = 16384,
|
|
+ P4_EVENT_IOQ_ALLOCATION__ALL_WRITE = 32768,
|
|
+ P4_EVENT_IOQ_ALLOCATION__MEM_UC = 65536,
|
|
+ P4_EVENT_IOQ_ALLOCATION__MEM_WC = 131072,
|
|
+ P4_EVENT_IOQ_ALLOCATION__MEM_WT = 262144,
|
|
+ P4_EVENT_IOQ_ALLOCATION__MEM_WP = 524288,
|
|
+ P4_EVENT_IOQ_ALLOCATION__MEM_WB = 1048576,
|
|
+ P4_EVENT_IOQ_ALLOCATION__OWN = 4194304,
|
|
+ P4_EVENT_IOQ_ALLOCATION__OTHER = 8388608,
|
|
+ P4_EVENT_IOQ_ALLOCATION__PREFETCH = 16777216,
|
|
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__DEFAULT = 512,
|
|
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__ALL_READ = 16384,
|
|
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__ALL_WRITE = 32768,
|
|
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_UC = 65536,
|
|
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_WC = 131072,
|
|
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_WT = 262144,
|
|
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_WP = 524288,
|
|
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__MEM_WB = 1048576,
|
|
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__OWN = 4194304,
|
|
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__OTHER = 8388608,
|
|
+ P4_EVENT_IOQ_ACTIVE_ENTRIES__PREFETCH = 16777216,
|
|
+ P4_EVENT_FSB_DATA_ACTIVITY__DRDY_DRV = 512,
|
|
+ P4_EVENT_FSB_DATA_ACTIVITY__DRDY_OWN = 1024,
|
|
+ P4_EVENT_FSB_DATA_ACTIVITY__DRDY_OTHER = 2048,
|
|
+ P4_EVENT_FSB_DATA_ACTIVITY__DBSY_DRV = 4096,
|
|
+ P4_EVENT_FSB_DATA_ACTIVITY__DBSY_OWN = 8192,
|
|
+ P4_EVENT_FSB_DATA_ACTIVITY__DBSY_OTHER = 16384,
|
|
+ P4_EVENT_BSQ_ALLOCATION__REQ_TYPE0 = 512,
|
|
+ P4_EVENT_BSQ_ALLOCATION__REQ_TYPE1 = 1024,
|
|
+ P4_EVENT_BSQ_ALLOCATION__REQ_LEN0 = 2048,
|
|
+ P4_EVENT_BSQ_ALLOCATION__REQ_LEN1 = 4096,
|
|
+ P4_EVENT_BSQ_ALLOCATION__REQ_IO_TYPE = 16384,
|
|
+ P4_EVENT_BSQ_ALLOCATION__REQ_LOCK_TYPE = 32768,
|
|
+ P4_EVENT_BSQ_ALLOCATION__REQ_CACHE_TYPE = 65536,
|
|
+ P4_EVENT_BSQ_ALLOCATION__REQ_SPLIT_TYPE = 131072,
|
|
+ P4_EVENT_BSQ_ALLOCATION__REQ_DEM_TYPE = 262144,
|
|
+ P4_EVENT_BSQ_ALLOCATION__REQ_ORD_TYPE = 524288,
|
|
+ P4_EVENT_BSQ_ALLOCATION__MEM_TYPE0 = 1048576,
|
|
+ P4_EVENT_BSQ_ALLOCATION__MEM_TYPE1 = 2097152,
|
|
+ P4_EVENT_BSQ_ALLOCATION__MEM_TYPE2 = 4194304,
|
|
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_TYPE0 = 512,
|
|
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_TYPE1 = 1024,
|
|
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_LEN0 = 2048,
|
|
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_LEN1 = 4096,
|
|
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_IO_TYPE = 16384,
|
|
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_LOCK_TYPE = 32768,
|
|
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_CACHE_TYPE = 65536,
|
|
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_SPLIT_TYPE = 131072,
|
|
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_DEM_TYPE = 262144,
|
|
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__REQ_ORD_TYPE = 524288,
|
|
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__MEM_TYPE0 = 1048576,
|
|
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__MEM_TYPE1 = 2097152,
|
|
+ P4_EVENT_BSQ_ACTIVE_ENTRIES__MEM_TYPE2 = 4194304,
|
|
+ P4_EVENT_SSE_INPUT_ASSIST__ALL = 16777216,
|
|
+ P4_EVENT_PACKED_SP_UOP__ALL = 16777216,
|
|
+ P4_EVENT_PACKED_DP_UOP__ALL = 16777216,
|
|
+ P4_EVENT_SCALAR_SP_UOP__ALL = 16777216,
|
|
+ P4_EVENT_SCALAR_DP_UOP__ALL = 16777216,
|
|
+ P4_EVENT_64BIT_MMX_UOP__ALL = 16777216,
|
|
+ P4_EVENT_128BIT_MMX_UOP__ALL = 16777216,
|
|
+ P4_EVENT_X87_FP_UOP__ALL = 16777216,
|
|
+ P4_EVENT_TC_MISC__FLUSH = 8192,
|
|
+ P4_EVENT_GLOBAL_POWER_EVENTS__RUNNING = 512,
|
|
+ P4_EVENT_TC_MS_XFER__CISC = 512,
|
|
+ P4_EVENT_UOP_QUEUE_WRITES__FROM_TC_BUILD = 512,
|
|
+ P4_EVENT_UOP_QUEUE_WRITES__FROM_TC_DELIVER = 1024,
|
|
+ P4_EVENT_UOP_QUEUE_WRITES__FROM_ROM = 2048,
|
|
+ P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE__CONDITIONAL = 1024,
|
|
+ P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE__CALL = 2048,
|
|
+ P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE__RETURN = 4096,
|
|
+ P4_EVENT_RETIRED_MISPRED_BRANCH_TYPE__INDIRECT = 8192,
|
|
+ P4_EVENT_RETIRED_BRANCH_TYPE__CONDITIONAL = 1024,
|
|
+ P4_EVENT_RETIRED_BRANCH_TYPE__CALL = 2048,
|
|
+ P4_EVENT_RETIRED_BRANCH_TYPE__RETURN = 4096,
|
|
+ P4_EVENT_RETIRED_BRANCH_TYPE__INDIRECT = 8192,
|
|
+ P4_EVENT_RESOURCE_STALL__SBFULL = 16384,
|
|
+ P4_EVENT_WC_BUFFER__WCB_EVICTS = 512,
|
|
+ P4_EVENT_WC_BUFFER__WCB_FULL_EVICTS = 1024,
|
|
+ P4_EVENT_FRONT_END_EVENT__NBOGUS = 512,
|
|
+ P4_EVENT_FRONT_END_EVENT__BOGUS = 1024,
|
|
+ P4_EVENT_EXECUTION_EVENT__NBOGUS0 = 512,
|
|
+ P4_EVENT_EXECUTION_EVENT__NBOGUS1 = 1024,
|
|
+ P4_EVENT_EXECUTION_EVENT__NBOGUS2 = 2048,
|
|
+ P4_EVENT_EXECUTION_EVENT__NBOGUS3 = 4096,
|
|
+ P4_EVENT_EXECUTION_EVENT__BOGUS0 = 8192,
|
|
+ P4_EVENT_EXECUTION_EVENT__BOGUS1 = 16384,
|
|
+ P4_EVENT_EXECUTION_EVENT__BOGUS2 = 32768,
|
|
+ P4_EVENT_EXECUTION_EVENT__BOGUS3 = 65536,
|
|
+ P4_EVENT_REPLAY_EVENT__NBOGUS = 512,
|
|
+ P4_EVENT_REPLAY_EVENT__BOGUS = 1024,
|
|
+ P4_EVENT_INSTR_RETIRED__NBOGUSNTAG = 512,
|
|
+ P4_EVENT_INSTR_RETIRED__NBOGUSTAG = 1024,
|
|
+ P4_EVENT_INSTR_RETIRED__BOGUSNTAG = 2048,
|
|
+ P4_EVENT_INSTR_RETIRED__BOGUSTAG = 4096,
|
|
+ P4_EVENT_UOPS_RETIRED__NBOGUS = 512,
|
|
+ P4_EVENT_UOPS_RETIRED__BOGUS = 1024,
|
|
+ P4_EVENT_UOP_TYPE__TAGLOADS = 1024,
|
|
+ P4_EVENT_UOP_TYPE__TAGSTORES = 2048,
|
|
+ P4_EVENT_BRANCH_RETIRED__MMNP = 512,
|
|
+ P4_EVENT_BRANCH_RETIRED__MMNM = 1024,
|
|
+ P4_EVENT_BRANCH_RETIRED__MMTP = 2048,
|
|
+ P4_EVENT_BRANCH_RETIRED__MMTM = 4096,
|
|
+ P4_EVENT_MISPRED_BRANCH_RETIRED__NBOGUS = 512,
|
|
+ P4_EVENT_X87_ASSIST__FPSU = 512,
|
|
+ P4_EVENT_X87_ASSIST__FPSO = 1024,
|
|
+ P4_EVENT_X87_ASSIST__POAO = 2048,
|
|
+ P4_EVENT_X87_ASSIST__POAU = 4096,
|
|
+ P4_EVENT_X87_ASSIST__PREA = 8192,
|
|
+ P4_EVENT_MACHINE_CLEAR__CLEAR = 512,
|
|
+ P4_EVENT_MACHINE_CLEAR__MOCLEAR = 1024,
|
|
+ P4_EVENT_MACHINE_CLEAR__SMCLEAR = 2048,
|
|
+ P4_EVENT_INSTR_COMPLETED__NBOGUS = 512,
|
|
+ P4_EVENT_INSTR_COMPLETED__BOGUS = 1024,
|
|
+};
|
|
+
|
|
+enum P4_PEBS_METRIC {
|
|
+ P4_PEBS_METRIC__none = 0,
|
|
+ P4_PEBS_METRIC__1stl_cache_load_miss_retired = 1,
|
|
+ P4_PEBS_METRIC__2ndl_cache_load_miss_retired = 2,
|
|
+ P4_PEBS_METRIC__dtlb_load_miss_retired = 3,
|
|
+ P4_PEBS_METRIC__dtlb_store_miss_retired = 4,
|
|
+ P4_PEBS_METRIC__dtlb_all_miss_retired = 5,
|
|
+ P4_PEBS_METRIC__tagged_mispred_branch = 6,
|
|
+ P4_PEBS_METRIC__mob_load_replay_retired = 7,
|
|
+ P4_PEBS_METRIC__split_load_retired = 8,
|
|
+ P4_PEBS_METRIC__split_store_retired = 9,
|
|
+ P4_PEBS_METRIC__max = 10,
|
|
+};
|
|
+
|
|
+struct p4_event_bind {
|
|
+ unsigned int opcode;
|
|
+ unsigned int escr_msr[2];
|
|
+ unsigned int escr_emask;
|
|
+ unsigned int shared;
|
|
+ char cntr[6];
|
|
+};
|
|
+
|
|
+struct p4_pebs_bind {
|
|
+ unsigned int metric_pebs;
|
|
+ unsigned int metric_vert;
|
|
+};
|
|
+
|
|
+struct p4_event_alias {
|
|
+ u64 original;
|
|
+ u64 alternative;
|
|
+};
|
|
+
|
|
+enum cpuid_regs_idx {
|
|
+ CPUID_EAX = 0,
|
|
+ CPUID_EBX = 1,
|
|
+ CPUID_ECX = 2,
|
|
+ CPUID_EDX = 3,
|
|
+};
|
|
+
|
|
+enum perf_addr_filter_action_t {
|
|
+ PERF_ADDR_FILTER_ACTION_STOP = 0,
|
|
+ PERF_ADDR_FILTER_ACTION_START = 1,
|
|
+ PERF_ADDR_FILTER_ACTION_FILTER = 2,
|
|
+};
|
|
+
|
|
+struct perf_addr_filter {
|
|
+ struct list_head entry;
|
|
+ struct path path;
|
|
+ long unsigned int offset;
|
|
+ long unsigned int size;
|
|
+ enum perf_addr_filter_action_t action;
|
|
+};
|
|
+
|
|
+struct topa_entry {
|
|
+ u64 end: 1;
|
|
+ u64 rsvd0: 1;
|
|
+ u64 intr: 1;
|
|
+ u64 rsvd1: 1;
|
|
+ u64 stop: 1;
|
|
+ u64 rsvd2: 1;
|
|
+ u64 size: 4;
|
|
+ u64 rsvd3: 2;
|
|
+ u64 base: 36;
|
|
+ u64 rsvd4: 16;
|
|
+};
|
|
+
|
|
+enum pt_capabilities {
|
|
+ PT_CAP_max_subleaf = 0,
|
|
+ PT_CAP_cr3_filtering = 1,
|
|
+ PT_CAP_psb_cyc = 2,
|
|
+ PT_CAP_ip_filtering = 3,
|
|
+ PT_CAP_mtc = 4,
|
|
+ PT_CAP_ptwrite = 5,
|
|
+ PT_CAP_power_event_trace = 6,
|
|
+ PT_CAP_topa_output = 7,
|
|
+ PT_CAP_topa_multiple_entries = 8,
|
|
+ PT_CAP_single_range_output = 9,
|
|
+ PT_CAP_payloads_lip = 10,
|
|
+ PT_CAP_num_address_ranges = 11,
|
|
+ PT_CAP_mtc_periods = 12,
|
|
+ PT_CAP_cycle_thresholds = 13,
|
|
+ PT_CAP_psb_periods = 14,
|
|
+};
|
|
+
|
|
+struct pt_pmu {
|
|
+ struct pmu pmu;
|
|
+ u32 caps[8];
|
|
+ bool vmx;
|
|
+ bool branch_en_always_on;
|
|
+ long unsigned int max_nonturbo_ratio;
|
|
+ unsigned int tsc_art_num;
|
|
+ unsigned int tsc_art_den;
|
|
+};
|
|
+
|
|
+struct topa;
|
|
+
|
|
+struct pt_buffer {
|
|
+ int cpu;
|
|
+ struct list_head tables;
|
|
+ struct topa *first;
|
|
+ struct topa *last;
|
|
+ struct topa *cur;
|
|
+ unsigned int cur_idx;
|
|
+ size_t output_off;
|
|
+ long unsigned int nr_pages;
|
|
+ local_t data_size;
|
|
+ local64_t head;
|
|
+ bool snapshot;
|
|
+ long unsigned int stop_pos;
|
|
+ long unsigned int intr_pos;
|
|
+ void **data_pages;
|
|
+ struct topa_entry *topa_index[0];
|
|
+};
|
|
+
|
|
+struct topa {
|
|
+ struct topa_entry table[506];
|
|
+ struct list_head list;
|
|
+ u64 phys;
|
|
+ u64 offset;
|
|
+ size_t size;
|
|
+ int last;
|
|
+};
|
|
+
|
|
+struct pt_filter {
|
|
+ long unsigned int msr_a;
|
|
+ long unsigned int msr_b;
|
|
+ long unsigned int config;
|
|
+};
|
|
+
|
|
+struct pt_filters {
|
|
+ struct pt_filter filter[4];
|
|
+ unsigned int nr_filters;
|
|
+};
|
|
+
|
|
+struct pt {
|
|
+ struct perf_output_handle handle;
|
|
+ struct pt_filters filters;
|
|
+ int handle_nmi;
|
|
+ int vmx_on;
|
|
+};
|
|
+
|
|
+struct pt_cap_desc {
|
|
+ const char *name;
|
|
+ u32 leaf;
|
|
+ u8 reg;
|
|
+ u32 mask;
|
|
+};
|
|
+
|
|
+struct pt_address_range {
|
|
+ long unsigned int msr_a;
|
|
+ long unsigned int msr_b;
|
|
+ unsigned int reg_off;
|
|
+};
|
|
+
|
|
+typedef void (*exitcall_t)();
|
|
+
|
|
+struct x86_cpu_id {
|
|
+ __u16 vendor;
|
|
+ __u16 family;
|
|
+ __u16 model;
|
|
+ __u16 feature;
|
|
+ kernel_ulong_t driver_data;
|
|
+ __u16 steppings;
|
|
+};
|
|
+
|
|
+enum hrtimer_mode {
|
|
+ HRTIMER_MODE_ABS = 0,
|
|
+ HRTIMER_MODE_REL = 1,
|
|
+ HRTIMER_MODE_PINNED = 2,
|
|
+ HRTIMER_MODE_SOFT = 4,
|
|
+ HRTIMER_MODE_ABS_PINNED = 2,
|
|
+ HRTIMER_MODE_REL_PINNED = 3,
|
|
+ HRTIMER_MODE_ABS_SOFT = 4,
|
|
+ HRTIMER_MODE_REL_SOFT = 5,
|
|
+ HRTIMER_MODE_ABS_PINNED_SOFT = 6,
|
|
+ HRTIMER_MODE_REL_PINNED_SOFT = 7,
|
|
+};
|
|
+
|
|
+struct zhaoxin_uncore_pmu;
|
|
+
|
|
+struct zhaoxin_uncore_ops;
|
|
+
|
|
+struct uncore_event_desc;
|
|
+
|
|
+struct zhaoxin_uncore_type {
|
|
+ const char *name;
|
|
+ int num_counters;
|
|
+ int num_boxes;
|
|
+ int perf_ctr_bits;
|
|
+ int fixed_ctr_bits;
|
|
+ unsigned int perf_ctr;
|
|
+ unsigned int event_ctl;
|
|
+ unsigned int event_mask;
|
|
+ unsigned int event_mask_ext;
|
|
+ unsigned int fixed_ctr;
|
|
+ unsigned int fixed_ctl;
|
|
+ unsigned int box_ctl;
|
|
+ unsigned int msr_offset;
|
|
+ unsigned int num_shared_regs: 8;
|
|
+ unsigned int single_fixed: 1;
|
|
+ unsigned int pair_ctr_ctl: 1;
|
|
+ unsigned int *msr_offsets;
|
|
+ struct event_constraint unconstrainted;
|
|
+ struct event_constraint *constraints;
|
|
+ struct zhaoxin_uncore_pmu *pmus;
|
|
+ struct zhaoxin_uncore_ops *ops;
|
|
+ struct uncore_event_desc *event_descs;
|
|
+ const struct attribute_group *attr_groups[4];
|
|
+ struct pmu *pmu;
|
|
+};
|
|
+
|
|
+struct zhaoxin_uncore_box;
|
|
+
|
|
+struct zhaoxin_uncore_pmu {
|
|
+ struct pmu pmu;
|
|
+ char name[32];
|
|
+ int pmu_idx;
|
|
+ int func_id;
|
|
+ bool registered;
|
|
+ atomic_t activeboxes;
|
|
+ struct zhaoxin_uncore_type *type;
|
|
+ struct zhaoxin_uncore_box **boxes;
|
|
+};
|
|
+
|
|
+struct zhaoxin_uncore_ops {
|
|
+ void (*init_box)(struct zhaoxin_uncore_box *);
|
|
+ void (*exit_box)(struct zhaoxin_uncore_box *);
|
|
+ void (*disable_box)(struct zhaoxin_uncore_box *);
|
|
+ void (*enable_box)(struct zhaoxin_uncore_box *);
|
|
+ void (*disable_event)(struct zhaoxin_uncore_box *, struct perf_event *);
|
|
+ void (*enable_event)(struct zhaoxin_uncore_box *, struct perf_event *);
|
|
+ u64 (*read_counter)(struct zhaoxin_uncore_box *, struct perf_event *);
|
|
+ int (*hw_config)(struct zhaoxin_uncore_box *, struct perf_event *);
|
|
+ struct event_constraint * (*get_constraint)(struct zhaoxin_uncore_box *, struct perf_event *);
|
|
+ void (*put_constraint)(struct zhaoxin_uncore_box *, struct perf_event *);
|
|
+};
|
|
+
|
|
+struct uncore_event_desc {
|
|
+ struct kobj_attribute attr;
|
|
+ const char *config;
|
|
+};
|
|
+
|
|
+struct zhaoxin_uncore_extra_reg {
|
|
+ raw_spinlock_t lock;
|
|
+ u64 config;
|
|
+ u64 config1;
|
|
+ u64 config2;
|
|
+ atomic_t ref;
|
|
+};
|
|
+
|
|
+struct zhaoxin_uncore_box {
|
|
+ int pci_phys_id;
|
|
+ int package_id;
|
|
+ int n_active;
|
|
+ int n_events;
|
|
+ int cpu;
|
|
+ long unsigned int flags;
|
|
+ atomic_t refcnt;
|
|
+ struct perf_event *events[5];
|
|
+ struct perf_event *event_list[5];
|
|
+ struct event_constraint *event_constraint[5];
|
|
+ long unsigned int active_mask[1];
|
|
+ u64 tags[5];
|
|
+ struct pci_dev *pci_dev;
|
|
+ struct zhaoxin_uncore_pmu *pmu;
|
|
+ u64 hrtimer_duration;
|
|
+ struct hrtimer hrtimer;
|
|
+ struct list_head list;
|
|
+ struct list_head active_list;
|
|
+ void *io_addr;
|
|
+ struct zhaoxin_uncore_extra_reg shared_regs[0];
|
|
+};
|
|
+
|
|
+struct zhaoxin_uncore_init_fun {
|
|
+ void (*cpu_init)();
|
|
+};
|
|
+
|
|
+typedef s8 int8_t;
|
|
+
|
|
+typedef long unsigned int xen_pfn_t;
|
|
+
|
|
+typedef long unsigned int xen_ulong_t;
|
|
+
|
|
+struct arch_shared_info {
|
|
+ long unsigned int max_pfn;
|
|
+ xen_pfn_t pfn_to_mfn_frame_list_list;
|
|
+ long unsigned int nmi_reason;
|
|
+ long unsigned int p2m_cr3;
|
|
+ long unsigned int p2m_vaddr;
|
|
+ long unsigned int p2m_generation;
|
|
+};
|
|
+
|
|
+struct arch_vcpu_info {
|
|
+ long unsigned int cr2;
|
|
+ long unsigned int pad;
|
|
+};
|
|
+
|
|
+struct pvclock_wall_clock {
|
|
+ u32 version;
|
|
+ u32 sec;
|
|
+ u32 nsec;
|
|
+};
|
|
+
|
|
+struct vcpu_info {
|
|
+ uint8_t evtchn_upcall_pending;
|
|
+ uint8_t evtchn_upcall_mask;
|
|
+ xen_ulong_t evtchn_pending_sel;
|
|
+ struct arch_vcpu_info arch;
|
|
+ struct pvclock_vcpu_time_info time;
|
|
+};
|
|
+
|
|
+struct shared_info {
|
|
+ struct vcpu_info vcpu_info[32];
|
|
+ xen_ulong_t evtchn_pending[64];
|
|
+ xen_ulong_t evtchn_mask[64];
|
|
+ struct pvclock_wall_clock wc;
|
|
+ struct arch_shared_info arch;
|
|
+};
|
|
+
|
|
+struct start_info {
|
|
+ char magic[32];
|
|
+ long unsigned int nr_pages;
|
|
+ long unsigned int shared_info;
|
|
+ uint32_t flags;
|
|
+ xen_pfn_t store_mfn;
|
|
+ uint32_t store_evtchn;
|
|
+ union {
|
|
+ struct {
|
|
+ xen_pfn_t mfn;
|
|
+ uint32_t evtchn;
|
|
+ } domU;
|
|
+ struct {
|
|
+ uint32_t info_off;
|
|
+ uint32_t info_size;
|
|
+ } dom0;
|
|
+ } console;
|
|
+ long unsigned int pt_base;
|
|
+ long unsigned int nr_pt_frames;
|
|
+ long unsigned int mfn_list;
|
|
+ long unsigned int mod_start;
|
|
+ long unsigned int mod_len;
|
|
+ int8_t cmd_line[1024];
|
|
+ long unsigned int first_p2m_pfn;
|
|
+ long unsigned int nr_p2m_frames;
|
|
+};
|
|
+
|
|
+struct xen_memory_region {
|
|
+ long unsigned int start_pfn;
|
|
+ long unsigned int n_pfns;
|
|
+};
|
|
+
|
|
+struct sched_shutdown {
|
|
+ unsigned int reason;
|
|
+};
|
|
+
|
|
+struct sched_pin_override {
|
|
+ int32_t pcpu;
|
|
+};
|
|
+
|
|
+struct vcpu_register_vcpu_info {
|
|
+ uint64_t mfn;
|
|
+ uint32_t offset;
|
|
+ uint32_t rsvd;
|
|
+};
|
|
+
|
|
+enum xen_mc_flush_reason {
|
|
+ XEN_MC_FL_NONE = 0,
|
|
+ XEN_MC_FL_BATCH = 1,
|
|
+ XEN_MC_FL_ARGS = 2,
|
|
+ XEN_MC_FL_CALLBACK = 3,
|
|
+};
|
|
+
|
|
+enum xen_mc_extend_args {
|
|
+ XEN_MC_XE_OK = 0,
|
|
+ XEN_MC_XE_BAD_OP = 1,
|
|
+ XEN_MC_XE_NO_SPACE = 2,
|
|
+};
|
|
+
|
|
+typedef void (*xen_mc_callback_fn_t)(void *);
|
|
+
|
|
+typedef long int xen_long_t;
|
|
+
|
|
+struct multicall_entry {
|
|
+ xen_ulong_t op;
|
|
+ xen_long_t result;
|
|
+ xen_ulong_t args[6];
|
|
+};
|
|
+
|
|
+struct multicall_space {
|
|
+ struct multicall_entry *mc;
|
|
+ void *args;
|
|
+};
|
|
+
|
|
+struct callback {
|
|
+ void (*fn)(void *);
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+struct mc_buffer {
|
|
+ unsigned int mcidx;
|
|
+ unsigned int argidx;
|
|
+ unsigned int cbidx;
|
|
+ struct multicall_entry entries[32];
|
|
+ unsigned char args[512];
|
|
+ struct callback callbacks[32];
|
|
+};
|
|
+
|
|
+enum paravirt_lazy_mode {
|
|
+ PARAVIRT_LAZY_NONE = 0,
|
|
+ PARAVIRT_LAZY_MMU = 1,
|
|
+ PARAVIRT_LAZY_CPU = 2,
|
|
+};
|
|
+
|
|
+struct mmuext_op {
|
|
+ unsigned int cmd;
|
|
+ union {
|
|
+ xen_pfn_t mfn;
|
|
+ long unsigned int linear_addr;
|
|
+ } arg1;
|
|
+ union {
|
|
+ unsigned int nr_ents;
|
|
+ void *vcpumask;
|
|
+ xen_pfn_t src_mfn;
|
|
+ } arg2;
|
|
+};
|
|
+
|
|
+typedef uint16_t domid_t;
|
|
+
|
|
+struct mmu_update {
|
|
+ uint64_t ptr;
|
|
+ uint64_t val;
|
|
+};
|
|
+
|
|
+struct xmaddr {
|
|
+ phys_addr_t maddr;
|
|
+};
|
|
+
|
|
+typedef struct xmaddr xmaddr_t;
|
|
+
|
|
+struct xpaddr {
|
|
+ phys_addr_t paddr;
|
|
+};
|
|
+
|
|
+typedef struct xpaddr xpaddr_t;
|
|
+
|
|
+struct remap_data {
|
|
+ xen_pfn_t *pfn;
|
|
+ bool contiguous;
|
|
+ bool no_translate;
|
|
+ pgprot_t prot;
|
|
+ struct mmu_update *mmu_update;
|
|
+};
|
|
+
|
|
+typedef long unsigned int irq_hw_number_t;
|
|
+
|
|
+enum irqreturn {
|
|
+ IRQ_NONE = 0,
|
|
+ IRQ_HANDLED = 1,
|
|
+ IRQ_WAKE_THREAD = 2,
|
|
+};
|
|
+
|
|
+typedef enum irqreturn irqreturn_t;
|
|
+
|
|
+typedef irqreturn_t (*irq_handler_t)(int, void *);
|
|
+
|
|
+struct irqaction {
|
|
+ irq_handler_t handler;
|
|
+ void *dev_id;
|
|
+ void *percpu_dev_id;
|
|
+ struct irqaction *next;
|
|
+ irq_handler_t thread_fn;
|
|
+ struct task_struct *thread;
|
|
+ struct irqaction *secondary;
|
|
+ unsigned int irq;
|
|
+ unsigned int flags;
|
|
+ long unsigned int thread_flags;
|
|
+ long unsigned int thread_mask;
|
|
+ const char *name;
|
|
+ struct proc_dir_entry *dir;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct irq_affinity_notify {
|
|
+ unsigned int irq;
|
|
+ struct kref kref;
|
|
+ struct work_struct work;
|
|
+ void (*notify)(struct irq_affinity_notify *, const cpumask_t *);
|
|
+ void (*release)(struct kref *);
|
|
+};
|
|
+
|
|
+enum irqchip_irq_state {
|
|
+ IRQCHIP_STATE_PENDING = 0,
|
|
+ IRQCHIP_STATE_ACTIVE = 1,
|
|
+ IRQCHIP_STATE_MASKED = 2,
|
|
+ IRQCHIP_STATE_LINE_LEVEL = 3,
|
|
+};
|
|
+
|
|
+struct irq_desc;
|
|
+
|
|
+typedef void (*irq_flow_handler_t)(struct irq_desc *);
|
|
+
|
|
+struct msi_desc;
|
|
+
|
|
+struct irq_common_data {
|
|
+ unsigned int state_use_accessors;
|
|
+ unsigned int node;
|
|
+ void *handler_data;
|
|
+ struct msi_desc *msi_desc;
|
|
+ cpumask_var_t affinity;
|
|
+ cpumask_var_t effective_affinity;
|
|
+};
|
|
+
|
|
+struct irq_chip;
|
|
+
|
|
+struct irq_data {
|
|
+ u32 mask;
|
|
+ unsigned int irq;
|
|
+ long unsigned int hwirq;
|
|
+ struct irq_common_data *common;
|
|
+ struct irq_chip *chip;
|
|
+ struct irq_domain *domain;
|
|
+ struct irq_data *parent_data;
|
|
+ void *chip_data;
|
|
+};
|
|
+
|
|
+struct irq_desc {
|
|
+ struct irq_common_data irq_common_data;
|
|
+ struct irq_data irq_data;
|
|
+ unsigned int *kstat_irqs;
|
|
+ irq_flow_handler_t handle_irq;
|
|
+ struct irqaction *action;
|
|
+ unsigned int status_use_accessors;
|
|
+ unsigned int core_internal_state__do_not_mess_with_it;
|
|
+ unsigned int depth;
|
|
+ unsigned int wake_depth;
|
|
+ unsigned int tot_count;
|
|
+ unsigned int irq_count;
|
|
+ long unsigned int last_unhandled;
|
|
+ unsigned int irqs_unhandled;
|
|
+ atomic_t threads_handled;
|
|
+ int threads_handled_last;
|
|
+ raw_spinlock_t lock;
|
|
+ struct cpumask *percpu_enabled;
|
|
+ const struct cpumask *percpu_affinity;
|
|
+ const struct cpumask *affinity_hint;
|
|
+ struct irq_affinity_notify *affinity_notify;
|
|
+ cpumask_var_t pending_mask;
|
|
+ long unsigned int threads_oneshot;
|
|
+ atomic_t threads_active;
|
|
+ wait_queue_head_t wait_for_threads;
|
|
+ unsigned int nr_actions;
|
|
+ unsigned int no_suspend_depth;
|
|
+ unsigned int cond_suspend_depth;
|
|
+ unsigned int force_resume_depth;
|
|
+ struct proc_dir_entry *dir;
|
|
+ struct callback_head rcu;
|
|
+ struct kobject kobj;
|
|
+ struct mutex request_mutex;
|
|
+ int parent_irq;
|
|
+ struct module *owner;
|
|
+ const char *name;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct msi_msg {
|
|
+ u32 address_lo;
|
|
+ u32 address_hi;
|
|
+ u32 data;
|
|
+};
|
|
+
|
|
+struct platform_msi_priv_data;
|
|
+
|
|
+struct platform_msi_desc {
|
|
+ struct platform_msi_priv_data *msi_priv_data;
|
|
+ u16 msi_index;
|
|
+};
|
|
+
|
|
+struct fsl_mc_msi_desc {
|
|
+ u16 msi_index;
|
|
+};
|
|
+
|
|
+struct msi_desc {
|
|
+ struct list_head list;
|
|
+ unsigned int irq;
|
|
+ unsigned int nvec_used;
|
|
+ struct device *dev;
|
|
+ struct msi_msg msg;
|
|
+ struct cpumask *affinity;
|
|
+ union {
|
|
+ struct {
|
|
+ u32 masked;
|
|
+ struct {
|
|
+ __u8 is_msix: 1;
|
|
+ __u8 multiple: 3;
|
|
+ __u8 multi_cap: 3;
|
|
+ __u8 maskbit: 1;
|
|
+ __u8 is_64: 1;
|
|
+ __u16 entry_nr;
|
|
+ unsigned int default_irq;
|
|
+ } msi_attrib;
|
|
+ union {
|
|
+ u8 mask_pos;
|
|
+ void *mask_base;
|
|
+ };
|
|
+ };
|
|
+ struct platform_msi_desc platform;
|
|
+ struct fsl_mc_msi_desc fsl_mc;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct irq_chip {
|
|
+ struct device *parent_device;
|
|
+ const char *name;
|
|
+ unsigned int (*irq_startup)(struct irq_data *);
|
|
+ void (*irq_shutdown)(struct irq_data *);
|
|
+ void (*irq_enable)(struct irq_data *);
|
|
+ void (*irq_disable)(struct irq_data *);
|
|
+ void (*irq_ack)(struct irq_data *);
|
|
+ void (*irq_mask)(struct irq_data *);
|
|
+ void (*irq_mask_ack)(struct irq_data *);
|
|
+ void (*irq_unmask)(struct irq_data *);
|
|
+ void (*irq_eoi)(struct irq_data *);
|
|
+ int (*irq_set_affinity)(struct irq_data *, const struct cpumask *, bool);
|
|
+ int (*irq_retrigger)(struct irq_data *);
|
|
+ int (*irq_set_type)(struct irq_data *, unsigned int);
|
|
+ int (*irq_set_wake)(struct irq_data *, unsigned int);
|
|
+ void (*irq_bus_lock)(struct irq_data *);
|
|
+ void (*irq_bus_sync_unlock)(struct irq_data *);
|
|
+ void (*irq_cpu_online)(struct irq_data *);
|
|
+ void (*irq_cpu_offline)(struct irq_data *);
|
|
+ void (*irq_suspend)(struct irq_data *);
|
|
+ void (*irq_resume)(struct irq_data *);
|
|
+ void (*irq_pm_shutdown)(struct irq_data *);
|
|
+ void (*irq_calc_mask)(struct irq_data *);
|
|
+ void (*irq_print_chip)(struct irq_data *, struct seq_file *);
|
|
+ int (*irq_request_resources)(struct irq_data *);
|
|
+ void (*irq_release_resources)(struct irq_data *);
|
|
+ void (*irq_compose_msi_msg)(struct irq_data *, struct msi_msg *);
|
|
+ void (*irq_write_msi_msg)(struct irq_data *, struct msi_msg *);
|
|
+ int (*irq_get_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool *);
|
|
+ int (*irq_set_irqchip_state)(struct irq_data *, enum irqchip_irq_state, bool);
|
|
+ int (*irq_set_vcpu_affinity)(struct irq_data *, void *);
|
|
+ void (*ipi_send_single)(struct irq_data *, unsigned int);
|
|
+ void (*ipi_send_mask)(struct irq_data *, const struct cpumask *);
|
|
+ int (*irq_nmi_setup)(struct irq_data *);
|
|
+ void (*irq_nmi_teardown)(struct irq_data *);
|
|
+ long unsigned int flags;
|
|
+};
|
|
+
|
|
+enum irq_domain_bus_token {
|
|
+ DOMAIN_BUS_ANY = 0,
|
|
+ DOMAIN_BUS_WIRED = 1,
|
|
+ DOMAIN_BUS_PCI_MSI = 2,
|
|
+ DOMAIN_BUS_PLATFORM_MSI = 3,
|
|
+ DOMAIN_BUS_NEXUS = 4,
|
|
+ DOMAIN_BUS_IPI = 5,
|
|
+ DOMAIN_BUS_FSL_MC_MSI = 6,
|
|
+};
|
|
+
|
|
+struct irq_domain_ops;
|
|
+
|
|
+struct irq_domain_chip_generic;
|
|
+
|
|
+struct irq_domain {
|
|
+ struct list_head link;
|
|
+ const char *name;
|
|
+ const struct irq_domain_ops *ops;
|
|
+ void *host_data;
|
|
+ unsigned int flags;
|
|
+ unsigned int mapcount;
|
|
+ struct fwnode_handle *fwnode;
|
|
+ enum irq_domain_bus_token bus_token;
|
|
+ struct irq_domain_chip_generic *gc;
|
|
+ struct irq_domain *parent;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ irq_hw_number_t hwirq_max;
|
|
+ unsigned int revmap_direct_max_irq;
|
|
+ unsigned int revmap_size;
|
|
+ struct radix_tree_root revmap_tree;
|
|
+ struct mutex revmap_tree_mutex;
|
|
+ unsigned int linear_revmap[0];
|
|
+};
|
|
+
|
|
+typedef struct irq_desc *vector_irq_t[256];
|
|
+
|
|
+struct irq_chip_regs {
|
|
+ long unsigned int enable;
|
|
+ long unsigned int disable;
|
|
+ long unsigned int mask;
|
|
+ long unsigned int ack;
|
|
+ long unsigned int eoi;
|
|
+ long unsigned int type;
|
|
+ long unsigned int polarity;
|
|
+};
|
|
+
|
|
+struct irq_chip_type {
|
|
+ struct irq_chip chip;
|
|
+ struct irq_chip_regs regs;
|
|
+ irq_flow_handler_t handler;
|
|
+ u32 type;
|
|
+ u32 mask_cache_priv;
|
|
+ u32 *mask_cache;
|
|
+};
|
|
+
|
|
+struct irq_chip_generic {
|
|
+ raw_spinlock_t lock;
|
|
+ void *reg_base;
|
|
+ u32 (*reg_readl)(void *);
|
|
+ void (*reg_writel)(u32, void *);
|
|
+ void (*suspend)(struct irq_chip_generic *);
|
|
+ void (*resume)(struct irq_chip_generic *);
|
|
+ unsigned int irq_base;
|
|
+ unsigned int irq_cnt;
|
|
+ u32 mask_cache;
|
|
+ u32 type_cache;
|
|
+ u32 polarity_cache;
|
|
+ u32 wake_enabled;
|
|
+ u32 wake_active;
|
|
+ unsigned int num_ct;
|
|
+ void *private;
|
|
+ long unsigned int installed;
|
|
+ long unsigned int unused;
|
|
+ struct irq_domain *domain;
|
|
+ struct list_head list;
|
|
+ struct irq_chip_type chip_types[0];
|
|
+};
|
|
+
|
|
+enum irq_gc_flags {
|
|
+ IRQ_GC_INIT_MASK_CACHE = 1,
|
|
+ IRQ_GC_INIT_NESTED_LOCK = 2,
|
|
+ IRQ_GC_MASK_CACHE_PER_TYPE = 4,
|
|
+ IRQ_GC_NO_MASK = 8,
|
|
+ IRQ_GC_BE_IO = 16,
|
|
+};
|
|
+
|
|
+struct irq_domain_chip_generic {
|
|
+ unsigned int irqs_per_chip;
|
|
+ unsigned int num_chips;
|
|
+ unsigned int irq_flags_to_clear;
|
|
+ unsigned int irq_flags_to_set;
|
|
+ enum irq_gc_flags gc_flags;
|
|
+ struct irq_chip_generic *gc[0];
|
|
+};
|
|
+
|
|
+struct irq_fwspec {
|
|
+ struct fwnode_handle *fwnode;
|
|
+ int param_count;
|
|
+ u32 param[16];
|
|
+};
|
|
+
|
|
+struct irq_domain_ops {
|
|
+ int (*match)(struct irq_domain *, struct device_node *, enum irq_domain_bus_token);
|
|
+ int (*select)(struct irq_domain *, struct irq_fwspec *, enum irq_domain_bus_token);
|
|
+ int (*map)(struct irq_domain *, unsigned int, irq_hw_number_t);
|
|
+ void (*unmap)(struct irq_domain *, unsigned int);
|
|
+ int (*xlate)(struct irq_domain *, struct device_node *, const u32 *, unsigned int, long unsigned int *, unsigned int *);
|
|
+ int (*alloc)(struct irq_domain *, unsigned int, unsigned int, void *);
|
|
+ void (*free)(struct irq_domain *, unsigned int, unsigned int);
|
|
+ int (*activate)(struct irq_domain *, struct irq_data *, bool);
|
|
+ void (*deactivate)(struct irq_domain *, struct irq_data *);
|
|
+ int (*translate)(struct irq_domain *, struct irq_fwspec *, long unsigned int *, unsigned int *);
|
|
+};
|
|
+
|
|
+enum clock_event_state {
|
|
+ CLOCK_EVT_STATE_DETACHED = 0,
|
|
+ CLOCK_EVT_STATE_SHUTDOWN = 1,
|
|
+ CLOCK_EVT_STATE_PERIODIC = 2,
|
|
+ CLOCK_EVT_STATE_ONESHOT = 3,
|
|
+ CLOCK_EVT_STATE_ONESHOT_STOPPED = 4,
|
|
+};
|
|
+
|
|
+struct clock_event_device {
|
|
+ void (*event_handler)(struct clock_event_device *);
|
|
+ int (*set_next_event)(long unsigned int, struct clock_event_device *);
|
|
+ int (*set_next_ktime)(ktime_t, struct clock_event_device *);
|
|
+ ktime_t next_event;
|
|
+ u64 max_delta_ns;
|
|
+ u64 min_delta_ns;
|
|
+ u32 mult;
|
|
+ u32 shift;
|
|
+ enum clock_event_state state_use_accessors;
|
|
+ unsigned int features;
|
|
+ long unsigned int retries;
|
|
+ int (*set_state_periodic)(struct clock_event_device *);
|
|
+ int (*set_state_oneshot)(struct clock_event_device *);
|
|
+ int (*set_state_oneshot_stopped)(struct clock_event_device *);
|
|
+ int (*set_state_shutdown)(struct clock_event_device *);
|
|
+ int (*tick_resume)(struct clock_event_device *);
|
|
+ void (*broadcast)(const struct cpumask *);
|
|
+ void (*suspend)(struct clock_event_device *);
|
|
+ void (*resume)(struct clock_event_device *);
|
|
+ long unsigned int min_delta_ticks;
|
|
+ long unsigned int max_delta_ticks;
|
|
+ const char *name;
|
|
+ int rating;
|
|
+ int irq;
|
|
+ int bound_on;
|
|
+ const struct cpumask *cpumask;
|
|
+ struct list_head list;
|
|
+ struct module *owner;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct vcpu_time_info {
|
|
+ uint32_t version;
|
|
+ uint32_t pad0;
|
|
+ uint64_t tsc_timestamp;
|
|
+ uint64_t system_time;
|
|
+ uint32_t tsc_to_system_mul;
|
|
+ int8_t tsc_shift;
|
|
+ int8_t pad1[3];
|
|
+};
|
|
+
|
|
+struct vcpu_set_singleshot_timer {
|
|
+ uint64_t timeout_abs_ns;
|
|
+ uint32_t flags;
|
|
+};
|
|
+
|
|
+typedef struct vcpu_time_info *__guest_handle_vcpu_time_info;
|
|
+
|
|
+struct vcpu_register_time_memory_area {
|
|
+ union {
|
|
+ __guest_handle_vcpu_time_info h;
|
|
+ struct pvclock_vcpu_time_info *v;
|
|
+ uint64_t p;
|
|
+ } addr;
|
|
+};
|
|
+
|
|
+struct xen_clock_event_device {
|
|
+ struct clock_event_device evt;
|
|
+ char name[16];
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+typedef uint16_t grant_status_t;
|
|
+
|
|
+struct grant_frames {
|
|
+ xen_pfn_t *pfn;
|
|
+ unsigned int count;
|
|
+ void *vaddr;
|
|
+};
|
|
+
|
|
+struct gnttab_vm_area {
|
|
+ struct vm_struct *area;
|
|
+ pte_t **ptes;
|
|
+};
|
|
+
|
|
+typedef u64 pto_T_____3;
|
|
+
|
|
+struct vcpu_info___2;
|
|
+
|
|
+struct start_info___2;
|
|
+
|
|
+struct shared_info___2;
|
|
+
|
|
+struct xen_add_to_physmap {
|
|
+ domid_t domid;
|
|
+ uint16_t size;
|
|
+ unsigned int space;
|
|
+ xen_ulong_t idx;
|
|
+ xen_pfn_t gpfn;
|
|
+};
|
|
+
|
|
+struct machine_ops {
|
|
+ void (*restart)(char *);
|
|
+ void (*halt)();
|
|
+ void (*power_off)();
|
|
+ void (*shutdown)();
|
|
+ void (*crash_shutdown)(struct pt_regs *);
|
|
+ void (*emergency_restart)();
|
|
+};
|
|
+
|
|
+enum x86_hypervisor_type {
|
|
+ X86_HYPER_NATIVE = 0,
|
|
+ X86_HYPER_VMWARE = 1,
|
|
+ X86_HYPER_MS_HYPERV = 2,
|
|
+ X86_HYPER_XEN_PV = 3,
|
|
+ X86_HYPER_XEN_HVM = 4,
|
|
+ X86_HYPER_KVM = 5,
|
|
+ X86_HYPER_JAILHOUSE = 6,
|
|
+};
|
|
+
|
|
+struct hypervisor_x86 {
|
|
+ const char *name;
|
|
+ uint32_t (*detect)();
|
|
+ enum x86_hypervisor_type type;
|
|
+ struct x86_hyper_init init;
|
|
+ struct x86_hyper_runtime runtime;
|
|
+};
|
|
+
|
|
+typedef uint32_t pto_T_____4;
|
|
+
|
|
+struct xen_hvm_pagetable_dying {
|
|
+ domid_t domid;
|
|
+ __u64 gpa;
|
|
+};
|
|
+
|
|
+enum hvmmem_type_t {
|
|
+ HVMMEM_ram_rw = 0,
|
|
+ HVMMEM_ram_ro = 1,
|
|
+ HVMMEM_mmio_dm = 2,
|
|
+};
|
|
+
|
|
+struct xen_hvm_get_mem_type {
|
|
+ domid_t domid;
|
|
+ uint16_t mem_type;
|
|
+ uint16_t pad[2];
|
|
+ uint64_t pfn;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_mc__batch {
|
|
+ struct trace_entry ent;
|
|
+ enum paravirt_lazy_mode mode;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_mc_entry {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int op;
|
|
+ unsigned int nargs;
|
|
+ long unsigned int args[6];
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_mc_entry_alloc {
|
|
+ struct trace_entry ent;
|
|
+ size_t args;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_mc_callback {
|
|
+ struct trace_entry ent;
|
|
+ xen_mc_callback_fn_t fn;
|
|
+ void *data;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_mc_flush_reason {
|
|
+ struct trace_entry ent;
|
|
+ enum xen_mc_flush_reason reason;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_mc_flush {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int mcidx;
|
|
+ unsigned int argidx;
|
|
+ unsigned int cbidx;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_mc_extend_args {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int op;
|
|
+ size_t args;
|
|
+ enum xen_mc_extend_args res;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_mmu__set_pte {
|
|
+ struct trace_entry ent;
|
|
+ pte_t *ptep;
|
|
+ pteval_t pteval;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_mmu_set_pte_at {
|
|
+ struct trace_entry ent;
|
|
+ struct mm_struct *mm;
|
|
+ long unsigned int addr;
|
|
+ pte_t *ptep;
|
|
+ pteval_t pteval;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_mmu_set_pmd {
|
|
+ struct trace_entry ent;
|
|
+ pmd_t *pmdp;
|
|
+ pmdval_t pmdval;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_mmu_set_pud {
|
|
+ struct trace_entry ent;
|
|
+ pud_t *pudp;
|
|
+ pudval_t pudval;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_mmu_set_p4d {
|
|
+ struct trace_entry ent;
|
|
+ p4d_t *p4dp;
|
|
+ p4d_t *user_p4dp;
|
|
+ p4dval_t p4dval;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_mmu_ptep_modify_prot {
|
|
+ struct trace_entry ent;
|
|
+ struct mm_struct *mm;
|
|
+ long unsigned int addr;
|
|
+ pte_t *ptep;
|
|
+ pteval_t pteval;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_mmu_alloc_ptpage {
|
|
+ struct trace_entry ent;
|
|
+ struct mm_struct *mm;
|
|
+ long unsigned int pfn;
|
|
+ unsigned int level;
|
|
+ bool pinned;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_mmu_release_ptpage {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int pfn;
|
|
+ unsigned int level;
|
|
+ bool pinned;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_mmu_pgd {
|
|
+ struct trace_entry ent;
|
|
+ struct mm_struct *mm;
|
|
+ pgd_t *pgd;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_mmu_flush_tlb_one_user {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int addr;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_mmu_flush_tlb_others {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int ncpus;
|
|
+ struct mm_struct *mm;
|
|
+ long unsigned int addr;
|
|
+ long unsigned int end;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_mmu_write_cr3 {
|
|
+ struct trace_entry ent;
|
|
+ bool kernel;
|
|
+ long unsigned int cr3;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_cpu_write_ldt_entry {
|
|
+ struct trace_entry ent;
|
|
+ struct desc_struct *dt;
|
|
+ int entrynum;
|
|
+ u64 desc;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_cpu_write_idt_entry {
|
|
+ struct trace_entry ent;
|
|
+ gate_desc *dt;
|
|
+ int entrynum;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_cpu_load_idt {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int addr;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_cpu_write_gdt_entry {
|
|
+ struct trace_entry ent;
|
|
+ u64 desc;
|
|
+ struct desc_struct *dt;
|
|
+ int entrynum;
|
|
+ int type;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xen_cpu_set_ldt {
|
|
+ struct trace_entry ent;
|
|
+ const void *addr;
|
|
+ unsigned int entries;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_xen_mc__batch {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_mc_entry {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_mc_entry_alloc {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_mc_callback {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_mc_flush_reason {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_mc_flush {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_mc_extend_args {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_mmu__set_pte {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_mmu_set_pte_at {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_mmu_set_pmd {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_mmu_set_pud {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_mmu_set_p4d {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_mmu_ptep_modify_prot {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_mmu_alloc_ptpage {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_mmu_release_ptpage {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_mmu_pgd {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_mmu_flush_tlb_one_user {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_mmu_flush_tlb_others {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_mmu_write_cr3 {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_cpu_write_ldt_entry {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_cpu_write_idt_entry {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_cpu_load_idt {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_cpu_write_gdt_entry {};
|
|
+
|
|
+struct trace_event_data_offsets_xen_cpu_set_ldt {};
|
|
+
|
|
+enum ipi_vector {
|
|
+ XEN_RESCHEDULE_VECTOR = 0,
|
|
+ XEN_CALL_FUNCTION_VECTOR = 1,
|
|
+ XEN_CALL_FUNCTION_SINGLE_VECTOR = 2,
|
|
+ XEN_SPIN_UNLOCK_VECTOR = 3,
|
|
+ XEN_IRQ_WORK_VECTOR = 4,
|
|
+ XEN_NMI_VECTOR = 5,
|
|
+ XEN_NR_IPIS = 6,
|
|
+};
|
|
+
|
|
+struct xen_common_irq {
|
|
+ int irq;
|
|
+ char *name;
|
|
+};
|
|
+
|
|
+struct iommu_table_entry {
|
|
+ initcall_t detect;
|
|
+ initcall_t depend;
|
|
+ void (*early_init)();
|
|
+ void (*late_init)();
|
|
+ int flags;
|
|
+};
|
|
+
|
|
+typedef s16 int16_t;
|
|
+
|
|
+enum efi_secureboot_mode {
|
|
+ efi_secureboot_mode_unset = 0,
|
|
+ efi_secureboot_mode_unknown = 1,
|
|
+ efi_secureboot_mode_disabled = 2,
|
|
+ efi_secureboot_mode_enabled = 3,
|
|
+};
|
|
+
|
|
+typedef unsigned char *__guest_handle_uchar;
|
|
+
|
|
+typedef char *__guest_handle_char;
|
|
+
|
|
+typedef void *__guest_handle_void;
|
|
+
|
|
+typedef uint64_t *__guest_handle_uint64_t;
|
|
+
|
|
+typedef uint32_t *__guest_handle_uint32_t;
|
|
+
|
|
+struct xenpf_settime32 {
|
|
+ uint32_t secs;
|
|
+ uint32_t nsecs;
|
|
+ uint64_t system_time;
|
|
+};
|
|
+
|
|
+struct xenpf_settime64 {
|
|
+ uint64_t secs;
|
|
+ uint32_t nsecs;
|
|
+ uint32_t mbz;
|
|
+ uint64_t system_time;
|
|
+};
|
|
+
|
|
+struct xenpf_add_memtype {
|
|
+ xen_pfn_t mfn;
|
|
+ uint64_t nr_mfns;
|
|
+ uint32_t type;
|
|
+ uint32_t handle;
|
|
+ uint32_t reg;
|
|
+};
|
|
+
|
|
+struct xenpf_del_memtype {
|
|
+ uint32_t handle;
|
|
+ uint32_t reg;
|
|
+};
|
|
+
|
|
+struct xenpf_read_memtype {
|
|
+ uint32_t reg;
|
|
+ xen_pfn_t mfn;
|
|
+ uint64_t nr_mfns;
|
|
+ uint32_t type;
|
|
+};
|
|
+
|
|
+struct xenpf_microcode_update {
|
|
+ __guest_handle_void data;
|
|
+ uint32_t length;
|
|
+};
|
|
+
|
|
+struct xenpf_platform_quirk {
|
|
+ uint32_t quirk_id;
|
|
+};
|
|
+
|
|
+struct xenpf_efi_time {
|
|
+ uint16_t year;
|
|
+ uint8_t month;
|
|
+ uint8_t day;
|
|
+ uint8_t hour;
|
|
+ uint8_t min;
|
|
+ uint8_t sec;
|
|
+ uint32_t ns;
|
|
+ int16_t tz;
|
|
+ uint8_t daylight;
|
|
+};
|
|
+
|
|
+struct xenpf_efi_guid {
|
|
+ uint32_t data1;
|
|
+ uint16_t data2;
|
|
+ uint16_t data3;
|
|
+ uint8_t data4[8];
|
|
+};
|
|
+
|
|
+struct xenpf_efi_runtime_call {
|
|
+ uint32_t function;
|
|
+ uint32_t misc;
|
|
+ xen_ulong_t status;
|
|
+ union {
|
|
+ struct {
|
|
+ struct xenpf_efi_time time;
|
|
+ uint32_t resolution;
|
|
+ uint32_t accuracy;
|
|
+ } get_time;
|
|
+ struct xenpf_efi_time set_time;
|
|
+ struct xenpf_efi_time get_wakeup_time;
|
|
+ struct xenpf_efi_time set_wakeup_time;
|
|
+ struct {
|
|
+ __guest_handle_void name;
|
|
+ xen_ulong_t size;
|
|
+ __guest_handle_void data;
|
|
+ struct xenpf_efi_guid vendor_guid;
|
|
+ } get_variable;
|
|
+ struct {
|
|
+ __guest_handle_void name;
|
|
+ xen_ulong_t size;
|
|
+ __guest_handle_void data;
|
|
+ struct xenpf_efi_guid vendor_guid;
|
|
+ } set_variable;
|
|
+ struct {
|
|
+ xen_ulong_t size;
|
|
+ __guest_handle_void name;
|
|
+ struct xenpf_efi_guid vendor_guid;
|
|
+ } get_next_variable_name;
|
|
+ struct {
|
|
+ uint32_t attr;
|
|
+ uint64_t max_store_size;
|
|
+ uint64_t remain_store_size;
|
|
+ uint64_t max_size;
|
|
+ } query_variable_info;
|
|
+ struct {
|
|
+ __guest_handle_void capsule_header_array;
|
|
+ xen_ulong_t capsule_count;
|
|
+ uint64_t max_capsule_size;
|
|
+ uint32_t reset_type;
|
|
+ } query_capsule_capabilities;
|
|
+ struct {
|
|
+ __guest_handle_void capsule_header_array;
|
|
+ xen_ulong_t capsule_count;
|
|
+ uint64_t sg_list;
|
|
+ } update_capsule;
|
|
+ } u;
|
|
+};
|
|
+
|
|
+union xenpf_efi_info {
|
|
+ uint32_t version;
|
|
+ struct {
|
|
+ uint64_t addr;
|
|
+ uint32_t nent;
|
|
+ } cfg;
|
|
+ struct {
|
|
+ uint32_t revision;
|
|
+ uint32_t bufsz;
|
|
+ __guest_handle_void name;
|
|
+ } vendor;
|
|
+ struct {
|
|
+ uint64_t addr;
|
|
+ uint64_t size;
|
|
+ uint64_t attr;
|
|
+ uint32_t type;
|
|
+ } mem;
|
|
+};
|
|
+
|
|
+struct xenpf_firmware_info {
|
|
+ uint32_t type;
|
|
+ uint32_t index;
|
|
+ union {
|
|
+ struct {
|
|
+ uint8_t device;
|
|
+ uint8_t version;
|
|
+ uint16_t interface_support;
|
|
+ uint16_t legacy_max_cylinder;
|
|
+ uint8_t legacy_max_head;
|
|
+ uint8_t legacy_sectors_per_track;
|
|
+ __guest_handle_void edd_params;
|
|
+ } disk_info;
|
|
+ struct {
|
|
+ uint8_t device;
|
|
+ uint32_t mbr_signature;
|
|
+ } disk_mbr_signature;
|
|
+ struct {
|
|
+ uint8_t capabilities;
|
|
+ uint8_t edid_transfer_time;
|
|
+ __guest_handle_uchar edid;
|
|
+ } vbeddc_info;
|
|
+ union xenpf_efi_info efi_info;
|
|
+ uint8_t kbd_shift_flags;
|
|
+ } u;
|
|
+};
|
|
+
|
|
+struct xenpf_enter_acpi_sleep {
|
|
+ uint16_t val_a;
|
|
+ uint16_t val_b;
|
|
+ uint32_t sleep_state;
|
|
+ uint32_t flags;
|
|
+};
|
|
+
|
|
+struct xenpf_change_freq {
|
|
+ uint32_t flags;
|
|
+ uint32_t cpu;
|
|
+ uint64_t freq;
|
|
+};
|
|
+
|
|
+struct xenpf_getidletime {
|
|
+ __guest_handle_uchar cpumap_bitmap;
|
|
+ uint32_t cpumap_nr_cpus;
|
|
+ __guest_handle_uint64_t idletime;
|
|
+ uint64_t now;
|
|
+};
|
|
+
|
|
+struct xen_power_register {
|
|
+ uint32_t space_id;
|
|
+ uint32_t bit_width;
|
|
+ uint32_t bit_offset;
|
|
+ uint32_t access_size;
|
|
+ uint64_t address;
|
|
+};
|
|
+
|
|
+struct xen_processor_csd {
|
|
+ uint32_t domain;
|
|
+ uint32_t coord_type;
|
|
+ uint32_t num;
|
|
+};
|
|
+
|
|
+typedef struct xen_processor_csd *__guest_handle_xen_processor_csd;
|
|
+
|
|
+struct xen_processor_cx {
|
|
+ struct xen_power_register reg;
|
|
+ uint8_t type;
|
|
+ uint32_t latency;
|
|
+ uint32_t power;
|
|
+ uint32_t dpcnt;
|
|
+ __guest_handle_xen_processor_csd dp;
|
|
+};
|
|
+
|
|
+typedef struct xen_processor_cx *__guest_handle_xen_processor_cx;
|
|
+
|
|
+struct xen_processor_flags {
|
|
+ uint32_t bm_control: 1;
|
|
+ uint32_t bm_check: 1;
|
|
+ uint32_t has_cst: 1;
|
|
+ uint32_t power_setup_done: 1;
|
|
+ uint32_t bm_rld_set: 1;
|
|
+};
|
|
+
|
|
+struct xen_processor_power {
|
|
+ uint32_t count;
|
|
+ struct xen_processor_flags flags;
|
|
+ __guest_handle_xen_processor_cx states;
|
|
+};
|
|
+
|
|
+struct xen_pct_register {
|
|
+ uint8_t descriptor;
|
|
+ uint16_t length;
|
|
+ uint8_t space_id;
|
|
+ uint8_t bit_width;
|
|
+ uint8_t bit_offset;
|
|
+ uint8_t reserved;
|
|
+ uint64_t address;
|
|
+};
|
|
+
|
|
+struct xen_processor_px {
|
|
+ uint64_t core_frequency;
|
|
+ uint64_t power;
|
|
+ uint64_t transition_latency;
|
|
+ uint64_t bus_master_latency;
|
|
+ uint64_t control;
|
|
+ uint64_t status;
|
|
+};
|
|
+
|
|
+typedef struct xen_processor_px *__guest_handle_xen_processor_px;
|
|
+
|
|
+struct xen_psd_package {
|
|
+ uint64_t num_entries;
|
|
+ uint64_t revision;
|
|
+ uint64_t domain;
|
|
+ uint64_t coord_type;
|
|
+ uint64_t num_processors;
|
|
+};
|
|
+
|
|
+struct xen_processor_performance {
|
|
+ uint32_t flags;
|
|
+ uint32_t platform_limit;
|
|
+ struct xen_pct_register control_register;
|
|
+ struct xen_pct_register status_register;
|
|
+ uint32_t state_count;
|
|
+ __guest_handle_xen_processor_px states;
|
|
+ struct xen_psd_package domain_info;
|
|
+ uint32_t shared_type;
|
|
+};
|
|
+
|
|
+struct xenpf_set_processor_pminfo {
|
|
+ uint32_t id;
|
|
+ uint32_t type;
|
|
+ union {
|
|
+ struct xen_processor_power power;
|
|
+ struct xen_processor_performance perf;
|
|
+ __guest_handle_uint32_t pdc;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct xenpf_pcpuinfo {
|
|
+ uint32_t xen_cpuid;
|
|
+ uint32_t max_present;
|
|
+ uint32_t flags;
|
|
+ uint32_t apic_id;
|
|
+ uint32_t acpi_id;
|
|
+};
|
|
+
|
|
+struct xenpf_cpu_ol {
|
|
+ uint32_t cpuid;
|
|
+};
|
|
+
|
|
+struct xenpf_cpu_hotadd {
|
|
+ uint32_t apic_id;
|
|
+ uint32_t acpi_id;
|
|
+ uint32_t pxm;
|
|
+};
|
|
+
|
|
+struct xenpf_mem_hotadd {
|
|
+ uint64_t spfn;
|
|
+ uint64_t epfn;
|
|
+ uint32_t pxm;
|
|
+ uint32_t flags;
|
|
+};
|
|
+
|
|
+struct xenpf_core_parking {
|
|
+ uint32_t type;
|
|
+ uint32_t idle_nums;
|
|
+};
|
|
+
|
|
+struct xenpf_symdata {
|
|
+ uint32_t namelen;
|
|
+ uint32_t symnum;
|
|
+ __guest_handle_char name;
|
|
+ uint64_t address;
|
|
+ char type;
|
|
+};
|
|
+
|
|
+struct xen_platform_op {
|
|
+ uint32_t cmd;
|
|
+ uint32_t interface_version;
|
|
+ union {
|
|
+ struct xenpf_settime32 settime32;
|
|
+ struct xenpf_settime64 settime64;
|
|
+ struct xenpf_add_memtype add_memtype;
|
|
+ struct xenpf_del_memtype del_memtype;
|
|
+ struct xenpf_read_memtype read_memtype;
|
|
+ struct xenpf_microcode_update microcode;
|
|
+ struct xenpf_platform_quirk platform_quirk;
|
|
+ struct xenpf_efi_runtime_call efi_runtime_call;
|
|
+ struct xenpf_firmware_info firmware_info;
|
|
+ struct xenpf_enter_acpi_sleep enter_acpi_sleep;
|
|
+ struct xenpf_change_freq change_freq;
|
|
+ struct xenpf_getidletime getidletime;
|
|
+ struct xenpf_set_processor_pminfo set_pminfo;
|
|
+ struct xenpf_pcpuinfo pcpu_info;
|
|
+ struct xenpf_cpu_ol cpu_ol;
|
|
+ struct xenpf_cpu_hotadd cpu_add;
|
|
+ struct xenpf_mem_hotadd mem_add;
|
|
+ struct xenpf_core_parking core_parking;
|
|
+ struct xenpf_symdata symdata;
|
|
+ uint8_t pad[128];
|
|
+ } u;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ WORK_STRUCT_PENDING_BIT = 0,
|
|
+ WORK_STRUCT_DELAYED_BIT = 1,
|
|
+ WORK_STRUCT_PWQ_BIT = 2,
|
|
+ WORK_STRUCT_LINKED_BIT = 3,
|
|
+ WORK_STRUCT_COLOR_SHIFT = 4,
|
|
+ WORK_STRUCT_COLOR_BITS = 4,
|
|
+ WORK_STRUCT_PENDING = 1,
|
|
+ WORK_STRUCT_DELAYED = 2,
|
|
+ WORK_STRUCT_PWQ = 4,
|
|
+ WORK_STRUCT_LINKED = 8,
|
|
+ WORK_STRUCT_STATIC = 0,
|
|
+ WORK_NR_COLORS = 15,
|
|
+ WORK_NO_COLOR = 15,
|
|
+ WORK_CPU_UNBOUND = 8192,
|
|
+ WORK_STRUCT_FLAG_BITS = 8,
|
|
+ WORK_OFFQ_FLAG_BASE = 4,
|
|
+ __WORK_OFFQ_CANCELING = 4,
|
|
+ WORK_OFFQ_CANCELING = 16,
|
|
+ WORK_OFFQ_FLAG_BITS = 1,
|
|
+ WORK_OFFQ_POOL_SHIFT = 5,
|
|
+ WORK_OFFQ_LEFT = 59,
|
|
+ WORK_OFFQ_POOL_BITS = 31,
|
|
+ WORK_OFFQ_POOL_NONE = 2147483647,
|
|
+ WORK_STRUCT_FLAG_MASK = 255,
|
|
+ WORK_STRUCT_WQ_DATA_MASK = -256,
|
|
+ WORK_STRUCT_NO_POOL = -32,
|
|
+ WORK_BUSY_PENDING = 1,
|
|
+ WORK_BUSY_RUNNING = 2,
|
|
+ WORK_FLUSH_FROM_CANCEL = 1,
|
|
+ WORK_FLUSH_AT_NICE = 2,
|
|
+ WORKER_DESC_LEN = 24,
|
|
+};
|
|
+
|
|
+union hv_x64_msr_hypercall_contents {
|
|
+ u64 as_uint64;
|
|
+ struct {
|
|
+ u64 enable: 1;
|
|
+ u64 reserved: 11;
|
|
+ u64 guest_physical_address: 52;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct hv_reenlightenment_control {
|
|
+ __u64 vector: 8;
|
|
+ __u64 reserved1: 8;
|
|
+ __u64 enabled: 1;
|
|
+ __u64 reserved2: 15;
|
|
+ __u64 target_vp: 32;
|
|
+};
|
|
+
|
|
+struct hv_tsc_emulation_control {
|
|
+ __u64 enabled: 1;
|
|
+ __u64 reserved: 63;
|
|
+};
|
|
+
|
|
+struct hv_tsc_emulation_status {
|
|
+ __u64 inprogress: 1;
|
|
+ __u64 reserved: 63;
|
|
+};
|
|
+
|
|
+typedef u8 pto_T_____5;
|
|
+
|
|
+struct mmu_gather_batch {
|
|
+ struct mmu_gather_batch *next;
|
|
+ unsigned int nr;
|
|
+ unsigned int max;
|
|
+ struct page *pages[0];
|
|
+};
|
|
+
|
|
+struct mmu_table_batch;
|
|
+
|
|
+struct mmu_gather {
|
|
+ struct mm_struct *mm;
|
|
+ struct mmu_table_batch *batch;
|
|
+ long unsigned int start;
|
|
+ long unsigned int end;
|
|
+ unsigned int fullmm: 1;
|
|
+ unsigned int need_flush_all: 1;
|
|
+ unsigned int freed_tables: 1;
|
|
+ unsigned int cleared_ptes: 1;
|
|
+ unsigned int cleared_pmds: 1;
|
|
+ unsigned int cleared_puds: 1;
|
|
+ unsigned int cleared_p4ds: 1;
|
|
+ struct mmu_gather_batch *active;
|
|
+ struct mmu_gather_batch local;
|
|
+ struct page *__pages[8];
|
|
+ unsigned int batch_count;
|
|
+ int page_size;
|
|
+};
|
|
+
|
|
+struct clocksource___2;
|
|
+
|
|
+struct mmu_notifier_mm {
|
|
+ struct hlist_head list;
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+enum HV_GENERIC_SET_FORMAT {
|
|
+ HV_GENERIC_SET_SPARSE_4K = 0,
|
|
+ HV_GENERIC_SET_ALL = 1,
|
|
+};
|
|
+
|
|
+struct hv_vpset {
|
|
+ u64 format;
|
|
+ u64 valid_bank_mask;
|
|
+ u64 bank_contents[0];
|
|
+};
|
|
+
|
|
+struct hv_tlb_flush {
|
|
+ u64 address_space;
|
|
+ u64 flags;
|
|
+ u64 processor_mask;
|
|
+ u64 gva_list[0];
|
|
+};
|
|
+
|
|
+struct hv_tlb_flush_ex {
|
|
+ u64 address_space;
|
|
+ u64 flags;
|
|
+ struct hv_vpset hv_vp_set;
|
|
+ u64 gva_list[0];
|
|
+};
|
|
+
|
|
+struct mmu_table_batch {
|
|
+ struct callback_head rcu;
|
|
+ unsigned int nr;
|
|
+ void *tables[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_hyperv_mmu_flush_tlb_others {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int ncpus;
|
|
+ struct mm_struct *mm;
|
|
+ long unsigned int addr;
|
|
+ long unsigned int end;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_hyperv_nested_flush_guest_mapping {
|
|
+ struct trace_entry ent;
|
|
+ u64 as;
|
|
+ int ret;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_hyperv_send_ipi_mask {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int ncpus;
|
|
+ int vector;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_hyperv_mmu_flush_tlb_others {};
|
|
+
|
|
+struct trace_event_data_offsets_hyperv_nested_flush_guest_mapping {};
|
|
+
|
|
+struct trace_event_data_offsets_hyperv_send_ipi_mask {};
|
|
+
|
|
+struct hv_guest_mapping_flush {
|
|
+ u64 address_space;
|
|
+ u64 flags;
|
|
+};
|
|
+
|
|
+struct hv_send_ipi {
|
|
+ u32 vector;
|
|
+ u32 reserved;
|
|
+ u64 cpu_mask;
|
|
+};
|
|
+
|
|
+struct hv_send_ipi_ex {
|
|
+ u32 vector;
|
|
+ u32 reserved;
|
|
+ struct hv_vpset vp_set;
|
|
+};
|
|
+
|
|
+struct trampoline_header {
|
|
+ u64 start;
|
|
+ u64 efer;
|
|
+ u32 cr4;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ EI_ETYPE_NONE = 0,
|
|
+ EI_ETYPE_NULL = 1,
|
|
+ EI_ETYPE_ERRNO = 2,
|
|
+ EI_ETYPE_ERRNO_NULL = 3,
|
|
+};
|
|
+
|
|
+enum show_regs_mode {
|
|
+ SHOW_REGS_SHORT = 0,
|
|
+ SHOW_REGS_USER = 1,
|
|
+ SHOW_REGS_ALL = 2,
|
|
+};
|
|
+
|
|
+struct inactive_task_frame {
|
|
+ long unsigned int flags;
|
|
+ long unsigned int r15;
|
|
+ long unsigned int r14;
|
|
+ long unsigned int r13;
|
|
+ long unsigned int r12;
|
|
+ long unsigned int bx;
|
|
+ long unsigned int bp;
|
|
+ long unsigned int ret_addr;
|
|
+};
|
|
+
|
|
+struct fork_frame {
|
|
+ struct inactive_task_frame frame;
|
|
+ struct pt_regs regs;
|
|
+};
|
|
+
|
|
+struct syscall_metadata {
|
|
+ const char *name;
|
|
+ int syscall_nr;
|
|
+ int nb_args;
|
|
+ const char **types;
|
|
+ const char **args;
|
|
+ struct list_head enter_fields;
|
|
+ struct trace_event_call *enter_event;
|
|
+ struct trace_event_call *exit_event;
|
|
+};
|
|
+
|
|
+struct user_desc {
|
|
+ unsigned int entry_number;
|
|
+ unsigned int base_addr;
|
|
+ unsigned int limit;
|
|
+ unsigned int seg_32bit: 1;
|
|
+ unsigned int contents: 2;
|
|
+ unsigned int read_exec_only: 1;
|
|
+ unsigned int limit_in_pages: 1;
|
|
+ unsigned int seg_not_present: 1;
|
|
+ unsigned int useable: 1;
|
|
+ unsigned int lm: 1;
|
|
+};
|
|
+
|
|
+enum which_selector {
|
|
+ FS = 0,
|
|
+ GS = 1,
|
|
+};
|
|
+
|
|
+typedef struct task_struct *pto_T_____6;
|
|
+
|
|
+typedef struct fpu *pto_T_____7;
|
|
+
|
|
+struct sigcontext_64 {
|
|
+ __u64 r8;
|
|
+ __u64 r9;
|
|
+ __u64 r10;
|
|
+ __u64 r11;
|
|
+ __u64 r12;
|
|
+ __u64 r13;
|
|
+ __u64 r14;
|
|
+ __u64 r15;
|
|
+ __u64 di;
|
|
+ __u64 si;
|
|
+ __u64 bp;
|
|
+ __u64 bx;
|
|
+ __u64 dx;
|
|
+ __u64 ax;
|
|
+ __u64 cx;
|
|
+ __u64 sp;
|
|
+ __u64 ip;
|
|
+ __u64 flags;
|
|
+ __u16 cs;
|
|
+ __u16 gs;
|
|
+ __u16 fs;
|
|
+ __u16 ss;
|
|
+ __u64 err;
|
|
+ __u64 trapno;
|
|
+ __u64 oldmask;
|
|
+ __u64 cr2;
|
|
+ __u64 fpstate;
|
|
+ __u64 reserved1[8];
|
|
+};
|
|
+
|
|
+struct sigaltstack {
|
|
+ void *ss_sp;
|
|
+ int ss_flags;
|
|
+ size_t ss_size;
|
|
+};
|
|
+
|
|
+typedef struct sigaltstack stack_t;
|
|
+
|
|
+typedef u32 compat_sigset_word;
|
|
+
|
|
+typedef struct {
|
|
+ compat_sigset_word sig[2];
|
|
+} compat_sigset_t;
|
|
+
|
|
+struct ucontext {
|
|
+ long unsigned int uc_flags;
|
|
+ struct ucontext *uc_link;
|
|
+ stack_t uc_stack;
|
|
+ struct sigcontext_64 uc_mcontext;
|
|
+ sigset_t uc_sigmask;
|
|
+};
|
|
+
|
|
+struct mce {
|
|
+ __u64 status;
|
|
+ __u64 misc;
|
|
+ __u64 addr;
|
|
+ __u64 mcgstatus;
|
|
+ __u64 ip;
|
|
+ __u64 tsc;
|
|
+ __u64 time;
|
|
+ __u8 cpuvendor;
|
|
+ __u8 inject_flags;
|
|
+ __u8 severity;
|
|
+ __u8 pad;
|
|
+ __u32 cpuid;
|
|
+ __u8 cs;
|
|
+ __u8 bank;
|
|
+ __u8 cpu;
|
|
+ __u8 finished;
|
|
+ __u32 extcpu;
|
|
+ __u32 socketid;
|
|
+ __u32 apicid;
|
|
+ __u64 mcgcap;
|
|
+ __u64 synd;
|
|
+ __u64 ipid;
|
|
+ __u64 ppin;
|
|
+ __u32 microcode;
|
|
+};
|
|
+
|
|
+typedef long unsigned int mce_banks_t[1];
|
|
+
|
|
+struct smca_hwid {
|
|
+ unsigned int bank_type;
|
|
+ u32 hwid_mcatype;
|
|
+ u32 xec_bitmap;
|
|
+ u8 count;
|
|
+};
|
|
+
|
|
+struct smca_bank {
|
|
+ struct smca_hwid *hwid;
|
|
+ u32 id;
|
|
+ u8 sysfs_id;
|
|
+};
|
|
+
|
|
+struct kernel_vm86_regs {
|
|
+ struct pt_regs pt;
|
|
+ short unsigned int es;
|
|
+ short unsigned int __esh;
|
|
+ short unsigned int ds;
|
|
+ short unsigned int __dsh;
|
|
+ short unsigned int fs;
|
|
+ short unsigned int __fsh;
|
|
+ short unsigned int gs;
|
|
+ short unsigned int __gsh;
|
|
+};
|
|
+
|
|
+struct rt_sigframe {
|
|
+ char *pretcode;
|
|
+ struct ucontext uc;
|
|
+ struct siginfo info;
|
|
+};
|
|
+
|
|
+typedef s32 compat_clock_t;
|
|
+
|
|
+typedef s32 compat_pid_t;
|
|
+
|
|
+typedef u32 __compat_uid32_t;
|
|
+
|
|
+typedef s32 compat_timer_t;
|
|
+
|
|
+typedef s32 compat_int_t;
|
|
+
|
|
+union compat_sigval {
|
|
+ compat_int_t sival_int;
|
|
+ compat_uptr_t sival_ptr;
|
|
+};
|
|
+
|
|
+typedef union compat_sigval compat_sigval_t;
|
|
+
|
|
+struct compat_siginfo {
|
|
+ int si_signo;
|
|
+ int si_errno;
|
|
+ int si_code;
|
|
+ union {
|
|
+ int _pad[29];
|
|
+ struct {
|
|
+ compat_pid_t _pid;
|
|
+ __compat_uid32_t _uid;
|
|
+ } _kill;
|
|
+ struct {
|
|
+ compat_timer_t _tid;
|
|
+ int _overrun;
|
|
+ compat_sigval_t _sigval;
|
|
+ } _timer;
|
|
+ struct {
|
|
+ compat_pid_t _pid;
|
|
+ __compat_uid32_t _uid;
|
|
+ compat_sigval_t _sigval;
|
|
+ } _rt;
|
|
+ struct {
|
|
+ compat_pid_t _pid;
|
|
+ __compat_uid32_t _uid;
|
|
+ int _status;
|
|
+ compat_clock_t _utime;
|
|
+ compat_clock_t _stime;
|
|
+ } _sigchld;
|
|
+ struct {
|
|
+ compat_uptr_t _addr;
|
|
+ union {
|
|
+ short int _addr_lsb;
|
|
+ struct {
|
|
+ char _dummy_bnd[4];
|
|
+ compat_uptr_t _lower;
|
|
+ compat_uptr_t _upper;
|
|
+ } _addr_bnd;
|
|
+ struct {
|
|
+ char _dummy_pkey[4];
|
|
+ u32 _pkey;
|
|
+ } _addr_pkey;
|
|
+ };
|
|
+ } _sigfault;
|
|
+ struct {
|
|
+ compat_long_t _band;
|
|
+ int _fd;
|
|
+ } _sigpoll;
|
|
+ struct {
|
|
+ compat_uptr_t _call_addr;
|
|
+ int _syscall;
|
|
+ unsigned int _arch;
|
|
+ } _sigsys;
|
|
+ } _sifields;
|
|
+};
|
|
+
|
|
+typedef struct compat_siginfo compat_siginfo_t;
|
|
+
|
|
+enum bug_trap_type {
|
|
+ BUG_TRAP_TYPE_NONE = 0,
|
|
+ BUG_TRAP_TYPE_WARN = 1,
|
|
+ BUG_TRAP_TYPE_BUG = 2,
|
|
+};
|
|
+
|
|
+enum xfeature {
|
|
+ XFEATURE_FP = 0,
|
|
+ XFEATURE_SSE = 1,
|
|
+ XFEATURE_YMM = 2,
|
|
+ XFEATURE_BNDREGS = 3,
|
|
+ XFEATURE_BNDCSR = 4,
|
|
+ XFEATURE_OPMASK = 5,
|
|
+ XFEATURE_ZMM_Hi256 = 6,
|
|
+ XFEATURE_Hi16_ZMM = 7,
|
|
+ XFEATURE_PT_UNIMPLEMENTED_SO_FAR = 8,
|
|
+ XFEATURE_PKRU = 9,
|
|
+ XFEATURE_MAX = 10,
|
|
+};
|
|
+
|
|
+struct mpx_bndcsr {
|
|
+ u64 bndcfgu;
|
|
+ u64 bndstatus;
|
|
+};
|
|
+
|
|
+enum die_val {
|
|
+ DIE_OOPS = 1,
|
|
+ DIE_INT3 = 2,
|
|
+ DIE_DEBUG = 3,
|
|
+ DIE_PANIC = 4,
|
|
+ DIE_NMI = 5,
|
|
+ DIE_DIE = 6,
|
|
+ DIE_KERNELDEBUG = 7,
|
|
+ DIE_TRAP = 8,
|
|
+ DIE_GPF = 9,
|
|
+ DIE_CALL = 10,
|
|
+ DIE_PAGE_FAULT = 11,
|
|
+ DIE_NMIUNKNOWN = 12,
|
|
+};
|
|
+
|
|
+struct bad_iret_stack {
|
|
+ void *error_entry_ret;
|
|
+ struct pt_regs regs;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ GATE_INTERRUPT = 14,
|
|
+ GATE_TRAP = 15,
|
|
+ GATE_CALL = 12,
|
|
+ GATE_TASK = 5,
|
|
+};
|
|
+
|
|
+struct irq_desc___2;
|
|
+
|
|
+typedef struct irq_desc___2 *vector_irq_t___2[256];
|
|
+
|
|
+struct idt_data {
|
|
+ unsigned int vector;
|
|
+ unsigned int segment;
|
|
+ struct idt_bits bits;
|
|
+ const void *addr;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_x86_irq_vector {
|
|
+ struct trace_entry ent;
|
|
+ int vector;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_vector_config {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int irq;
|
|
+ unsigned int vector;
|
|
+ unsigned int cpu;
|
|
+ unsigned int apicdest;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_vector_mod {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int irq;
|
|
+ unsigned int vector;
|
|
+ unsigned int cpu;
|
|
+ unsigned int prev_vector;
|
|
+ unsigned int prev_cpu;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_vector_reserve {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int irq;
|
|
+ int ret;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_vector_alloc {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int irq;
|
|
+ unsigned int vector;
|
|
+ bool reserved;
|
|
+ int ret;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_vector_alloc_managed {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int irq;
|
|
+ unsigned int vector;
|
|
+ int ret;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_vector_activate {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int irq;
|
|
+ bool is_managed;
|
|
+ bool can_reserve;
|
|
+ bool reserve;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_vector_teardown {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int irq;
|
|
+ bool is_managed;
|
|
+ bool has_reserved;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_vector_setup {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int irq;
|
|
+ bool is_legacy;
|
|
+ int ret;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_vector_free_moved {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int irq;
|
|
+ unsigned int cpu;
|
|
+ unsigned int vector;
|
|
+ bool is_managed;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_x86_irq_vector {};
|
|
+
|
|
+struct trace_event_data_offsets_vector_config {};
|
|
+
|
|
+struct trace_event_data_offsets_vector_mod {};
|
|
+
|
|
+struct trace_event_data_offsets_vector_reserve {};
|
|
+
|
|
+struct trace_event_data_offsets_vector_alloc {};
|
|
+
|
|
+struct trace_event_data_offsets_vector_alloc_managed {};
|
|
+
|
|
+struct trace_event_data_offsets_vector_activate {};
|
|
+
|
|
+struct trace_event_data_offsets_vector_teardown {};
|
|
+
|
|
+struct trace_event_data_offsets_vector_setup {};
|
|
+
|
|
+struct trace_event_data_offsets_vector_free_moved {};
|
|
+
|
|
+typedef struct irq_desc *pto_T_____8;
|
|
+
|
|
+typedef struct pt_regs *pto_T_____9;
|
|
+
|
|
+struct legacy_pic {
|
|
+ int nr_legacy_irqs;
|
|
+ struct irq_chip *chip;
|
|
+ void (*mask)(unsigned int);
|
|
+ void (*unmask)(unsigned int);
|
|
+ void (*mask_all)();
|
|
+ void (*restore_mask)();
|
|
+ void (*init)(int);
|
|
+ int (*probe)();
|
|
+ int (*irq_pending)(unsigned int);
|
|
+ void (*make_irq)(unsigned int);
|
|
+};
|
|
+
|
|
+struct ldttss_desc {
|
|
+ u16 limit0;
|
|
+ u16 base0;
|
|
+ u16 base1: 8;
|
|
+ u16 type: 5;
|
|
+ u16 dpl: 2;
|
|
+ u16 p: 1;
|
|
+ u16 limit1: 4;
|
|
+ u16 zero0: 3;
|
|
+ u16 g: 1;
|
|
+ u16 base2: 8;
|
|
+ u32 base3;
|
|
+ u32 zero1;
|
|
+};
|
|
+
|
|
+typedef struct ldttss_desc tss_desc;
|
|
+
|
|
+typedef bool pto_T_____10;
|
|
+
|
|
+enum lockdep_ok {
|
|
+ LOCKDEP_STILL_OK = 0,
|
|
+ LOCKDEP_NOW_UNRELIABLE = 1,
|
|
+};
|
|
+
|
|
+typedef long unsigned int uintptr_t;
|
|
+
|
|
+struct trace_event_raw_nmi_handler {
|
|
+ struct trace_entry ent;
|
|
+ void *handler;
|
|
+ s64 delta_ns;
|
|
+ int handled;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_nmi_handler {};
|
|
+
|
|
+struct nmi_desc {
|
|
+ raw_spinlock_t lock;
|
|
+ struct list_head head;
|
|
+};
|
|
+
|
|
+struct nmi_stats {
|
|
+ unsigned int normal;
|
|
+ unsigned int unknown;
|
|
+ unsigned int external;
|
|
+ unsigned int swallow;
|
|
+};
|
|
+
|
|
+enum nmi_states {
|
|
+ NMI_NOT_RUNNING = 0,
|
|
+ NMI_EXECUTING = 1,
|
|
+ NMI_LATCHED = 2,
|
|
+};
|
|
+
|
|
+typedef enum nmi_states pto_T_____11;
|
|
+
|
|
+struct setup_data {
|
|
+ __u64 next;
|
|
+ __u32 type;
|
|
+ __u32 len;
|
|
+ __u8 data[0];
|
|
+};
|
|
+
|
|
+struct acpi_table_ibft {
|
|
+ struct acpi_table_header header;
|
|
+ u8 reserved[12];
|
|
+};
|
|
+
|
|
+enum cpufreq_table_sorting {
|
|
+ CPUFREQ_TABLE_UNSORTED = 0,
|
|
+ CPUFREQ_TABLE_SORTED_ASCENDING = 1,
|
|
+ CPUFREQ_TABLE_SORTED_DESCENDING = 2,
|
|
+};
|
|
+
|
|
+struct cpufreq_cpuinfo {
|
|
+ unsigned int max_freq;
|
|
+ unsigned int min_freq;
|
|
+ unsigned int transition_latency;
|
|
+};
|
|
+
|
|
+struct cpufreq_user_policy {
|
|
+ unsigned int min;
|
|
+ unsigned int max;
|
|
+};
|
|
+
|
|
+struct clk;
|
|
+
|
|
+struct cpufreq_governor;
|
|
+
|
|
+struct cpufreq_frequency_table;
|
|
+
|
|
+struct cpufreq_stats;
|
|
+
|
|
+struct cpufreq_policy {
|
|
+ cpumask_var_t cpus;
|
|
+ cpumask_var_t related_cpus;
|
|
+ cpumask_var_t real_cpus;
|
|
+ unsigned int shared_type;
|
|
+ unsigned int cpu;
|
|
+ struct clk *clk;
|
|
+ struct cpufreq_cpuinfo cpuinfo;
|
|
+ unsigned int min;
|
|
+ unsigned int max;
|
|
+ unsigned int cur;
|
|
+ unsigned int restore_freq;
|
|
+ unsigned int suspend_freq;
|
|
+ unsigned int policy;
|
|
+ unsigned int last_policy;
|
|
+ struct cpufreq_governor *governor;
|
|
+ void *governor_data;
|
|
+ char last_governor[16];
|
|
+ struct work_struct update;
|
|
+ struct cpufreq_user_policy user_policy;
|
|
+ struct cpufreq_frequency_table *freq_table;
|
|
+ enum cpufreq_table_sorting freq_table_sorted;
|
|
+ struct list_head policy_list;
|
|
+ struct kobject kobj;
|
|
+ struct completion kobj_unregister;
|
|
+ struct rw_semaphore rwsem;
|
|
+ bool fast_switch_possible;
|
|
+ bool fast_switch_enabled;
|
|
+ unsigned int transition_delay_us;
|
|
+ bool dvfs_possible_from_any_cpu;
|
|
+ unsigned int cached_target_freq;
|
|
+ int cached_resolved_idx;
|
|
+ bool transition_ongoing;
|
|
+ spinlock_t transition_lock;
|
|
+ wait_queue_head_t transition_wait;
|
|
+ struct task_struct *transition_task;
|
|
+ struct cpufreq_stats *stats;
|
|
+ void *driver_data;
|
|
+};
|
|
+
|
|
+struct cpufreq_governor {
|
|
+ char name[16];
|
|
+ int (*init)(struct cpufreq_policy *);
|
|
+ void (*exit)(struct cpufreq_policy *);
|
|
+ int (*start)(struct cpufreq_policy *);
|
|
+ void (*stop)(struct cpufreq_policy *);
|
|
+ void (*limits)(struct cpufreq_policy *);
|
|
+ ssize_t (*show_setspeed)(struct cpufreq_policy *, char *);
|
|
+ int (*store_setspeed)(struct cpufreq_policy *, unsigned int);
|
|
+ bool dynamic_switching;
|
|
+ struct list_head governor_list;
|
|
+ struct module *owner;
|
|
+};
|
|
+
|
|
+struct cpufreq_frequency_table {
|
|
+ unsigned int flags;
|
|
+ unsigned int driver_data;
|
|
+ unsigned int frequency;
|
|
+};
|
|
+
|
|
+struct freq_attr {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct cpufreq_policy *, char *);
|
|
+ ssize_t (*store)(struct cpufreq_policy *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct tboot_mac_region {
|
|
+ u64 start;
|
|
+ u32 size;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct tboot_acpi_generic_address {
|
|
+ u8 space_id;
|
|
+ u8 bit_width;
|
|
+ u8 bit_offset;
|
|
+ u8 access_width;
|
|
+ u64 address;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct tboot_acpi_sleep_info {
|
|
+ struct tboot_acpi_generic_address pm1a_cnt_blk;
|
|
+ struct tboot_acpi_generic_address pm1b_cnt_blk;
|
|
+ struct tboot_acpi_generic_address pm1a_evt_blk;
|
|
+ struct tboot_acpi_generic_address pm1b_evt_blk;
|
|
+ u16 pm1a_cnt_val;
|
|
+ u16 pm1b_cnt_val;
|
|
+ u64 wakeup_vector;
|
|
+ u32 vector_width;
|
|
+ u64 kernel_s3_resume_vector;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct tboot {
|
|
+ u8 uuid[16];
|
|
+ u32 version;
|
|
+ u32 log_addr;
|
|
+ u32 shutdown_entry;
|
|
+ u32 shutdown_type;
|
|
+ struct tboot_acpi_sleep_info acpi_sinfo;
|
|
+ u32 tboot_base;
|
|
+ u32 tboot_size;
|
|
+ u8 num_mac_regions;
|
|
+ struct tboot_mac_region mac_regions[32];
|
|
+ u8 s3_key[64];
|
|
+ u8 reserved_align[3];
|
|
+ u32 num_in_wfs;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct efi_scratch {
|
|
+ u64 phys_stack;
|
|
+ struct mm_struct *prev_mm;
|
|
+};
|
|
+
|
|
+struct amd_nb_bus_dev_range {
|
|
+ u8 bus;
|
|
+ u8 dev_base;
|
|
+ u8 dev_limit;
|
|
+};
|
|
+
|
|
+struct msi_controller {
|
|
+ struct module *owner;
|
|
+ struct device *dev;
|
|
+ struct device_node *of_node;
|
|
+ struct list_head list;
|
|
+ int (*setup_irq)(struct msi_controller *, struct pci_dev *, struct msi_desc *);
|
|
+ int (*setup_irqs)(struct msi_controller *, struct pci_dev *, int, int);
|
|
+ void (*teardown_irq)(struct msi_controller *, unsigned int);
|
|
+};
|
|
+
|
|
+struct pci_raw_ops {
|
|
+ int (*read)(unsigned int, unsigned int, unsigned int, int, int, u32 *);
|
|
+ int (*write)(unsigned int, unsigned int, unsigned int, int, int, u32);
|
|
+};
|
|
+
|
|
+struct clock_event_device___2;
|
|
+
|
|
+enum jump_label_type {
|
|
+ JUMP_LABEL_NOP = 0,
|
|
+ JUMP_LABEL_JMP = 1,
|
|
+};
|
|
+
|
|
+union jump_code_union {
|
|
+ char code[5];
|
|
+ struct {
|
|
+ char jump;
|
|
+ int offset;
|
|
+ } __attribute__((packed));
|
|
+};
|
|
+
|
|
+enum {
|
|
+ JL_STATE_START = 0,
|
|
+ JL_STATE_NO_UPDATE = 1,
|
|
+ JL_STATE_UPDATE = 2,
|
|
+};
|
|
+
|
|
+struct vm_unmapped_area_info {
|
|
+ long unsigned int flags;
|
|
+ long unsigned int length;
|
|
+ long unsigned int low_limit;
|
|
+ long unsigned int high_limit;
|
|
+ long unsigned int align_mask;
|
|
+ long unsigned int align_offset;
|
|
+};
|
|
+
|
|
+enum align_flags {
|
|
+ ALIGN_VA_32 = 1,
|
|
+ ALIGN_VA_64 = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ MEMREMAP_WB = 1,
|
|
+ MEMREMAP_WT = 2,
|
|
+ MEMREMAP_WC = 4,
|
|
+ MEMREMAP_ENC = 8,
|
|
+ MEMREMAP_DEC = 16,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IORES_DESC_NONE = 0,
|
|
+ IORES_DESC_CRASH_KERNEL = 1,
|
|
+ IORES_DESC_ACPI_TABLES = 2,
|
|
+ IORES_DESC_ACPI_NV_STORAGE = 3,
|
|
+ IORES_DESC_PERSISTENT_MEMORY = 4,
|
|
+ IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
|
|
+ IORES_DESC_DEVICE_PRIVATE_MEMORY = 6,
|
|
+ IORES_DESC_DEVICE_PUBLIC_MEMORY = 7,
|
|
+ IORES_DESC_QUICK_KEXEC = 8,
|
|
+};
|
|
+
|
|
+struct change_member {
|
|
+ struct e820_entry *entry;
|
|
+ long long unsigned int addr;
|
|
+};
|
|
+
|
|
+enum dmi_field {
|
|
+ DMI_NONE = 0,
|
|
+ DMI_BIOS_VENDOR = 1,
|
|
+ DMI_BIOS_VERSION = 2,
|
|
+ DMI_BIOS_DATE = 3,
|
|
+ DMI_SYS_VENDOR = 4,
|
|
+ DMI_PRODUCT_NAME = 5,
|
|
+ DMI_PRODUCT_VERSION = 6,
|
|
+ DMI_PRODUCT_SERIAL = 7,
|
|
+ DMI_PRODUCT_UUID = 8,
|
|
+ DMI_PRODUCT_SKU = 9,
|
|
+ DMI_PRODUCT_FAMILY = 10,
|
|
+ DMI_BOARD_VENDOR = 11,
|
|
+ DMI_BOARD_NAME = 12,
|
|
+ DMI_BOARD_VERSION = 13,
|
|
+ DMI_BOARD_SERIAL = 14,
|
|
+ DMI_BOARD_ASSET_TAG = 15,
|
|
+ DMI_CHASSIS_VENDOR = 16,
|
|
+ DMI_CHASSIS_TYPE = 17,
|
|
+ DMI_CHASSIS_VERSION = 18,
|
|
+ DMI_CHASSIS_SERIAL = 19,
|
|
+ DMI_CHASSIS_ASSET_TAG = 20,
|
|
+ DMI_STRING_MAX = 21,
|
|
+ DMI_OEM_STRING = 22,
|
|
+};
|
|
+
|
|
+struct acpi_device;
|
|
+
|
|
+struct pci_sysdata {
|
|
+ int domain;
|
|
+ int node;
|
|
+ struct acpi_device *companion;
|
|
+ void *iommu;
|
|
+ void *fwnode;
|
|
+ bool vmd_domain;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NONE_FORCE_HPET_RESUME = 0,
|
|
+ OLD_ICH_FORCE_HPET_RESUME = 1,
|
|
+ ICH_FORCE_HPET_RESUME = 2,
|
|
+ VT8237_FORCE_HPET_RESUME = 3,
|
|
+ NVIDIA_FORCE_HPET_RESUME = 4,
|
|
+ ATI_FORCE_HPET_RESUME = 5,
|
|
+};
|
|
+
|
|
+enum meminit_context {
|
|
+ MEMINIT_EARLY = 0,
|
|
+ MEMINIT_HOTPLUG = 1,
|
|
+};
|
|
+
|
|
+struct cpu {
|
|
+ int node_id;
|
|
+ int hotpluggable;
|
|
+ struct device dev;
|
|
+};
|
|
+
|
|
+struct x86_cpu {
|
|
+ struct cpu cpu;
|
|
+};
|
|
+
|
|
+struct debugfs_blob_wrapper {
|
|
+ void *data;
|
|
+ long unsigned int size;
|
|
+};
|
|
+
|
|
+struct setup_data_node {
|
|
+ u64 paddr;
|
|
+ u32 type;
|
|
+ u32 len;
|
|
+};
|
|
+
|
|
+struct smp_alt_module {
|
|
+ struct module *mod;
|
|
+ char *name;
|
|
+ const s32 *locks;
|
|
+ const s32 *locks_end;
|
|
+ u8 *text;
|
|
+ u8 *text_end;
|
|
+ struct list_head next;
|
|
+};
|
|
+
|
|
+struct die_args {
|
|
+ struct pt_regs *regs;
|
|
+ const char *str;
|
|
+ long int err;
|
|
+ int trapnr;
|
|
+ int signr;
|
|
+};
|
|
+
|
|
+struct user_i387_struct {
|
|
+ short unsigned int cwd;
|
|
+ short unsigned int swd;
|
|
+ short unsigned int twd;
|
|
+ short unsigned int fop;
|
|
+ __u64 rip;
|
|
+ __u64 rdp;
|
|
+ __u32 mxcsr;
|
|
+ __u32 mxcsr_mask;
|
|
+ __u32 st_space[32];
|
|
+ __u32 xmm_space[64];
|
|
+ __u32 padding[24];
|
|
+};
|
|
+
|
|
+struct user_regs_struct {
|
|
+ long unsigned int r15;
|
|
+ long unsigned int r14;
|
|
+ long unsigned int r13;
|
|
+ long unsigned int r12;
|
|
+ long unsigned int bp;
|
|
+ long unsigned int bx;
|
|
+ long unsigned int r11;
|
|
+ long unsigned int r10;
|
|
+ long unsigned int r9;
|
|
+ long unsigned int r8;
|
|
+ long unsigned int ax;
|
|
+ long unsigned int cx;
|
|
+ long unsigned int dx;
|
|
+ long unsigned int si;
|
|
+ long unsigned int di;
|
|
+ long unsigned int orig_ax;
|
|
+ long unsigned int ip;
|
|
+ long unsigned int cs;
|
|
+ long unsigned int flags;
|
|
+ long unsigned int sp;
|
|
+ long unsigned int ss;
|
|
+ long unsigned int fs_base;
|
|
+ long unsigned int gs_base;
|
|
+ long unsigned int ds;
|
|
+ long unsigned int es;
|
|
+ long unsigned int fs;
|
|
+ long unsigned int gs;
|
|
+};
|
|
+
|
|
+struct user {
|
|
+ struct user_regs_struct regs;
|
|
+ int u_fpvalid;
|
|
+ int pad0;
|
|
+ struct user_i387_struct i387;
|
|
+ long unsigned int u_tsize;
|
|
+ long unsigned int u_dsize;
|
|
+ long unsigned int u_ssize;
|
|
+ long unsigned int start_code;
|
|
+ long unsigned int start_stack;
|
|
+ long int signal;
|
|
+ int reserved;
|
|
+ int pad1;
|
|
+ long unsigned int u_ar0;
|
|
+ struct user_i387_struct *u_fpstate;
|
|
+ long unsigned int magic;
|
|
+ char u_comm[32];
|
|
+ long unsigned int u_debugreg[8];
|
|
+ long unsigned int error_code;
|
|
+ long unsigned int fault_address;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ HW_BREAKPOINT_LEN_1 = 1,
|
|
+ HW_BREAKPOINT_LEN_2 = 2,
|
|
+ HW_BREAKPOINT_LEN_3 = 3,
|
|
+ HW_BREAKPOINT_LEN_4 = 4,
|
|
+ HW_BREAKPOINT_LEN_5 = 5,
|
|
+ HW_BREAKPOINT_LEN_6 = 6,
|
|
+ HW_BREAKPOINT_LEN_7 = 7,
|
|
+ HW_BREAKPOINT_LEN_8 = 8,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ HW_BREAKPOINT_EMPTY = 0,
|
|
+ HW_BREAKPOINT_R = 1,
|
|
+ HW_BREAKPOINT_W = 2,
|
|
+ HW_BREAKPOINT_RW = 3,
|
|
+ HW_BREAKPOINT_X = 4,
|
|
+ HW_BREAKPOINT_INVALID = 7,
|
|
+};
|
|
+
|
|
+typedef unsigned int u_int;
|
|
+
|
|
+typedef long long unsigned int cycles_t;
|
|
+
|
|
+struct system_counterval_t {
|
|
+ u64 cycles;
|
|
+ struct clocksource *cs;
|
|
+};
|
|
+
|
|
+struct cpufreq_freqs {
|
|
+ unsigned int cpu;
|
|
+ unsigned int old;
|
|
+ unsigned int new;
|
|
+ u8 flags;
|
|
+};
|
|
+
|
|
+struct cyc2ns {
|
|
+ struct cyc2ns_data data[2];
|
|
+ seqcount_t seq;
|
|
+};
|
|
+
|
|
+struct freq_desc {
|
|
+ u8 msr_plat;
|
|
+ u32 freqs[9];
|
|
+};
|
|
+
|
|
+struct dmi_strmatch {
|
|
+ unsigned char slot: 7;
|
|
+ unsigned char exact_match: 1;
|
|
+ char substr[79];
|
|
+};
|
|
+
|
|
+struct dmi_system_id {
|
|
+ int (*callback)(const struct dmi_system_id *);
|
|
+ const char *ident;
|
|
+ struct dmi_strmatch matches[4];
|
|
+ void *driver_data;
|
|
+};
|
|
+
|
|
+struct pdev_archdata {};
|
|
+
|
|
+struct platform_device_id;
|
|
+
|
|
+struct mfd_cell;
|
|
+
|
|
+struct platform_device {
|
|
+ const char *name;
|
|
+ int id;
|
|
+ bool id_auto;
|
|
+ struct device dev;
|
|
+ u32 num_resources;
|
|
+ struct resource *resource;
|
|
+ const struct platform_device_id *id_entry;
|
|
+ char *driver_override;
|
|
+ struct mfd_cell *mfd_cell;
|
|
+ struct pdev_archdata archdata;
|
|
+};
|
|
+
|
|
+struct platform_device_id {
|
|
+ char name[20];
|
|
+ kernel_ulong_t driver_data;
|
|
+};
|
|
+
|
|
+struct rtc_time {
|
|
+ int tm_sec;
|
|
+ int tm_min;
|
|
+ int tm_hour;
|
|
+ int tm_mday;
|
|
+ int tm_mon;
|
|
+ int tm_year;
|
|
+ int tm_wday;
|
|
+ int tm_yday;
|
|
+ int tm_isdst;
|
|
+};
|
|
+
|
|
+struct pnp_device_id {
|
|
+ __u8 id[8];
|
|
+ kernel_ulong_t driver_data;
|
|
+};
|
|
+
|
|
+struct pnp_card_device_id {
|
|
+ __u8 id[8];
|
|
+ kernel_ulong_t driver_data;
|
|
+ struct {
|
|
+ __u8 id[8];
|
|
+ } devs[8];
|
|
+};
|
|
+
|
|
+struct pnp_protocol;
|
|
+
|
|
+struct pnp_id;
|
|
+
|
|
+struct pnp_card {
|
|
+ struct device dev;
|
|
+ unsigned char number;
|
|
+ struct list_head global_list;
|
|
+ struct list_head protocol_list;
|
|
+ struct list_head devices;
|
|
+ struct pnp_protocol *protocol;
|
|
+ struct pnp_id *id;
|
|
+ char name[50];
|
|
+ unsigned char pnpver;
|
|
+ unsigned char productver;
|
|
+ unsigned int serial;
|
|
+ unsigned char checksum;
|
|
+ struct proc_dir_entry *procdir;
|
|
+};
|
|
+
|
|
+struct pnp_dev;
|
|
+
|
|
+struct pnp_protocol {
|
|
+ struct list_head protocol_list;
|
|
+ char *name;
|
|
+ int (*get)(struct pnp_dev *);
|
|
+ int (*set)(struct pnp_dev *);
|
|
+ int (*disable)(struct pnp_dev *);
|
|
+ bool (*can_wakeup)(struct pnp_dev *);
|
|
+ int (*suspend)(struct pnp_dev *, pm_message_t);
|
|
+ int (*resume)(struct pnp_dev *);
|
|
+ unsigned char number;
|
|
+ struct device dev;
|
|
+ struct list_head cards;
|
|
+ struct list_head devices;
|
|
+};
|
|
+
|
|
+struct pnp_id {
|
|
+ char id[8];
|
|
+ struct pnp_id *next;
|
|
+};
|
|
+
|
|
+struct pnp_card_driver;
|
|
+
|
|
+struct pnp_card_link {
|
|
+ struct pnp_card *card;
|
|
+ struct pnp_card_driver *driver;
|
|
+ void *driver_data;
|
|
+ pm_message_t pm_state;
|
|
+};
|
|
+
|
|
+struct pnp_driver {
|
|
+ char *name;
|
|
+ const struct pnp_device_id *id_table;
|
|
+ unsigned int flags;
|
|
+ int (*probe)(struct pnp_dev *, const struct pnp_device_id *);
|
|
+ void (*remove)(struct pnp_dev *);
|
|
+ void (*shutdown)(struct pnp_dev *);
|
|
+ int (*suspend)(struct pnp_dev *, pm_message_t);
|
|
+ int (*resume)(struct pnp_dev *);
|
|
+ struct device_driver driver;
|
|
+};
|
|
+
|
|
+struct pnp_card_driver {
|
|
+ struct list_head global_list;
|
|
+ char *name;
|
|
+ const struct pnp_card_device_id *id_table;
|
|
+ unsigned int flags;
|
|
+ int (*probe)(struct pnp_card_link *, const struct pnp_card_device_id *);
|
|
+ void (*remove)(struct pnp_card_link *);
|
|
+ int (*suspend)(struct pnp_card_link *, pm_message_t);
|
|
+ int (*resume)(struct pnp_card_link *);
|
|
+ struct pnp_driver link;
|
|
+};
|
|
+
|
|
+struct pnp_dev {
|
|
+ struct device dev;
|
|
+ u64 dma_mask;
|
|
+ unsigned int number;
|
|
+ int status;
|
|
+ struct list_head global_list;
|
|
+ struct list_head protocol_list;
|
|
+ struct list_head card_list;
|
|
+ struct list_head rdev_list;
|
|
+ struct pnp_protocol *protocol;
|
|
+ struct pnp_card *card;
|
|
+ struct pnp_driver *driver;
|
|
+ struct pnp_card_link *card_link;
|
|
+ struct pnp_id *id;
|
|
+ int active;
|
|
+ int capabilities;
|
|
+ unsigned int num_dependent_sets;
|
|
+ struct list_head resources;
|
|
+ struct list_head options;
|
|
+ char name[50];
|
|
+ int flags;
|
|
+ struct proc_dir_entry *procent;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+struct sfi_rtc_table_entry {
|
|
+ u64 phys_addr;
|
|
+ u32 irq;
|
|
+} __attribute__((packed));
|
|
+
|
|
+enum intel_mid_cpu_type {
|
|
+ INTEL_MID_CPU_CHIP_PENWELL = 2,
|
|
+ INTEL_MID_CPU_CHIP_CLOVERVIEW = 3,
|
|
+ INTEL_MID_CPU_CHIP_TANGIER = 4,
|
|
+};
|
|
+
|
|
+enum intel_mid_timer_options {
|
|
+ INTEL_MID_TIMER_DEFAULT = 0,
|
|
+ INTEL_MID_TIMER_APBT_ONLY = 1,
|
|
+ INTEL_MID_TIMER_LAPIC_APBT = 2,
|
|
+};
|
|
+
|
|
+enum idle_boot_override {
|
|
+ IDLE_NO_OVERRIDE = 0,
|
|
+ IDLE_HALT = 1,
|
|
+ IDLE_NOMWAIT = 2,
|
|
+ IDLE_POLL = 3,
|
|
+};
|
|
+
|
|
+struct plist_head {
|
|
+ struct list_head node_list;
|
|
+};
|
|
+
|
|
+enum pm_qos_type {
|
|
+ PM_QOS_UNITIALIZED = 0,
|
|
+ PM_QOS_MAX = 1,
|
|
+ PM_QOS_MIN = 2,
|
|
+ PM_QOS_SUM = 3,
|
|
+};
|
|
+
|
|
+struct pm_qos_constraints {
|
|
+ struct plist_head list;
|
|
+ s32 target_value;
|
|
+ s32 default_value;
|
|
+ s32 no_constraint_value;
|
|
+ enum pm_qos_type type;
|
|
+ struct blocking_notifier_head *notifiers;
|
|
+};
|
|
+
|
|
+struct pm_qos_flags {
|
|
+ struct list_head list;
|
|
+ s32 effective_flags;
|
|
+};
|
|
+
|
|
+struct dev_pm_qos_request;
|
|
+
|
|
+struct dev_pm_qos {
|
|
+ struct pm_qos_constraints resume_latency;
|
|
+ struct pm_qos_constraints latency_tolerance;
|
|
+ struct pm_qos_flags flags;
|
|
+ struct dev_pm_qos_request *resume_latency_req;
|
|
+ struct dev_pm_qos_request *latency_tolerance_req;
|
|
+ struct dev_pm_qos_request *flags_req;
|
|
+};
|
|
+
|
|
+enum tick_broadcast_mode {
|
|
+ TICK_BROADCAST_OFF = 0,
|
|
+ TICK_BROADCAST_ON = 1,
|
|
+ TICK_BROADCAST_FORCE = 2,
|
|
+};
|
|
+
|
|
+enum tick_broadcast_state {
|
|
+ TICK_BROADCAST_EXIT = 0,
|
|
+ TICK_BROADCAST_ENTER = 1,
|
|
+};
|
|
+
|
|
+struct cpuidle_state_usage {
|
|
+ long long unsigned int disable;
|
|
+ long long unsigned int usage;
|
|
+ long long unsigned int time;
|
|
+ long long unsigned int s2idle_usage;
|
|
+ long long unsigned int s2idle_time;
|
|
+};
|
|
+
|
|
+struct cpuidle_driver_kobj;
|
|
+
|
|
+struct cpuidle_state_kobj;
|
|
+
|
|
+struct cpuidle_device_kobj;
|
|
+
|
|
+struct cpuidle_device {
|
|
+ unsigned int registered: 1;
|
|
+ unsigned int enabled: 1;
|
|
+ unsigned int use_deepest_state: 1;
|
|
+ unsigned int poll_time_limit: 1;
|
|
+ unsigned int cpu;
|
|
+ int last_residency;
|
|
+ struct cpuidle_state_usage states_usage[10];
|
|
+ struct cpuidle_state_kobj *kobjs[10];
|
|
+ struct cpuidle_driver_kobj *kobj_driver;
|
|
+ struct cpuidle_device_kobj *kobj_dev;
|
|
+ struct list_head device_list;
|
|
+};
|
|
+
|
|
+struct pm_qos_flags_request {
|
|
+ struct list_head node;
|
|
+ s32 flags;
|
|
+};
|
|
+
|
|
+enum dev_pm_qos_req_type {
|
|
+ DEV_PM_QOS_RESUME_LATENCY = 1,
|
|
+ DEV_PM_QOS_LATENCY_TOLERANCE = 2,
|
|
+ DEV_PM_QOS_FLAGS = 3,
|
|
+};
|
|
+
|
|
+struct dev_pm_qos_request {
|
|
+ enum dev_pm_qos_req_type type;
|
|
+ union {
|
|
+ struct plist_node pnode;
|
|
+ struct pm_qos_flags_request flr;
|
|
+ } data;
|
|
+ struct device *dev;
|
|
+};
|
|
+
|
|
+struct ssb_state {
|
|
+ struct ssb_state *shared_state;
|
|
+ raw_spinlock_t lock;
|
|
+ unsigned int disable_state;
|
|
+ long unsigned int local_state;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_x86_fpu {
|
|
+ struct trace_entry ent;
|
|
+ struct fpu *fpu;
|
|
+ bool initialized;
|
|
+ u64 xfeatures;
|
|
+ u64 xcomp_bv;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_x86_fpu {};
|
|
+
|
|
+struct _fpreg {
|
|
+ __u16 significand[4];
|
|
+ __u16 exponent;
|
|
+};
|
|
+
|
|
+struct _fpxreg {
|
|
+ __u16 significand[4];
|
|
+ __u16 exponent;
|
|
+ __u16 padding[3];
|
|
+};
|
|
+
|
|
+struct user_i387_ia32_struct {
|
|
+ u32 cwd;
|
|
+ u32 swd;
|
|
+ u32 twd;
|
|
+ u32 fip;
|
|
+ u32 fcs;
|
|
+ u32 foo;
|
|
+ u32 fos;
|
|
+ u32 st_space[20];
|
|
+};
|
|
+
|
|
+struct user_regset;
|
|
+
|
|
+typedef int user_regset_active_fn(struct task_struct *, const struct user_regset *);
|
|
+
|
|
+typedef int user_regset_get_fn(struct task_struct *, const struct user_regset *, unsigned int, unsigned int, void *, void *);
|
|
+
|
|
+typedef int user_regset_set_fn(struct task_struct *, const struct user_regset *, unsigned int, unsigned int, const void *, const void *);
|
|
+
|
|
+typedef int user_regset_writeback_fn(struct task_struct *, const struct user_regset *, int);
|
|
+
|
|
+typedef unsigned int user_regset_get_size_fn(struct task_struct *, const struct user_regset *);
|
|
+
|
|
+struct user_regset {
|
|
+ user_regset_get_fn *get;
|
|
+ user_regset_set_fn *set;
|
|
+ user_regset_active_fn *active;
|
|
+ user_regset_writeback_fn *writeback;
|
|
+ user_regset_get_size_fn *get_size;
|
|
+ unsigned int n;
|
|
+ unsigned int size;
|
|
+ unsigned int align;
|
|
+ unsigned int bias;
|
|
+ unsigned int core_note_type;
|
|
+};
|
|
+
|
|
+struct _fpx_sw_bytes {
|
|
+ __u32 magic1;
|
|
+ __u32 extended_size;
|
|
+ __u64 xfeatures;
|
|
+ __u32 xstate_size;
|
|
+ __u32 padding[7];
|
|
+};
|
|
+
|
|
+struct _xmmreg {
|
|
+ __u32 element[4];
|
|
+};
|
|
+
|
|
+struct _fpstate_32 {
|
|
+ __u32 cw;
|
|
+ __u32 sw;
|
|
+ __u32 tag;
|
|
+ __u32 ipoff;
|
|
+ __u32 cssel;
|
|
+ __u32 dataoff;
|
|
+ __u32 datasel;
|
|
+ struct _fpreg _st[8];
|
|
+ __u16 status;
|
|
+ __u16 magic;
|
|
+ __u32 _fxsr_env[6];
|
|
+ __u32 mxcsr;
|
|
+ __u32 reserved;
|
|
+ struct _fpxreg _fxsr_st[8];
|
|
+ struct _xmmreg _xmm[8];
|
|
+ union {
|
|
+ __u32 padding1[44];
|
|
+ __u32 padding[44];
|
|
+ };
|
|
+ union {
|
|
+ __u32 padding2[12];
|
|
+ struct _fpx_sw_bytes sw_reserved;
|
|
+ };
|
|
+};
|
|
+
|
|
+typedef u32 compat_ulong_t;
|
|
+
|
|
+struct user_regset_view {
|
|
+ const char *name;
|
|
+ const struct user_regset *regsets;
|
|
+ unsigned int n;
|
|
+ u32 e_flags;
|
|
+ u16 e_machine;
|
|
+ u8 ei_osabi;
|
|
+};
|
|
+
|
|
+enum x86_regset {
|
|
+ REGSET_GENERAL = 0,
|
|
+ REGSET_FP = 1,
|
|
+ REGSET_XFP = 2,
|
|
+ REGSET_IOPERM64 = 2,
|
|
+ REGSET_XSTATE = 3,
|
|
+ REGSET_TLS = 4,
|
|
+ REGSET_IOPERM32 = 5,
|
|
+};
|
|
+
|
|
+struct pt_regs_offset {
|
|
+ const char *name;
|
|
+ int offset;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TB_SHUTDOWN_REBOOT = 0,
|
|
+ TB_SHUTDOWN_S5 = 1,
|
|
+ TB_SHUTDOWN_S4 = 2,
|
|
+ TB_SHUTDOWN_S3 = 3,
|
|
+ TB_SHUTDOWN_HALT = 4,
|
|
+ TB_SHUTDOWN_WFS = 5,
|
|
+};
|
|
+
|
|
+struct wakeup_header {
|
|
+ u16 video_mode;
|
|
+ u32 pmode_entry;
|
|
+ u16 pmode_cs;
|
|
+ u32 pmode_cr0;
|
|
+ u32 pmode_cr3;
|
|
+ u32 pmode_cr4;
|
|
+ u32 pmode_efer_low;
|
|
+ u32 pmode_efer_high;
|
|
+ u64 pmode_gdt;
|
|
+ u32 pmode_misc_en_low;
|
|
+ u32 pmode_misc_en_high;
|
|
+ u32 pmode_behavior;
|
|
+ u32 realmode_flags;
|
|
+ u32 real_magic;
|
|
+ u32 signature;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct sha1_hash {
|
|
+ u8 hash[20];
|
|
+};
|
|
+
|
|
+struct sinit_mle_data {
|
|
+ u32 version;
|
|
+ struct sha1_hash bios_acm_id;
|
|
+ u32 edx_senter_flags;
|
|
+ u64 mseg_valid;
|
|
+ struct sha1_hash sinit_hash;
|
|
+ struct sha1_hash mle_hash;
|
|
+ struct sha1_hash stm_hash;
|
|
+ struct sha1_hash lcp_policy_hash;
|
|
+ u32 lcp_policy_control;
|
|
+ u32 rlp_wakeup_addr;
|
|
+ u32 reserved;
|
|
+ u32 num_mdrs;
|
|
+ u32 mdrs_off;
|
|
+ u32 num_vtd_dmars;
|
|
+ u32 vtd_dmars_off;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct stack_frame_user {
|
|
+ const void *next_fp;
|
|
+ long unsigned int ret_addr;
|
|
+};
|
|
+
|
|
+enum cache_type {
|
|
+ CACHE_TYPE_NOCACHE = 0,
|
|
+ CACHE_TYPE_INST = 1,
|
|
+ CACHE_TYPE_DATA = 2,
|
|
+ CACHE_TYPE_SEPARATE = 3,
|
|
+ CACHE_TYPE_UNIFIED = 4,
|
|
+};
|
|
+
|
|
+struct cacheinfo {
|
|
+ unsigned int id;
|
|
+ enum cache_type type;
|
|
+ unsigned int level;
|
|
+ unsigned int coherency_line_size;
|
|
+ unsigned int number_of_sets;
|
|
+ unsigned int ways_of_associativity;
|
|
+ unsigned int physical_line_partition;
|
|
+ unsigned int size;
|
|
+ cpumask_t shared_cpu_map;
|
|
+ unsigned int attributes;
|
|
+ void *fw_token;
|
|
+ bool disable_sysfs;
|
|
+ void *priv;
|
|
+};
|
|
+
|
|
+struct cpu_cacheinfo {
|
|
+ struct cacheinfo *info_list;
|
|
+ unsigned int num_levels;
|
|
+ unsigned int num_leaves;
|
|
+ bool cpu_map_populated;
|
|
+};
|
|
+
|
|
+struct amd_l3_cache {
|
|
+ unsigned int indices;
|
|
+ u8 subcaches[4];
|
|
+};
|
|
+
|
|
+struct threshold_block {
|
|
+ unsigned int block;
|
|
+ unsigned int bank;
|
|
+ unsigned int cpu;
|
|
+ u32 address;
|
|
+ u16 interrupt_enable;
|
|
+ bool interrupt_capable;
|
|
+ u16 threshold_limit;
|
|
+ struct kobject kobj;
|
|
+ struct list_head miscj;
|
|
+};
|
|
+
|
|
+struct threshold_bank {
|
|
+ struct kobject *kobj;
|
|
+ struct threshold_block *blocks;
|
|
+ refcount_t cpus;
|
|
+};
|
|
+
|
|
+struct amd_northbridge {
|
|
+ struct pci_dev *root;
|
|
+ struct pci_dev *misc;
|
|
+ struct pci_dev *link;
|
|
+ struct amd_l3_cache l3_cache;
|
|
+ struct threshold_bank *bank4;
|
|
+};
|
|
+
|
|
+struct cpu_dev {
|
|
+ const char *c_vendor;
|
|
+ const char *c_ident[2];
|
|
+ void (*c_early_init)(struct cpuinfo_x86 *);
|
|
+ void (*c_bsp_init)(struct cpuinfo_x86 *);
|
|
+ void (*c_init)(struct cpuinfo_x86 *);
|
|
+ void (*c_identify)(struct cpuinfo_x86 *);
|
|
+ void (*c_detect_tlb)(struct cpuinfo_x86 *);
|
|
+ void (*c_bsp_resume)(struct cpuinfo_x86 *);
|
|
+ int c_x86_vendor;
|
|
+};
|
|
+
|
|
+enum tsx_ctrl_states {
|
|
+ TSX_CTRL_ENABLE = 0,
|
|
+ TSX_CTRL_DISABLE = 1,
|
|
+ TSX_CTRL_NOT_SUPPORTED = 2,
|
|
+};
|
|
+
|
|
+struct _cache_table {
|
|
+ unsigned char descriptor;
|
|
+ char cache_type;
|
|
+ short int size;
|
|
+};
|
|
+
|
|
+enum _cache_type {
|
|
+ CTYPE_NULL = 0,
|
|
+ CTYPE_DATA = 1,
|
|
+ CTYPE_INST = 2,
|
|
+ CTYPE_UNIFIED = 3,
|
|
+};
|
|
+
|
|
+union _cpuid4_leaf_eax {
|
|
+ struct {
|
|
+ enum _cache_type type: 5;
|
|
+ unsigned int level: 3;
|
|
+ unsigned int is_self_initializing: 1;
|
|
+ unsigned int is_fully_associative: 1;
|
|
+ unsigned int reserved: 4;
|
|
+ unsigned int num_threads_sharing: 12;
|
|
+ unsigned int num_cores_on_die: 6;
|
|
+ } split;
|
|
+ u32 full;
|
|
+};
|
|
+
|
|
+union _cpuid4_leaf_ebx {
|
|
+ struct {
|
|
+ unsigned int coherency_line_size: 12;
|
|
+ unsigned int physical_line_partition: 10;
|
|
+ unsigned int ways_of_associativity: 10;
|
|
+ } split;
|
|
+ u32 full;
|
|
+};
|
|
+
|
|
+union _cpuid4_leaf_ecx {
|
|
+ struct {
|
|
+ unsigned int number_of_sets: 32;
|
|
+ } split;
|
|
+ u32 full;
|
|
+};
|
|
+
|
|
+struct _cpuid4_info_regs {
|
|
+ union _cpuid4_leaf_eax eax;
|
|
+ union _cpuid4_leaf_ebx ebx;
|
|
+ union _cpuid4_leaf_ecx ecx;
|
|
+ unsigned int id;
|
|
+ long unsigned int size;
|
|
+ struct amd_northbridge *nb;
|
|
+};
|
|
+
|
|
+union l1_cache {
|
|
+ struct {
|
|
+ unsigned int line_size: 8;
|
|
+ unsigned int lines_per_tag: 8;
|
|
+ unsigned int assoc: 8;
|
|
+ unsigned int size_in_kb: 8;
|
|
+ };
|
|
+ unsigned int val;
|
|
+};
|
|
+
|
|
+union l2_cache {
|
|
+ struct {
|
|
+ unsigned int line_size: 8;
|
|
+ unsigned int lines_per_tag: 4;
|
|
+ unsigned int assoc: 4;
|
|
+ unsigned int size_in_kb: 16;
|
|
+ };
|
|
+ unsigned int val;
|
|
+};
|
|
+
|
|
+union l3_cache {
|
|
+ struct {
|
|
+ unsigned int line_size: 8;
|
|
+ unsigned int lines_per_tag: 4;
|
|
+ unsigned int assoc: 4;
|
|
+ unsigned int res: 2;
|
|
+ unsigned int size_encoded: 14;
|
|
+ };
|
|
+ unsigned int val;
|
|
+};
|
|
+
|
|
+struct cpuid_bit {
|
|
+ u16 feature;
|
|
+ u8 reg;
|
|
+ u8 bit;
|
|
+ u32 level;
|
|
+ u32 sub_leaf;
|
|
+};
|
|
+
|
|
+enum cpuid_leafs {
|
|
+ CPUID_1_EDX = 0,
|
|
+ CPUID_8000_0001_EDX = 1,
|
|
+ CPUID_8086_0001_EDX = 2,
|
|
+ CPUID_LNX_1 = 3,
|
|
+ CPUID_1_ECX = 4,
|
|
+ CPUID_C000_0001_EDX = 5,
|
|
+ CPUID_8000_0001_ECX = 6,
|
|
+ CPUID_LNX_2 = 7,
|
|
+ CPUID_LNX_3 = 8,
|
|
+ CPUID_7_0_EBX = 9,
|
|
+ CPUID_D_1_EAX = 10,
|
|
+ CPUID_LNX_4 = 11,
|
|
+ CPUID_7_1_EAX = 12,
|
|
+ CPUID_8000_0008_EBX = 13,
|
|
+ CPUID_6_EAX = 14,
|
|
+ CPUID_8000_000A_EDX = 15,
|
|
+ CPUID_7_ECX = 16,
|
|
+ CPUID_8000_0007_EBX = 17,
|
|
+ CPUID_7_EDX = 18,
|
|
+};
|
|
+
|
|
+struct cpuid_dependent_feature {
|
|
+ u32 feature;
|
|
+ u32 level;
|
|
+};
|
|
+
|
|
+typedef u32 pao_T_____4;
|
|
+
|
|
+enum spectre_v2_mitigation {
|
|
+ SPECTRE_V2_NONE = 0,
|
|
+ SPECTRE_V2_RETPOLINE_GENERIC = 1,
|
|
+ SPECTRE_V2_RETPOLINE_AMD = 2,
|
|
+ SPECTRE_V2_IBRS_ENHANCED = 3,
|
|
+};
|
|
+
|
|
+enum spectre_v2_user_mitigation {
|
|
+ SPECTRE_V2_USER_NONE = 0,
|
|
+ SPECTRE_V2_USER_STRICT = 1,
|
|
+ SPECTRE_V2_USER_STRICT_PREFERRED = 2,
|
|
+ SPECTRE_V2_USER_PRCTL = 3,
|
|
+ SPECTRE_V2_USER_SECCOMP = 4,
|
|
+};
|
|
+
|
|
+enum ssb_mitigation {
|
|
+ SPEC_STORE_BYPASS_NONE = 0,
|
|
+ SPEC_STORE_BYPASS_DISABLE = 1,
|
|
+ SPEC_STORE_BYPASS_PRCTL = 2,
|
|
+ SPEC_STORE_BYPASS_SECCOMP = 3,
|
|
+};
|
|
+
|
|
+enum mds_mitigations {
|
|
+ MDS_MITIGATION_OFF = 0,
|
|
+ MDS_MITIGATION_FULL = 1,
|
|
+ MDS_MITIGATION_VMWERV = 2,
|
|
+};
|
|
+
|
|
+enum taa_mitigations {
|
|
+ TAA_MITIGATION_OFF = 0,
|
|
+ TAA_MITIGATION_UCODE_NEEDED = 1,
|
|
+ TAA_MITIGATION_VERW = 2,
|
|
+ TAA_MITIGATION_TSX_DISABLED = 3,
|
|
+};
|
|
+
|
|
+enum vmx_l1d_flush_state {
|
|
+ VMENTER_L1D_FLUSH_AUTO = 0,
|
|
+ VMENTER_L1D_FLUSH_NEVER = 1,
|
|
+ VMENTER_L1D_FLUSH_COND = 2,
|
|
+ VMENTER_L1D_FLUSH_ALWAYS = 3,
|
|
+ VMENTER_L1D_FLUSH_EPT_DISABLED = 4,
|
|
+ VMENTER_L1D_FLUSH_NOT_REQUIRED = 5,
|
|
+};
|
|
+
|
|
+enum srbds_mitigations {
|
|
+ SRBDS_MITIGATION_OFF = 0,
|
|
+ SRBDS_MITIGATION_UCODE_NEEDED = 1,
|
|
+ SRBDS_MITIGATION_FULL = 2,
|
|
+ SRBDS_MITIGATION_TSX_OFF = 3,
|
|
+ SRBDS_MITIGATION_HYPERVISOR = 4,
|
|
+};
|
|
+
|
|
+enum spectre_v1_mitigation {
|
|
+ SPECTRE_V1_MITIGATION_NONE = 0,
|
|
+ SPECTRE_V1_MITIGATION_AUTO = 1,
|
|
+};
|
|
+
|
|
+enum spectre_v2_mitigation_cmd {
|
|
+ SPECTRE_V2_CMD_NONE = 0,
|
|
+ SPECTRE_V2_CMD_AUTO = 1,
|
|
+ SPECTRE_V2_CMD_FORCE = 2,
|
|
+ SPECTRE_V2_CMD_RETPOLINE = 3,
|
|
+ SPECTRE_V2_CMD_RETPOLINE_GENERIC = 4,
|
|
+ SPECTRE_V2_CMD_RETPOLINE_AMD = 5,
|
|
+};
|
|
+
|
|
+enum spectre_v2_user_cmd {
|
|
+ SPECTRE_V2_USER_CMD_NONE = 0,
|
|
+ SPECTRE_V2_USER_CMD_AUTO = 1,
|
|
+ SPECTRE_V2_USER_CMD_FORCE = 2,
|
|
+ SPECTRE_V2_USER_CMD_PRCTL = 3,
|
|
+ SPECTRE_V2_USER_CMD_PRCTL_IBPB = 4,
|
|
+ SPECTRE_V2_USER_CMD_SECCOMP = 5,
|
|
+ SPECTRE_V2_USER_CMD_SECCOMP_IBPB = 6,
|
|
+};
|
|
+
|
|
+enum ssb_mitigation_cmd {
|
|
+ SPEC_STORE_BYPASS_CMD_NONE = 0,
|
|
+ SPEC_STORE_BYPASS_CMD_AUTO = 1,
|
|
+ SPEC_STORE_BYPASS_CMD_ON = 2,
|
|
+ SPEC_STORE_BYPASS_CMD_PRCTL = 3,
|
|
+ SPEC_STORE_BYPASS_CMD_SECCOMP = 4,
|
|
+};
|
|
+
|
|
+struct aperfmperf_sample {
|
|
+ unsigned int khz;
|
|
+ ktime_t time;
|
|
+ u64 aperf;
|
|
+ u64 mperf;
|
|
+};
|
|
+
|
|
+struct cpuid_dep {
|
|
+ unsigned int feature;
|
|
+ unsigned int depends;
|
|
+};
|
|
+
|
|
+struct _tlb_table {
|
|
+ unsigned char descriptor;
|
|
+ char tlb_type;
|
|
+ unsigned int entries;
|
|
+ char info[128];
|
|
+};
|
|
+
|
|
+struct sku_microcode {
|
|
+ u8 model;
|
|
+ u8 stepping;
|
|
+ u32 microcode;
|
|
+};
|
|
+
|
|
+struct cpuid_regs {
|
|
+ u32 eax;
|
|
+ u32 ebx;
|
|
+ u32 ecx;
|
|
+ u32 edx;
|
|
+};
|
|
+
|
|
+enum pconfig_target {
|
|
+ INVALID_TARGET = 0,
|
|
+ MKTME_TARGET = 1,
|
|
+ PCONFIG_TARGET_NR = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ PCONFIG_CPUID_SUBLEAF_INVALID = 0,
|
|
+ PCONFIG_CPUID_SUBLEAF_TARGETID = 1,
|
|
+};
|
|
+
|
|
+enum mf_flags {
|
|
+ MF_COUNT_INCREASED = 1,
|
|
+ MF_ACTION_REQUIRED = 2,
|
|
+ MF_MUST_KILL = 4,
|
|
+ MF_SOFT_OFFLINE = 8,
|
|
+};
|
|
+
|
|
+enum mce_notifier_prios {
|
|
+ MCE_PRIO_FIRST = 2147483647,
|
|
+ MCE_PRIO_SRAO = 2147483646,
|
|
+ MCE_PRIO_EXTLOG = 2147483645,
|
|
+ MCE_PRIO_NFIT = 2147483644,
|
|
+ MCE_PRIO_EDAC = 2147483643,
|
|
+ MCE_PRIO_MCELOG = 1,
|
|
+ MCE_PRIO_LOWEST = 0,
|
|
+};
|
|
+
|
|
+enum mcp_flags {
|
|
+ MCP_TIMESTAMP = 1,
|
|
+ MCP_UC = 2,
|
|
+ MCP_DONTLOG = 4,
|
|
+};
|
|
+
|
|
+enum severity_level {
|
|
+ MCE_NO_SEVERITY = 0,
|
|
+ MCE_DEFERRED_SEVERITY = 1,
|
|
+ MCE_UCNA_SEVERITY = 1,
|
|
+ MCE_KEEP_SEVERITY = 2,
|
|
+ MCE_SOME_SEVERITY = 3,
|
|
+ MCE_AO_SEVERITY = 4,
|
|
+ MCE_UC_SEVERITY = 5,
|
|
+ MCE_AR_SEVERITY = 6,
|
|
+ MCE_PANIC_SEVERITY = 7,
|
|
+};
|
|
+
|
|
+struct mce_bank {
|
|
+ u64 ctl;
|
|
+ unsigned char init;
|
|
+ struct device_attribute attr;
|
|
+ char attrname[16];
|
|
+};
|
|
+
|
|
+struct mce_evt_llist {
|
|
+ struct llist_node llnode;
|
|
+ struct mce mce;
|
|
+};
|
|
+
|
|
+struct mca_config {
|
|
+ bool dont_log_ce;
|
|
+ bool cmci_disabled;
|
|
+ bool ignore_ce;
|
|
+ __u64 lmce_disabled: 1;
|
|
+ __u64 disabled: 1;
|
|
+ __u64 ser: 1;
|
|
+ __u64 recovery: 1;
|
|
+ __u64 bios_cmci_threshold: 1;
|
|
+ long: 35;
|
|
+ __u64 __reserved: 59;
|
|
+ u8 banks;
|
|
+ s8 bootlog;
|
|
+ int tolerant;
|
|
+ int monarch_timeout;
|
|
+ int panic_timeout;
|
|
+ u32 rip_msr;
|
|
+};
|
|
+
|
|
+struct mce_vendor_flags {
|
|
+ __u64 overflow_recov: 1;
|
|
+ __u64 succor: 1;
|
|
+ __u64 smca: 1;
|
|
+ __u64 __reserved_0: 61;
|
|
+};
|
|
+
|
|
+struct mca_msr_regs {
|
|
+ u32 (*ctl)(int);
|
|
+ u32 (*status)(int);
|
|
+ u32 (*addr)(int);
|
|
+ u32 (*misc)(int);
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mce_record {
|
|
+ struct trace_entry ent;
|
|
+ u64 mcgcap;
|
|
+ u64 mcgstatus;
|
|
+ u64 status;
|
|
+ u64 addr;
|
|
+ u64 misc;
|
|
+ u64 synd;
|
|
+ u64 ipid;
|
|
+ u64 ip;
|
|
+ u64 tsc;
|
|
+ u64 walltime;
|
|
+ u32 cpu;
|
|
+ u32 cpuid;
|
|
+ u32 apicid;
|
|
+ u32 socketid;
|
|
+ u8 cs;
|
|
+ u8 bank;
|
|
+ u8 cpuvendor;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_mce_record {};
|
|
+
|
|
+enum context {
|
|
+ IN_KERNEL = 1,
|
|
+ IN_USER = 2,
|
|
+ IN_KERNEL_RECOV = 3,
|
|
+};
|
|
+
|
|
+enum ser {
|
|
+ SER_REQUIRED = 1,
|
|
+ NO_SER = 2,
|
|
+};
|
|
+
|
|
+enum exception {
|
|
+ EXCP_CONTEXT = 1,
|
|
+ NO_EXCP = 2,
|
|
+};
|
|
+
|
|
+struct severity {
|
|
+ u64 mask;
|
|
+ u64 result;
|
|
+ unsigned char sev;
|
|
+ unsigned char mcgmask;
|
|
+ unsigned char mcgres;
|
|
+ unsigned char ser;
|
|
+ unsigned char context;
|
|
+ unsigned char excp;
|
|
+ unsigned char covered;
|
|
+ char *msg;
|
|
+};
|
|
+
|
|
+struct gen_pool;
|
|
+
|
|
+typedef long unsigned int (*genpool_algo_t)(long unsigned int *, long unsigned int, long unsigned int, unsigned int, void *, struct gen_pool *, long unsigned int);
|
|
+
|
|
+struct gen_pool {
|
|
+ spinlock_t lock;
|
|
+ struct list_head chunks;
|
|
+ int min_alloc_order;
|
|
+ genpool_algo_t algo;
|
|
+ void *data;
|
|
+ const char *name;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CMCI_STORM_NONE = 0,
|
|
+ CMCI_STORM_ACTIVE = 1,
|
|
+ CMCI_STORM_SUBSIDED = 2,
|
|
+};
|
|
+
|
|
+typedef unsigned int pto_T_____12;
|
|
+
|
|
+enum kobject_action {
|
|
+ KOBJ_ADD = 0,
|
|
+ KOBJ_REMOVE = 1,
|
|
+ KOBJ_CHANGE = 2,
|
|
+ KOBJ_MOVE = 3,
|
|
+ KOBJ_ONLINE = 4,
|
|
+ KOBJ_OFFLINE = 5,
|
|
+ KOBJ_BIND = 6,
|
|
+ KOBJ_UNBIND = 7,
|
|
+ KOBJ_MAX = 8,
|
|
+};
|
|
+
|
|
+enum smca_bank_types {
|
|
+ SMCA_LS = 0,
|
|
+ SMCA_IF = 1,
|
|
+ SMCA_L2_CACHE = 2,
|
|
+ SMCA_DE = 3,
|
|
+ SMCA_RESERVED = 4,
|
|
+ SMCA_EX = 5,
|
|
+ SMCA_FP = 6,
|
|
+ SMCA_L3_CACHE = 7,
|
|
+ SMCA_CS = 8,
|
|
+ SMCA_PIE = 9,
|
|
+ SMCA_UMC = 10,
|
|
+ SMCA_PB = 11,
|
|
+ SMCA_PSP = 12,
|
|
+ SMCA_SMU = 13,
|
|
+ N_SMCA_BANK_TYPES = 14,
|
|
+};
|
|
+
|
|
+struct smca_bank_name {
|
|
+ const char *name;
|
|
+ const char *long_name;
|
|
+};
|
|
+
|
|
+struct thresh_restart {
|
|
+ struct threshold_block *b;
|
|
+ int reset;
|
|
+ int set_lvt_off;
|
|
+ int lvt_off;
|
|
+ u16 old_limit;
|
|
+};
|
|
+
|
|
+struct threshold_attr {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct threshold_block *, char *);
|
|
+ ssize_t (*store)(struct threshold_block *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct _thermal_state {
|
|
+ bool new_event;
|
|
+ int event;
|
|
+ u64 next_check;
|
|
+ long unsigned int count;
|
|
+ long unsigned int last_count;
|
|
+};
|
|
+
|
|
+struct thermal_state {
|
|
+ struct _thermal_state core_throttle;
|
|
+ struct _thermal_state core_power_limit;
|
|
+ struct _thermal_state package_throttle;
|
|
+ struct _thermal_state package_power_limit;
|
|
+ struct _thermal_state core_thresh0;
|
|
+ struct _thermal_state core_thresh1;
|
|
+ struct _thermal_state pkg_thresh0;
|
|
+ struct _thermal_state pkg_thresh1;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CPER_SEV_RECOVERABLE = 0,
|
|
+ CPER_SEV_FATAL = 1,
|
|
+ CPER_SEV_CORRECTED = 2,
|
|
+ CPER_SEV_INFORMATIONAL = 3,
|
|
+};
|
|
+
|
|
+struct cper_record_header {
|
|
+ char signature[4];
|
|
+ __u16 revision;
|
|
+ __u32 signature_end;
|
|
+ __u16 section_count;
|
|
+ __u32 error_severity;
|
|
+ __u32 validation_bits;
|
|
+ __u32 record_length;
|
|
+ __u64 timestamp;
|
|
+ guid_t platform_id;
|
|
+ guid_t partition_id;
|
|
+ guid_t creator_id;
|
|
+ guid_t notification_type;
|
|
+ __u64 record_id;
|
|
+ __u32 flags;
|
|
+ __u64 persistence_information;
|
|
+ __u8 reserved[12];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct cper_section_descriptor {
|
|
+ __u32 section_offset;
|
|
+ __u32 section_length;
|
|
+ __u16 revision;
|
|
+ __u8 validation_bits;
|
|
+ __u8 reserved;
|
|
+ __u32 flags;
|
|
+ guid_t section_type;
|
|
+ guid_t fru_id;
|
|
+ __u32 section_severity;
|
|
+ __u8 fru_text[20];
|
|
+};
|
|
+
|
|
+struct cper_sec_mem_err {
|
|
+ __u64 validation_bits;
|
|
+ __u64 error_status;
|
|
+ __u64 physical_addr;
|
|
+ __u64 physical_addr_mask;
|
|
+ __u16 node;
|
|
+ __u16 card;
|
|
+ __u16 module;
|
|
+ __u16 bank;
|
|
+ __u16 device;
|
|
+ __u16 row;
|
|
+ __u16 column;
|
|
+ __u16 bit_pos;
|
|
+ __u64 requestor_id;
|
|
+ __u64 responder_id;
|
|
+ __u64 target_id;
|
|
+ __u8 error_type;
|
|
+ __u8 reserved;
|
|
+ __u16 rank;
|
|
+ __u16 mem_array_handle;
|
|
+ __u16 mem_dev_handle;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ GHES_SEV_NO = 0,
|
|
+ GHES_SEV_CORRECTED = 1,
|
|
+ GHES_SEV_RECOVERABLE = 2,
|
|
+ GHES_SEV_PANIC = 3,
|
|
+};
|
|
+
|
|
+struct cper_mce_record {
|
|
+ struct cper_record_header hdr;
|
|
+ struct cper_section_descriptor sec_hdr;
|
|
+ struct mce mce;
|
|
+};
|
|
+
|
|
+struct miscdevice {
|
|
+ int minor;
|
|
+ const char *name;
|
|
+ const struct file_operations *fops;
|
|
+ struct list_head list;
|
|
+ struct device *parent;
|
|
+ struct device *this_device;
|
|
+ const struct attribute_group **groups;
|
|
+ const char *nodename;
|
|
+ umode_t mode;
|
|
+};
|
|
+
|
|
+typedef struct poll_table_struct poll_table;
|
|
+
|
|
+struct mce_log_buffer {
|
|
+ char signature[12];
|
|
+ unsigned int len;
|
|
+ unsigned int next;
|
|
+ unsigned int flags;
|
|
+ unsigned int recordlen;
|
|
+ struct mce entry[32];
|
|
+};
|
|
+
|
|
+struct mtrr_var_range {
|
|
+ __u32 base_lo;
|
|
+ __u32 base_hi;
|
|
+ __u32 mask_lo;
|
|
+ __u32 mask_hi;
|
|
+};
|
|
+
|
|
+typedef __u8 mtrr_type;
|
|
+
|
|
+struct mtrr_state_type {
|
|
+ struct mtrr_var_range var_ranges[256];
|
|
+ mtrr_type fixed_ranges[88];
|
|
+ unsigned char enabled;
|
|
+ unsigned char have_fixed;
|
|
+ mtrr_type def_type;
|
|
+};
|
|
+
|
|
+struct mtrr_ops {
|
|
+ u32 vendor;
|
|
+ u32 use_intel_if;
|
|
+ void (*set)(unsigned int, long unsigned int, long unsigned int, mtrr_type);
|
|
+ void (*set_all)();
|
|
+ void (*get)(unsigned int, long unsigned int *, long unsigned int *, mtrr_type *);
|
|
+ int (*get_free_region)(long unsigned int, long unsigned int, int);
|
|
+ int (*validate_add_page)(long unsigned int, long unsigned int, unsigned int);
|
|
+ int (*have_wrcomb)();
|
|
+};
|
|
+
|
|
+struct set_mtrr_data {
|
|
+ long unsigned int smp_base;
|
|
+ long unsigned int smp_size;
|
|
+ unsigned int smp_reg;
|
|
+ mtrr_type smp_type;
|
|
+};
|
|
+
|
|
+struct mtrr_value {
|
|
+ mtrr_type ltype;
|
|
+ long unsigned int lbase;
|
|
+ long unsigned int lsize;
|
|
+};
|
|
+
|
|
+struct mtrr_sentry {
|
|
+ __u64 base;
|
|
+ __u32 size;
|
|
+ __u32 type;
|
|
+};
|
|
+
|
|
+struct mtrr_gentry {
|
|
+ __u64 base;
|
|
+ __u32 size;
|
|
+ __u32 regnum;
|
|
+ __u32 type;
|
|
+ __u32 _pad;
|
|
+};
|
|
+
|
|
+typedef u32 compat_uint_t;
|
|
+
|
|
+struct mtrr_sentry32 {
|
|
+ compat_ulong_t base;
|
|
+ compat_uint_t size;
|
|
+ compat_uint_t type;
|
|
+};
|
|
+
|
|
+struct mtrr_gentry32 {
|
|
+ compat_ulong_t regnum;
|
|
+ compat_uint_t base;
|
|
+ compat_uint_t size;
|
|
+ compat_uint_t type;
|
|
+};
|
|
+
|
|
+struct fixed_range_block {
|
|
+ int base_msr;
|
|
+ int ranges;
|
|
+};
|
|
+
|
|
+struct var_mtrr_range_state {
|
|
+ long unsigned int base_pfn;
|
|
+ long unsigned int size_pfn;
|
|
+ mtrr_type type;
|
|
+};
|
|
+
|
|
+struct var_mtrr_state {
|
|
+ long unsigned int range_startk;
|
|
+ long unsigned int range_sizek;
|
|
+ long unsigned int chunk_sizek;
|
|
+ long unsigned int gran_sizek;
|
|
+ unsigned int reg;
|
|
+};
|
|
+
|
|
+struct mtrr_cleanup_result {
|
|
+ long unsigned int gran_sizek;
|
|
+ long unsigned int chunk_sizek;
|
|
+ long unsigned int lose_cover_sizek;
|
|
+ unsigned int num_reg;
|
|
+ int bad;
|
|
+};
|
|
+
|
|
+struct subsys_interface {
|
|
+ const char *name;
|
|
+ struct bus_type *subsys;
|
|
+ struct list_head node;
|
|
+ int (*add_dev)(struct device *, struct subsys_interface *);
|
|
+ void (*remove_dev)(struct device *, struct subsys_interface *);
|
|
+};
|
|
+
|
|
+struct property_entry;
|
|
+
|
|
+struct platform_device_info {
|
|
+ struct device *parent;
|
|
+ struct fwnode_handle *fwnode;
|
|
+ const char *name;
|
|
+ int id;
|
|
+ const struct resource *res;
|
|
+ unsigned int num_res;
|
|
+ const void *data;
|
|
+ size_t size_data;
|
|
+ u64 dma_mask;
|
|
+ struct property_entry *properties;
|
|
+};
|
|
+
|
|
+struct builtin_fw {
|
|
+ char *name;
|
|
+ void *data;
|
|
+ long unsigned int size;
|
|
+};
|
|
+
|
|
+struct cpio_data {
|
|
+ void *data;
|
|
+ size_t size;
|
|
+ char name[18];
|
|
+};
|
|
+
|
|
+enum ucode_state {
|
|
+ UCODE_OK = 0,
|
|
+ UCODE_NEW = 1,
|
|
+ UCODE_UPDATED = 2,
|
|
+ UCODE_NFOUND = 3,
|
|
+ UCODE_ERROR = 4,
|
|
+};
|
|
+
|
|
+struct microcode_ops {
|
|
+ enum ucode_state (*request_microcode_user)(int, const void *, size_t);
|
|
+ enum ucode_state (*request_microcode_fw)(int, struct device *, bool);
|
|
+ void (*microcode_fini_cpu)(int);
|
|
+ enum ucode_state (*apply_microcode)(int);
|
|
+ int (*collect_cpu_info)(int, struct cpu_signature *);
|
|
+};
|
|
+
|
|
+struct cpu_info_ctx {
|
|
+ struct cpu_signature *cpu_sig;
|
|
+ int err;
|
|
+};
|
|
+
|
|
+struct firmware {
|
|
+ size_t size;
|
|
+ const u8 *data;
|
|
+ struct page **pages;
|
|
+ void *priv;
|
|
+};
|
|
+
|
|
+struct ucode_patch {
|
|
+ struct list_head plist;
|
|
+ void *data;
|
|
+ u32 patch_id;
|
|
+ u16 equiv_cpu;
|
|
+};
|
|
+
|
|
+struct microcode_header_intel {
|
|
+ unsigned int hdrver;
|
|
+ unsigned int rev;
|
|
+ unsigned int date;
|
|
+ unsigned int sig;
|
|
+ unsigned int cksum;
|
|
+ unsigned int ldrver;
|
|
+ unsigned int pf;
|
|
+ unsigned int datasize;
|
|
+ unsigned int totalsize;
|
|
+ unsigned int reserved[3];
|
|
+};
|
|
+
|
|
+struct microcode_intel {
|
|
+ struct microcode_header_intel hdr;
|
|
+ unsigned int bits[0];
|
|
+};
|
|
+
|
|
+struct extended_signature {
|
|
+ unsigned int sig;
|
|
+ unsigned int pf;
|
|
+ unsigned int cksum;
|
|
+};
|
|
+
|
|
+struct extended_sigtable {
|
|
+ unsigned int count;
|
|
+ unsigned int cksum;
|
|
+ unsigned int reserved[3];
|
|
+ struct extended_signature sigs[0];
|
|
+};
|
|
+
|
|
+struct equiv_cpu_entry {
|
|
+ u32 installed_cpu;
|
|
+ u32 fixed_errata_mask;
|
|
+ u32 fixed_errata_compare;
|
|
+ u16 equiv_cpu;
|
|
+ u16 res;
|
|
+};
|
|
+
|
|
+struct microcode_header_amd {
|
|
+ u32 data_code;
|
|
+ u32 patch_id;
|
|
+ u16 mc_patch_data_id;
|
|
+ u8 mc_patch_data_len;
|
|
+ u8 init_flag;
|
|
+ u32 mc_patch_data_checksum;
|
|
+ u32 nb_dev_id;
|
|
+ u32 sb_dev_id;
|
|
+ u16 processor_rev_id;
|
|
+ u8 nb_rev_id;
|
|
+ u8 sb_rev_id;
|
|
+ u8 bios_api_rev;
|
|
+ u8 reserved1[3];
|
|
+ u32 match_reg[8];
|
|
+};
|
|
+
|
|
+struct microcode_amd {
|
|
+ struct microcode_header_amd hdr;
|
|
+ unsigned int mpb[0];
|
|
+};
|
|
+
|
|
+struct cont_desc {
|
|
+ struct microcode_amd *mc;
|
|
+ u32 cpuid_1_eax;
|
|
+ u32 psize;
|
|
+ u8 *data;
|
|
+ size_t size;
|
|
+};
|
|
+
|
|
+enum mp_irq_source_types {
|
|
+ mp_INT = 0,
|
|
+ mp_NMI = 1,
|
|
+ mp_SMI = 2,
|
|
+ mp_ExtINT = 3,
|
|
+};
|
|
+
|
|
+struct IO_APIC_route_entry {
|
|
+ __u32 vector: 8;
|
|
+ __u32 delivery_mode: 3;
|
|
+ __u32 dest_mode: 1;
|
|
+ __u32 delivery_status: 1;
|
|
+ __u32 polarity: 1;
|
|
+ __u32 irr: 1;
|
|
+ __u32 trigger: 1;
|
|
+ __u32 mask: 1;
|
|
+ __u32 __reserved_2: 15;
|
|
+ __u32 __reserved_3: 24;
|
|
+ __u32 dest: 8;
|
|
+};
|
|
+
|
|
+typedef u64 acpi_physical_address;
|
|
+
|
|
+typedef u32 acpi_status;
|
|
+
|
|
+typedef void *acpi_handle;
|
|
+
|
|
+typedef u8 acpi_adr_space_type;
|
|
+
|
|
+struct acpi_subtable_header {
|
|
+ u8 type;
|
|
+ u8 length;
|
|
+};
|
|
+
|
|
+struct acpi_table_bgrt {
|
|
+ struct acpi_table_header header;
|
|
+ u16 version;
|
|
+ u8 status;
|
|
+ u8 image_type;
|
|
+ u64 image_address;
|
|
+ u32 image_offset_x;
|
|
+ u32 image_offset_y;
|
|
+};
|
|
+
|
|
+struct acpi_table_boot {
|
|
+ struct acpi_table_header header;
|
|
+ u8 cmos_index;
|
|
+ u8 reserved[3];
|
|
+};
|
|
+
|
|
+struct acpi_hmat_structure {
|
|
+ u16 type;
|
|
+ u16 reserved;
|
|
+ u32 length;
|
|
+};
|
|
+
|
|
+struct acpi_table_hpet {
|
|
+ struct acpi_table_header header;
|
|
+ u32 id;
|
|
+ struct acpi_generic_address address;
|
|
+ u8 sequence;
|
|
+ u16 minimum_tick;
|
|
+ u8 flags;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_table_madt {
|
|
+ struct acpi_table_header header;
|
|
+ u32 address;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+enum acpi_madt_type {
|
|
+ ACPI_MADT_TYPE_LOCAL_APIC = 0,
|
|
+ ACPI_MADT_TYPE_IO_APIC = 1,
|
|
+ ACPI_MADT_TYPE_INTERRUPT_OVERRIDE = 2,
|
|
+ ACPI_MADT_TYPE_NMI_SOURCE = 3,
|
|
+ ACPI_MADT_TYPE_LOCAL_APIC_NMI = 4,
|
|
+ ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE = 5,
|
|
+ ACPI_MADT_TYPE_IO_SAPIC = 6,
|
|
+ ACPI_MADT_TYPE_LOCAL_SAPIC = 7,
|
|
+ ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8,
|
|
+ ACPI_MADT_TYPE_LOCAL_X2APIC = 9,
|
|
+ ACPI_MADT_TYPE_LOCAL_X2APIC_NMI = 10,
|
|
+ ACPI_MADT_TYPE_GENERIC_INTERRUPT = 11,
|
|
+ ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12,
|
|
+ ACPI_MADT_TYPE_GENERIC_MSI_FRAME = 13,
|
|
+ ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR = 14,
|
|
+ ACPI_MADT_TYPE_GENERIC_TRANSLATOR = 15,
|
|
+ ACPI_MADT_TYPE_RESERVED = 16,
|
|
+ ACPI_MADT_TYPE_PHYTIUM_2500 = 128,
|
|
+};
|
|
+
|
|
+struct acpi_madt_local_apic {
|
|
+ struct acpi_subtable_header header;
|
|
+ u8 processor_id;
|
|
+ u8 id;
|
|
+ u32 lapic_flags;
|
|
+};
|
|
+
|
|
+struct acpi_madt_io_apic {
|
|
+ struct acpi_subtable_header header;
|
|
+ u8 id;
|
|
+ u8 reserved;
|
|
+ u32 address;
|
|
+ u32 global_irq_base;
|
|
+};
|
|
+
|
|
+struct acpi_madt_interrupt_override {
|
|
+ struct acpi_subtable_header header;
|
|
+ u8 bus;
|
|
+ u8 source_irq;
|
|
+ u32 global_irq;
|
|
+ u16 inti_flags;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_madt_nmi_source {
|
|
+ struct acpi_subtable_header header;
|
|
+ u16 inti_flags;
|
|
+ u32 global_irq;
|
|
+};
|
|
+
|
|
+struct acpi_madt_local_apic_nmi {
|
|
+ struct acpi_subtable_header header;
|
|
+ u8 processor_id;
|
|
+ u16 inti_flags;
|
|
+ u8 lint;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_madt_local_apic_override {
|
|
+ struct acpi_subtable_header header;
|
|
+ u16 reserved;
|
|
+ u64 address;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_madt_local_sapic {
|
|
+ struct acpi_subtable_header header;
|
|
+ u8 processor_id;
|
|
+ u8 id;
|
|
+ u8 eid;
|
|
+ u8 reserved[3];
|
|
+ u32 lapic_flags;
|
|
+ u32 uid;
|
|
+ char uid_string[1];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_madt_local_x2apic {
|
|
+ struct acpi_subtable_header header;
|
|
+ u16 reserved;
|
|
+ u32 local_apic_id;
|
|
+ u32 lapic_flags;
|
|
+ u32 uid;
|
|
+};
|
|
+
|
|
+struct acpi_madt_local_x2apic_nmi {
|
|
+ struct acpi_subtable_header header;
|
|
+ u16 inti_flags;
|
|
+ u32 uid;
|
|
+ u8 lint;
|
|
+ u8 reserved[3];
|
|
+};
|
|
+
|
|
+union acpi_subtable_headers {
|
|
+ struct acpi_subtable_header common;
|
|
+ struct acpi_hmat_structure hmat;
|
|
+};
|
|
+
|
|
+typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *, const long unsigned int);
|
|
+
|
|
+struct acpi_subtable_proc {
|
|
+ int id;
|
|
+ acpi_tbl_entry_handler handler;
|
|
+ int count;
|
|
+};
|
|
+
|
|
+typedef u32 phys_cpuid_t;
|
|
+
|
|
+enum irq_alloc_type {
|
|
+ X86_IRQ_ALLOC_TYPE_IOAPIC = 1,
|
|
+ X86_IRQ_ALLOC_TYPE_HPET = 2,
|
|
+ X86_IRQ_ALLOC_TYPE_MSI = 3,
|
|
+ X86_IRQ_ALLOC_TYPE_MSIX = 4,
|
|
+ X86_IRQ_ALLOC_TYPE_DMAR = 5,
|
|
+ X86_IRQ_ALLOC_TYPE_UV = 6,
|
|
+};
|
|
+
|
|
+struct irq_alloc_info {
|
|
+ enum irq_alloc_type type;
|
|
+ u32 flags;
|
|
+ const struct cpumask *mask;
|
|
+ union {
|
|
+ int unused;
|
|
+ struct {
|
|
+ int hpet_id;
|
|
+ int hpet_index;
|
|
+ void *hpet_data;
|
|
+ };
|
|
+ struct {
|
|
+ struct pci_dev *msi_dev;
|
|
+ irq_hw_number_t msi_hwirq;
|
|
+ };
|
|
+ struct {
|
|
+ int ioapic_id;
|
|
+ int ioapic_pin;
|
|
+ int ioapic_node;
|
|
+ u32 ioapic_trigger: 1;
|
|
+ u32 ioapic_polarity: 1;
|
|
+ u32 ioapic_valid: 1;
|
|
+ struct IO_APIC_route_entry *ioapic_entry;
|
|
+ };
|
|
+ struct {
|
|
+ int dmar_id;
|
|
+ void *dmar_data;
|
|
+ };
|
|
+ struct {
|
|
+ int uv_limit;
|
|
+ int uv_blade;
|
|
+ long unsigned int uv_offset;
|
|
+ char *uv_name;
|
|
+ };
|
|
+ struct {
|
|
+ struct msi_desc *desc;
|
|
+ };
|
|
+ };
|
|
+};
|
|
+
|
|
+struct circ_buf {
|
|
+ char *buf;
|
|
+ int head;
|
|
+ int tail;
|
|
+};
|
|
+
|
|
+struct serial_icounter_struct {
|
|
+ int cts;
|
|
+ int dsr;
|
|
+ int rng;
|
|
+ int dcd;
|
|
+ int rx;
|
|
+ int tx;
|
|
+ int frame;
|
|
+ int overrun;
|
|
+ int parity;
|
|
+ int brk;
|
|
+ int buf_overrun;
|
|
+ int reserved[9];
|
|
+};
|
|
+
|
|
+struct serial_struct {
|
|
+ int type;
|
|
+ int line;
|
|
+ unsigned int port;
|
|
+ int irq;
|
|
+ int flags;
|
|
+ int xmit_fifo_size;
|
|
+ int custom_divisor;
|
|
+ int baud_base;
|
|
+ short unsigned int close_delay;
|
|
+ char io_type;
|
|
+ char reserved_char[1];
|
|
+ int hub6;
|
|
+ short unsigned int closing_wait;
|
|
+ short unsigned int closing_wait2;
|
|
+ unsigned char *iomem_base;
|
|
+ short unsigned int iomem_reg_shift;
|
|
+ unsigned int port_high;
|
|
+ long unsigned int iomap_base;
|
|
+};
|
|
+
|
|
+struct serial_rs485 {
|
|
+ __u32 flags;
|
|
+ __u32 delay_rts_before_send;
|
|
+ __u32 delay_rts_after_send;
|
|
+ __u32 padding[5];
|
|
+};
|
|
+
|
|
+struct uart_port;
|
|
+
|
|
+struct uart_ops {
|
|
+ unsigned int (*tx_empty)(struct uart_port *);
|
|
+ void (*set_mctrl)(struct uart_port *, unsigned int);
|
|
+ unsigned int (*get_mctrl)(struct uart_port *);
|
|
+ void (*stop_tx)(struct uart_port *);
|
|
+ void (*start_tx)(struct uart_port *);
|
|
+ void (*throttle)(struct uart_port *);
|
|
+ void (*unthrottle)(struct uart_port *);
|
|
+ void (*send_xchar)(struct uart_port *, char);
|
|
+ void (*stop_rx)(struct uart_port *);
|
|
+ void (*enable_ms)(struct uart_port *);
|
|
+ void (*break_ctl)(struct uart_port *, int);
|
|
+ int (*startup)(struct uart_port *);
|
|
+ void (*shutdown)(struct uart_port *);
|
|
+ void (*flush_buffer)(struct uart_port *);
|
|
+ void (*set_termios)(struct uart_port *, struct ktermios *, struct ktermios *);
|
|
+ void (*set_ldisc)(struct uart_port *, struct ktermios *);
|
|
+ void (*pm)(struct uart_port *, unsigned int, unsigned int);
|
|
+ const char * (*type)(struct uart_port *);
|
|
+ void (*release_port)(struct uart_port *);
|
|
+ int (*request_port)(struct uart_port *);
|
|
+ void (*config_port)(struct uart_port *, int);
|
|
+ int (*verify_port)(struct uart_port *, struct serial_struct *);
|
|
+ int (*ioctl)(struct uart_port *, unsigned int, long unsigned int);
|
|
+ int (*poll_init)(struct uart_port *);
|
|
+ void (*poll_put_char)(struct uart_port *, unsigned char);
|
|
+ int (*poll_get_char)(struct uart_port *);
|
|
+};
|
|
+
|
|
+struct uart_icount {
|
|
+ __u32 cts;
|
|
+ __u32 dsr;
|
|
+ __u32 rng;
|
|
+ __u32 dcd;
|
|
+ __u32 rx;
|
|
+ __u32 tx;
|
|
+ __u32 frame;
|
|
+ __u32 overrun;
|
|
+ __u32 parity;
|
|
+ __u32 brk;
|
|
+ __u32 buf_overrun;
|
|
+};
|
|
+
|
|
+typedef unsigned int upf_t;
|
|
+
|
|
+typedef unsigned int upstat_t;
|
|
+
|
|
+struct uart_state;
|
|
+
|
|
+struct uart_port {
|
|
+ spinlock_t lock;
|
|
+ long unsigned int iobase;
|
|
+ unsigned char *membase;
|
|
+ unsigned int (*serial_in)(struct uart_port *, int);
|
|
+ void (*serial_out)(struct uart_port *, int, int);
|
|
+ void (*set_termios)(struct uart_port *, struct ktermios *, struct ktermios *);
|
|
+ void (*set_ldisc)(struct uart_port *, struct ktermios *);
|
|
+ unsigned int (*get_mctrl)(struct uart_port *);
|
|
+ void (*set_mctrl)(struct uart_port *, unsigned int);
|
|
+ unsigned int (*get_divisor)(struct uart_port *, unsigned int, unsigned int *);
|
|
+ void (*set_divisor)(struct uart_port *, unsigned int, unsigned int, unsigned int);
|
|
+ int (*startup)(struct uart_port *);
|
|
+ void (*shutdown)(struct uart_port *);
|
|
+ void (*throttle)(struct uart_port *);
|
|
+ void (*unthrottle)(struct uart_port *);
|
|
+ int (*handle_irq)(struct uart_port *);
|
|
+ void (*pm)(struct uart_port *, unsigned int, unsigned int);
|
|
+ void (*handle_break)(struct uart_port *);
|
|
+ int (*rs485_config)(struct uart_port *, struct serial_rs485 *);
|
|
+ unsigned int irq;
|
|
+ long unsigned int irqflags;
|
|
+ unsigned int uartclk;
|
|
+ unsigned int fifosize;
|
|
+ unsigned char x_char;
|
|
+ unsigned char regshift;
|
|
+ unsigned char iotype;
|
|
+ unsigned char quirks;
|
|
+ unsigned int read_status_mask;
|
|
+ unsigned int ignore_status_mask;
|
|
+ struct uart_state *state;
|
|
+ struct uart_icount icount;
|
|
+ struct console *cons;
|
|
+ long unsigned int sysrq;
|
|
+ unsigned int sysrq_ch;
|
|
+ upf_t flags;
|
|
+ upstat_t status;
|
|
+ int hw_stopped;
|
|
+ unsigned int mctrl;
|
|
+ unsigned int timeout;
|
|
+ unsigned int type;
|
|
+ const struct uart_ops *ops;
|
|
+ unsigned int custom_divisor;
|
|
+ unsigned int line;
|
|
+ unsigned int minor;
|
|
+ resource_size_t mapbase;
|
|
+ resource_size_t mapsize;
|
|
+ struct device *dev;
|
|
+ unsigned char hub6;
|
|
+ unsigned char suspended;
|
|
+ unsigned char unused[2];
|
|
+ const char *name;
|
|
+ struct attribute_group *attr_group;
|
|
+ const struct attribute_group **tty_groups;
|
|
+ struct serial_rs485 rs485;
|
|
+ void *private_data;
|
|
+};
|
|
+
|
|
+enum uart_pm_state {
|
|
+ UART_PM_STATE_ON = 0,
|
|
+ UART_PM_STATE_OFF = 3,
|
|
+ UART_PM_STATE_UNDEFINED = 4,
|
|
+};
|
|
+
|
|
+struct uart_state {
|
|
+ struct tty_port port;
|
|
+ enum uart_pm_state pm_state;
|
|
+ struct circ_buf xmit;
|
|
+ atomic_t refcount;
|
|
+ wait_queue_head_t remove_wait;
|
|
+ struct uart_port *uart_port;
|
|
+};
|
|
+
|
|
+struct earlycon_device {
|
|
+ struct console *con;
|
|
+ struct uart_port port;
|
|
+ char options[16];
|
|
+ unsigned int baud;
|
|
+};
|
|
+
|
|
+struct earlycon_id {
|
|
+ char name[15];
|
|
+ char name_term;
|
|
+ char compatible[128];
|
|
+ int (*setup)(struct earlycon_device *, const char *);
|
|
+};
|
|
+
|
|
+enum ioapic_domain_type {
|
|
+ IOAPIC_DOMAIN_INVALID = 0,
|
|
+ IOAPIC_DOMAIN_LEGACY = 1,
|
|
+ IOAPIC_DOMAIN_STRICT = 2,
|
|
+ IOAPIC_DOMAIN_DYNAMIC = 3,
|
|
+};
|
|
+
|
|
+struct ioapic_domain_cfg {
|
|
+ enum ioapic_domain_type type;
|
|
+ const struct irq_domain_ops *ops;
|
|
+ struct device_node *dev;
|
|
+};
|
|
+
|
|
+enum thermal_device_mode {
|
|
+ THERMAL_DEVICE_DISABLED = 0,
|
|
+ THERMAL_DEVICE_ENABLED = 1,
|
|
+};
|
|
+
|
|
+enum thermal_trip_type {
|
|
+ THERMAL_TRIP_ACTIVE = 0,
|
|
+ THERMAL_TRIP_PASSIVE = 1,
|
|
+ THERMAL_TRIP_HOT = 2,
|
|
+ THERMAL_TRIP_CRITICAL = 3,
|
|
+};
|
|
+
|
|
+enum thermal_trend {
|
|
+ THERMAL_TREND_STABLE = 0,
|
|
+ THERMAL_TREND_RAISING = 1,
|
|
+ THERMAL_TREND_DROPPING = 2,
|
|
+ THERMAL_TREND_RAISE_FULL = 3,
|
|
+ THERMAL_TREND_DROP_FULL = 4,
|
|
+};
|
|
+
|
|
+enum thermal_notify_event {
|
|
+ THERMAL_EVENT_UNSPECIFIED = 0,
|
|
+ THERMAL_EVENT_TEMP_SAMPLE = 1,
|
|
+ THERMAL_TRIP_VIOLATED = 2,
|
|
+ THERMAL_TRIP_CHANGED = 3,
|
|
+ THERMAL_DEVICE_DOWN = 4,
|
|
+ THERMAL_DEVICE_UP = 5,
|
|
+ THERMAL_DEVICE_POWER_CAPABILITY_CHANGED = 6,
|
|
+ THERMAL_TABLE_CHANGED = 7,
|
|
+};
|
|
+
|
|
+struct thermal_zone_device;
|
|
+
|
|
+struct thermal_cooling_device;
|
|
+
|
|
+struct thermal_zone_device_ops {
|
|
+ int (*bind)(struct thermal_zone_device *, struct thermal_cooling_device *);
|
|
+ int (*unbind)(struct thermal_zone_device *, struct thermal_cooling_device *);
|
|
+ int (*get_temp)(struct thermal_zone_device *, int *);
|
|
+ int (*set_trips)(struct thermal_zone_device *, int, int);
|
|
+ int (*get_mode)(struct thermal_zone_device *, enum thermal_device_mode *);
|
|
+ int (*set_mode)(struct thermal_zone_device *, enum thermal_device_mode);
|
|
+ int (*get_trip_type)(struct thermal_zone_device *, int, enum thermal_trip_type *);
|
|
+ int (*get_trip_temp)(struct thermal_zone_device *, int, int *);
|
|
+ int (*set_trip_temp)(struct thermal_zone_device *, int, int);
|
|
+ int (*get_trip_hyst)(struct thermal_zone_device *, int, int *);
|
|
+ int (*set_trip_hyst)(struct thermal_zone_device *, int, int);
|
|
+ int (*get_crit_temp)(struct thermal_zone_device *, int *);
|
|
+ int (*set_emul_temp)(struct thermal_zone_device *, int);
|
|
+ int (*get_trend)(struct thermal_zone_device *, int, enum thermal_trend *);
|
|
+ int (*notify)(struct thermal_zone_device *, int, enum thermal_trip_type);
|
|
+};
|
|
+
|
|
+struct thermal_attr;
|
|
+
|
|
+struct thermal_zone_params;
|
|
+
|
|
+struct thermal_governor;
|
|
+
|
|
+struct thermal_zone_device {
|
|
+ int id;
|
|
+ char type[20];
|
|
+ struct device device;
|
|
+ struct attribute_group trips_attribute_group;
|
|
+ struct thermal_attr *trip_temp_attrs;
|
|
+ struct thermal_attr *trip_type_attrs;
|
|
+ struct thermal_attr *trip_hyst_attrs;
|
|
+ void *devdata;
|
|
+ int trips;
|
|
+ long unsigned int trips_disabled;
|
|
+ int passive_delay;
|
|
+ int polling_delay;
|
|
+ int temperature;
|
|
+ int last_temperature;
|
|
+ int emul_temperature;
|
|
+ int passive;
|
|
+ int prev_low_trip;
|
|
+ int prev_high_trip;
|
|
+ unsigned int forced_passive;
|
|
+ atomic_t need_update;
|
|
+ struct thermal_zone_device_ops *ops;
|
|
+ struct thermal_zone_params *tzp;
|
|
+ struct thermal_governor *governor;
|
|
+ void *governor_data;
|
|
+ struct list_head thermal_instances;
|
|
+ struct ida ida;
|
|
+ struct mutex lock;
|
|
+ struct list_head node;
|
|
+ struct delayed_work poll_queue;
|
|
+ enum thermal_notify_event notify_event;
|
|
+};
|
|
+
|
|
+struct thermal_cooling_device_ops;
|
|
+
|
|
+struct thermal_cooling_device {
|
|
+ int id;
|
|
+ char type[20];
|
|
+ struct device device;
|
|
+ struct device_node *np;
|
|
+ void *devdata;
|
|
+ void *stats;
|
|
+ const struct thermal_cooling_device_ops *ops;
|
|
+ bool updated;
|
|
+ struct mutex lock;
|
|
+ struct list_head thermal_instances;
|
|
+ struct list_head node;
|
|
+};
|
|
+
|
|
+struct thermal_cooling_device_ops {
|
|
+ int (*get_max_state)(struct thermal_cooling_device *, long unsigned int *);
|
|
+ int (*get_cur_state)(struct thermal_cooling_device *, long unsigned int *);
|
|
+ int (*set_cur_state)(struct thermal_cooling_device *, long unsigned int);
|
|
+ int (*get_requested_power)(struct thermal_cooling_device *, struct thermal_zone_device *, u32 *);
|
|
+ int (*state2power)(struct thermal_cooling_device *, struct thermal_zone_device *, long unsigned int, u32 *);
|
|
+ int (*power2state)(struct thermal_cooling_device *, struct thermal_zone_device *, u32, long unsigned int *);
|
|
+};
|
|
+
|
|
+struct thermal_attr {
|
|
+ struct device_attribute attr;
|
|
+ char name[20];
|
|
+};
|
|
+
|
|
+struct thermal_bind_params;
|
|
+
|
|
+struct thermal_zone_params {
|
|
+ char governor_name[20];
|
|
+ bool no_hwmon;
|
|
+ int num_tbps;
|
|
+ struct thermal_bind_params *tbp;
|
|
+ u32 sustainable_power;
|
|
+ s32 k_po;
|
|
+ s32 k_pu;
|
|
+ s32 k_i;
|
|
+ s32 k_d;
|
|
+ s32 integral_cutoff;
|
|
+ int slope;
|
|
+ int offset;
|
|
+};
|
|
+
|
|
+struct thermal_governor {
|
|
+ char name[20];
|
|
+ int (*bind_to_tz)(struct thermal_zone_device *);
|
|
+ void (*unbind_from_tz)(struct thermal_zone_device *);
|
|
+ int (*throttle)(struct thermal_zone_device *, int);
|
|
+ struct list_head governor_list;
|
|
+};
|
|
+
|
|
+struct thermal_bind_params {
|
|
+ struct thermal_cooling_device *cdev;
|
|
+ int weight;
|
|
+ int trip_mask;
|
|
+ long unsigned int *binding_limits;
|
|
+ int (*match)(struct thermal_zone_device *, struct thermal_cooling_device *);
|
|
+};
|
|
+
|
|
+struct acpi_processor_cx {
|
|
+ u8 valid;
|
|
+ u8 type;
|
|
+ u32 address;
|
|
+ u8 entry_method;
|
|
+ u8 index;
|
|
+ u32 latency;
|
|
+ u8 bm_sts_skip;
|
|
+ char desc[32];
|
|
+};
|
|
+
|
|
+struct acpi_lpi_state {
|
|
+ u32 min_residency;
|
|
+ u32 wake_latency;
|
|
+ u32 flags;
|
|
+ u32 arch_flags;
|
|
+ u32 res_cnt_freq;
|
|
+ u32 enable_parent_state;
|
|
+ u64 address;
|
|
+ u8 index;
|
|
+ u8 entry_method;
|
|
+ char desc[32];
|
|
+};
|
|
+
|
|
+struct acpi_processor_power {
|
|
+ int count;
|
|
+ union {
|
|
+ struct acpi_processor_cx states[8];
|
|
+ struct acpi_lpi_state lpi_states[8];
|
|
+ };
|
|
+ int timer_broadcast_on_state;
|
|
+};
|
|
+
|
|
+struct acpi_psd_package {
|
|
+ u64 num_entries;
|
|
+ u64 revision;
|
|
+ u64 domain;
|
|
+ u64 coord_type;
|
|
+ u64 num_processors;
|
|
+};
|
|
+
|
|
+struct acpi_pct_register {
|
|
+ u8 descriptor;
|
|
+ u16 length;
|
|
+ u8 space_id;
|
|
+ u8 bit_width;
|
|
+ u8 bit_offset;
|
|
+ u8 reserved;
|
|
+ u64 address;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_processor_px {
|
|
+ u64 core_frequency;
|
|
+ u64 power;
|
|
+ u64 transition_latency;
|
|
+ u64 bus_master_latency;
|
|
+ u64 control;
|
|
+ u64 status;
|
|
+};
|
|
+
|
|
+struct acpi_processor_performance {
|
|
+ unsigned int state;
|
|
+ unsigned int platform_limit;
|
|
+ struct acpi_pct_register control_register;
|
|
+ struct acpi_pct_register status_register;
|
|
+ short: 16;
|
|
+ unsigned int state_count;
|
|
+ int: 32;
|
|
+ struct acpi_processor_px *states;
|
|
+ struct acpi_psd_package domain_info;
|
|
+ cpumask_var_t shared_cpu_map;
|
|
+ unsigned int shared_type;
|
|
+ int: 32;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_tsd_package {
|
|
+ u64 num_entries;
|
|
+ u64 revision;
|
|
+ u64 domain;
|
|
+ u64 coord_type;
|
|
+ u64 num_processors;
|
|
+};
|
|
+
|
|
+struct acpi_processor_tx_tss {
|
|
+ u64 freqpercentage;
|
|
+ u64 power;
|
|
+ u64 transition_latency;
|
|
+ u64 control;
|
|
+ u64 status;
|
|
+};
|
|
+
|
|
+struct acpi_processor_tx {
|
|
+ u16 power;
|
|
+ u16 performance;
|
|
+};
|
|
+
|
|
+struct acpi_processor;
|
|
+
|
|
+struct acpi_processor_throttling {
|
|
+ unsigned int state;
|
|
+ unsigned int platform_limit;
|
|
+ struct acpi_pct_register control_register;
|
|
+ struct acpi_pct_register status_register;
|
|
+ short: 16;
|
|
+ unsigned int state_count;
|
|
+ int: 32;
|
|
+ struct acpi_processor_tx_tss *states_tss;
|
|
+ struct acpi_tsd_package domain_info;
|
|
+ cpumask_var_t shared_cpu_map;
|
|
+ int (*acpi_processor_get_throttling)(struct acpi_processor *);
|
|
+ int (*acpi_processor_set_throttling)(struct acpi_processor *, int, bool);
|
|
+ u32 address;
|
|
+ u8 duty_offset;
|
|
+ u8 duty_width;
|
|
+ u8 tsd_valid_flag;
|
|
+ char: 8;
|
|
+ unsigned int shared_type;
|
|
+ struct acpi_processor_tx states[16];
|
|
+ int: 32;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_processor_flags {
|
|
+ u8 power: 1;
|
|
+ u8 performance: 1;
|
|
+ u8 throttling: 1;
|
|
+ u8 limit: 1;
|
|
+ u8 bm_control: 1;
|
|
+ u8 bm_check: 1;
|
|
+ u8 has_cst: 1;
|
|
+ u8 has_lpi: 1;
|
|
+ u8 power_setup_done: 1;
|
|
+ u8 bm_rld_set: 1;
|
|
+ u8 need_hotplug_init: 1;
|
|
+};
|
|
+
|
|
+struct acpi_processor_lx {
|
|
+ int px;
|
|
+ int tx;
|
|
+};
|
|
+
|
|
+struct acpi_processor_limit {
|
|
+ struct acpi_processor_lx state;
|
|
+ struct acpi_processor_lx thermal;
|
|
+ struct acpi_processor_lx user;
|
|
+};
|
|
+
|
|
+struct acpi_processor {
|
|
+ acpi_handle handle;
|
|
+ u32 acpi_id;
|
|
+ phys_cpuid_t phys_id;
|
|
+ u32 id;
|
|
+ u32 pblk;
|
|
+ int performance_platform_limit;
|
|
+ int throttling_platform_limit;
|
|
+ struct acpi_processor_flags flags;
|
|
+ struct acpi_processor_power power;
|
|
+ struct acpi_processor_performance *performance;
|
|
+ struct acpi_processor_throttling throttling;
|
|
+ struct acpi_processor_limit limit;
|
|
+ struct thermal_cooling_device *cdev;
|
|
+ struct device *dev;
|
|
+};
|
|
+
|
|
+struct acpi_processor_errata {
|
|
+ u8 smp;
|
|
+ struct {
|
|
+ u8 throttle: 1;
|
|
+ u8 fdma: 1;
|
|
+ u8 reserved: 6;
|
|
+ u32 bmisx;
|
|
+ } piix4;
|
|
+};
|
|
+
|
|
+struct cpuidle_driver;
|
|
+
|
|
+struct acpi_hest_header {
|
|
+ u16 type;
|
|
+ u16 source_id;
|
|
+};
|
|
+
|
|
+struct acpi_hest_ia_error_bank {
|
|
+ u8 bank_number;
|
|
+ u8 clear_status_on_init;
|
|
+ u8 status_format;
|
|
+ u8 reserved;
|
|
+ u32 control_register;
|
|
+ u64 control_data;
|
|
+ u32 status_register;
|
|
+ u32 address_register;
|
|
+ u32 misc_register;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_hest_notify {
|
|
+ u8 type;
|
|
+ u8 length;
|
|
+ u16 config_write_enable;
|
|
+ u32 poll_interval;
|
|
+ u32 vector;
|
|
+ u32 polling_threshold_value;
|
|
+ u32 polling_threshold_window;
|
|
+ u32 error_threshold_value;
|
|
+ u32 error_threshold_window;
|
|
+};
|
|
+
|
|
+struct acpi_hest_ia_corrected {
|
|
+ struct acpi_hest_header header;
|
|
+ u16 reserved1;
|
|
+ u8 flags;
|
|
+ u8 enabled;
|
|
+ u32 records_to_preallocate;
|
|
+ u32 max_sections_per_record;
|
|
+ struct acpi_hest_notify notify;
|
|
+ u8 num_hardware_banks;
|
|
+ u8 reserved2[3];
|
|
+};
|
|
+
|
|
+struct cpc_reg {
|
|
+ u8 descriptor;
|
|
+ u16 length;
|
|
+ u8 space_id;
|
|
+ u8 bit_width;
|
|
+ u8 bit_offset;
|
|
+ u8 access_width;
|
|
+ u64 address;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_power_register {
|
|
+ u8 descriptor;
|
|
+ u16 length;
|
|
+ u8 space_id;
|
|
+ u8 bit_width;
|
|
+ u8 bit_offset;
|
|
+ u8 access_size;
|
|
+ u64 address;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct cstate_entry {
|
|
+ struct {
|
|
+ unsigned int eax;
|
|
+ unsigned int ecx;
|
|
+ } states[8];
|
|
+};
|
|
+
|
|
+typedef void (*nmi_shootdown_cb)(int, struct pt_regs *);
|
|
+
|
|
+struct pci_ops___2;
|
|
+
|
|
+struct cpuid_regs_done {
|
|
+ struct cpuid_regs regs;
|
|
+ struct completion done;
|
|
+};
|
|
+
|
|
+struct pinctrl;
|
|
+
|
|
+struct pinctrl_state;
|
|
+
|
|
+struct dev_pin_info {
|
|
+ struct pinctrl *p;
|
|
+ struct pinctrl_state *default_state;
|
|
+ struct pinctrl_state *init_state;
|
|
+ struct pinctrl_state *sleep_state;
|
|
+ struct pinctrl_state *idle_state;
|
|
+};
|
|
+
|
|
+struct intel_early_ops {
|
|
+ resource_size_t (*stolen_size)(int, int, int);
|
|
+ resource_size_t (*stolen_base)(int, int, int, resource_size_t);
|
|
+};
|
|
+
|
|
+struct chipset {
|
|
+ u32 vendor;
|
|
+ u32 device;
|
|
+ u32 class;
|
|
+ u32 class_mask;
|
|
+ u32 flags;
|
|
+ void (*f)(int, int, int);
|
|
+};
|
|
+
|
|
+struct sparsemask;
|
|
+
|
|
+struct sched_domain_shared {
|
|
+ atomic_t ref;
|
|
+ atomic_t nr_busy_cpus;
|
|
+ int has_idle_cores;
|
|
+ struct sparsemask *cfs_overload_cpus;
|
|
+};
|
|
+
|
|
+struct sched_group;
|
|
+
|
|
+struct sched_domain {
|
|
+ struct sched_domain *parent;
|
|
+ struct sched_domain *child;
|
|
+ struct sched_group *groups;
|
|
+ long unsigned int min_interval;
|
|
+ long unsigned int max_interval;
|
|
+ unsigned int busy_factor;
|
|
+ unsigned int imbalance_pct;
|
|
+ unsigned int cache_nice_tries;
|
|
+ unsigned int busy_idx;
|
|
+ unsigned int idle_idx;
|
|
+ unsigned int newidle_idx;
|
|
+ unsigned int wake_idx;
|
|
+ unsigned int forkexec_idx;
|
|
+ unsigned int smt_gain;
|
|
+ int nohz_idle;
|
|
+ int flags;
|
|
+ int level;
|
|
+ long unsigned int last_balance;
|
|
+ unsigned int balance_interval;
|
|
+ unsigned int nr_balance_failed;
|
|
+ u64 max_newidle_lb_cost;
|
|
+ long unsigned int next_decay_max_lb_cost;
|
|
+ u64 avg_scan_cost;
|
|
+ unsigned int lb_count[3];
|
|
+ unsigned int lb_failed[3];
|
|
+ unsigned int lb_balanced[3];
|
|
+ unsigned int lb_imbalance[3];
|
|
+ unsigned int lb_gained[3];
|
|
+ unsigned int lb_hot_gained[3];
|
|
+ unsigned int lb_nobusyg[3];
|
|
+ unsigned int lb_nobusyq[3];
|
|
+ unsigned int alb_count;
|
|
+ unsigned int alb_failed;
|
|
+ unsigned int alb_pushed;
|
|
+ unsigned int sbe_count;
|
|
+ unsigned int sbe_balanced;
|
|
+ unsigned int sbe_pushed;
|
|
+ unsigned int sbf_count;
|
|
+ unsigned int sbf_balanced;
|
|
+ unsigned int sbf_pushed;
|
|
+ unsigned int ttwu_wake_remote;
|
|
+ unsigned int ttwu_move_affine;
|
|
+ unsigned int ttwu_move_balance;
|
|
+ char *name;
|
|
+ union {
|
|
+ void *private;
|
|
+ struct callback_head rcu;
|
|
+ };
|
|
+ struct sched_domain_shared *shared;
|
|
+ unsigned int span_weight;
|
|
+ long unsigned int span[0];
|
|
+};
|
|
+
|
|
+typedef const struct cpumask * (*sched_domain_mask_f)(int);
|
|
+
|
|
+typedef int (*sched_domain_flags_f)();
|
|
+
|
|
+struct sched_group_capacity;
|
|
+
|
|
+struct sd_data {
|
|
+ struct sched_domain **sd;
|
|
+ struct sched_domain_shared **sds;
|
|
+ struct sched_group **sg;
|
|
+ struct sched_group_capacity **sgc;
|
|
+};
|
|
+
|
|
+struct sched_domain_topology_level {
|
|
+ sched_domain_mask_f mask;
|
|
+ sched_domain_flags_f sd_flags;
|
|
+ int flags;
|
|
+ int numa_level;
|
|
+ struct sd_data data;
|
|
+ char *name;
|
|
+};
|
|
+
|
|
+struct tsc_adjust {
|
|
+ s64 bootval;
|
|
+ s64 adjusted;
|
|
+ long unsigned int nextcheck;
|
|
+ bool warned;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ DUMP_PREFIX_NONE = 0,
|
|
+ DUMP_PREFIX_ADDRESS = 1,
|
|
+ DUMP_PREFIX_OFFSET = 2,
|
|
+};
|
|
+
|
|
+struct mpf_intel {
|
|
+ char signature[4];
|
|
+ unsigned int physptr;
|
|
+ unsigned char length;
|
|
+ unsigned char specification;
|
|
+ unsigned char checksum;
|
|
+ unsigned char feature1;
|
|
+ unsigned char feature2;
|
|
+ unsigned char feature3;
|
|
+ unsigned char feature4;
|
|
+ unsigned char feature5;
|
|
+};
|
|
+
|
|
+struct mpc_ioapic {
|
|
+ unsigned char type;
|
|
+ unsigned char apicid;
|
|
+ unsigned char apicver;
|
|
+ unsigned char flags;
|
|
+ unsigned int apicaddr;
|
|
+};
|
|
+
|
|
+struct mpc_lintsrc {
|
|
+ unsigned char type;
|
|
+ unsigned char irqtype;
|
|
+ short unsigned int irqflag;
|
|
+ unsigned char srcbusid;
|
|
+ unsigned char srcbusirq;
|
|
+ unsigned char destapic;
|
|
+ unsigned char destapiclint;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IRQ_REMAP_XAPIC_MODE = 0,
|
|
+ IRQ_REMAP_X2APIC_MODE = 1,
|
|
+};
|
|
+
|
|
+union apic_ir {
|
|
+ long unsigned int map[4];
|
|
+ u32 regs[8];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ X2APIC_OFF = 0,
|
|
+ X2APIC_ON = 1,
|
|
+ X2APIC_DISABLED = 2,
|
|
+};
|
|
+
|
|
+enum ioapic_irq_destination_types {
|
|
+ dest_Fixed = 0,
|
|
+ dest_LowestPrio = 1,
|
|
+ dest_SMI = 2,
|
|
+ dest__reserved_1 = 3,
|
|
+ dest_NMI = 4,
|
|
+ dest_INIT = 5,
|
|
+ dest__reserved_2 = 6,
|
|
+ dest_ExtINT = 7,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IRQ_SET_MASK_OK = 0,
|
|
+ IRQ_SET_MASK_OK_NOCOPY = 1,
|
|
+ IRQ_SET_MASK_OK_DONE = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IRQD_TRIGGER_MASK = 15,
|
|
+ IRQD_SETAFFINITY_PENDING = 256,
|
|
+ IRQD_ACTIVATED = 512,
|
|
+ IRQD_NO_BALANCING = 1024,
|
|
+ IRQD_PER_CPU = 2048,
|
|
+ IRQD_AFFINITY_SET = 4096,
|
|
+ IRQD_LEVEL = 8192,
|
|
+ IRQD_WAKEUP_STATE = 16384,
|
|
+ IRQD_MOVE_PCNTXT = 32768,
|
|
+ IRQD_IRQ_DISABLED = 65536,
|
|
+ IRQD_IRQ_MASKED = 131072,
|
|
+ IRQD_IRQ_INPROGRESS = 262144,
|
|
+ IRQD_WAKEUP_ARMED = 524288,
|
|
+ IRQD_FORWARDED_TO_VCPU = 1048576,
|
|
+ IRQD_AFFINITY_MANAGED = 2097152,
|
|
+ IRQD_IRQ_STARTED = 4194304,
|
|
+ IRQD_MANAGED_SHUTDOWN = 8388608,
|
|
+ IRQD_SINGLE_TARGET = 16777216,
|
|
+ IRQD_DEFAULT_TRIGGER_SET = 33554432,
|
|
+ IRQD_CAN_RESERVE = 67108864,
|
|
+ IRQD_MSI_NOMASK_QUIRK = 134217728,
|
|
+ IRQD_HANDLE_ENFORCE_IRQCTX = 268435456,
|
|
+ IRQD_AFFINITY_ON_ACTIVATE = 536870912,
|
|
+};
|
|
+
|
|
+struct irq_cfg {
|
|
+ unsigned int dest_apicid;
|
|
+ unsigned int vector;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IRQCHIP_FWNODE_REAL = 0,
|
|
+ IRQCHIP_FWNODE_NAMED = 1,
|
|
+ IRQCHIP_FWNODE_NAMED_ID = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ X86_IRQ_ALLOC_CONTIGUOUS_VECTORS = 1,
|
|
+ X86_IRQ_ALLOC_LEGACY = 2,
|
|
+};
|
|
+
|
|
+struct apic_chip_data {
|
|
+ struct irq_cfg hw_irq_cfg;
|
|
+ unsigned int vector;
|
|
+ unsigned int prev_vector;
|
|
+ unsigned int cpu;
|
|
+ unsigned int prev_cpu;
|
|
+ unsigned int irq;
|
|
+ struct hlist_node clist;
|
|
+ unsigned int move_in_progress: 1;
|
|
+ unsigned int is_managed: 1;
|
|
+ unsigned int can_reserve: 1;
|
|
+ unsigned int has_reserved: 1;
|
|
+};
|
|
+
|
|
+struct irq_matrix;
|
|
+
|
|
+union IO_APIC_reg_00 {
|
|
+ u32 raw;
|
|
+ struct {
|
|
+ u32 __reserved_2: 14;
|
|
+ u32 LTS: 1;
|
|
+ u32 delivery_type: 1;
|
|
+ u32 __reserved_1: 8;
|
|
+ u32 ID: 8;
|
|
+ } bits;
|
|
+};
|
|
+
|
|
+union IO_APIC_reg_01 {
|
|
+ u32 raw;
|
|
+ struct {
|
|
+ u32 version: 8;
|
|
+ u32 __reserved_2: 7;
|
|
+ u32 PRQ: 1;
|
|
+ u32 entries: 8;
|
|
+ u32 __reserved_1: 8;
|
|
+ } bits;
|
|
+};
|
|
+
|
|
+union IO_APIC_reg_02 {
|
|
+ u32 raw;
|
|
+ struct {
|
|
+ u32 __reserved_2: 24;
|
|
+ u32 arbitration: 4;
|
|
+ u32 __reserved_1: 4;
|
|
+ } bits;
|
|
+};
|
|
+
|
|
+union IO_APIC_reg_03 {
|
|
+ u32 raw;
|
|
+ struct {
|
|
+ u32 boot_DT: 1;
|
|
+ u32 __reserved_1: 31;
|
|
+ } bits;
|
|
+};
|
|
+
|
|
+struct IR_IO_APIC_route_entry {
|
|
+ __u64 vector: 8;
|
|
+ __u64 zero: 3;
|
|
+ __u64 index2: 1;
|
|
+ __u64 delivery_status: 1;
|
|
+ __u64 polarity: 1;
|
|
+ __u64 irr: 1;
|
|
+ __u64 trigger: 1;
|
|
+ __u64 mask: 1;
|
|
+ __u64 reserved: 31;
|
|
+ __u64 format: 1;
|
|
+ __u64 index: 15;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IRQ_TYPE_NONE = 0,
|
|
+ IRQ_TYPE_EDGE_RISING = 1,
|
|
+ IRQ_TYPE_EDGE_FALLING = 2,
|
|
+ IRQ_TYPE_EDGE_BOTH = 3,
|
|
+ IRQ_TYPE_LEVEL_HIGH = 4,
|
|
+ IRQ_TYPE_LEVEL_LOW = 8,
|
|
+ IRQ_TYPE_LEVEL_MASK = 12,
|
|
+ IRQ_TYPE_SENSE_MASK = 15,
|
|
+ IRQ_TYPE_DEFAULT = 15,
|
|
+ IRQ_TYPE_PROBE = 16,
|
|
+ IRQ_LEVEL = 256,
|
|
+ IRQ_PER_CPU = 512,
|
|
+ IRQ_NOPROBE = 1024,
|
|
+ IRQ_NOREQUEST = 2048,
|
|
+ IRQ_NOAUTOEN = 4096,
|
|
+ IRQ_NO_BALANCING = 8192,
|
|
+ IRQ_MOVE_PCNTXT = 16384,
|
|
+ IRQ_NESTED_THREAD = 32768,
|
|
+ IRQ_NOTHREAD = 65536,
|
|
+ IRQ_PER_CPU_DEVID = 131072,
|
|
+ IRQ_IS_POLLED = 262144,
|
|
+ IRQ_DISABLE_UNLAZY = 524288,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IRQCHIP_SET_TYPE_MASKED = 1,
|
|
+ IRQCHIP_EOI_IF_HANDLED = 2,
|
|
+ IRQCHIP_MASK_ON_SUSPEND = 4,
|
|
+ IRQCHIP_ONOFFLINE_ENABLED = 8,
|
|
+ IRQCHIP_SKIP_SET_WAKE = 16,
|
|
+ IRQCHIP_ONESHOT_SAFE = 32,
|
|
+ IRQCHIP_EOI_THREADED = 64,
|
|
+ IRQCHIP_SUPPORTS_LEVEL_MSI = 128,
|
|
+ IRQCHIP_SUPPORTS_NMI = 256,
|
|
+ IRQCHIP_AFFINITY_PRE_STARTUP = 1024,
|
|
+};
|
|
+
|
|
+struct irq_pin_list {
|
|
+ struct list_head list;
|
|
+ int apic;
|
|
+ int pin;
|
|
+};
|
|
+
|
|
+struct mp_chip_data {
|
|
+ struct list_head irq_2_pin;
|
|
+ struct IO_APIC_route_entry entry;
|
|
+ int trigger;
|
|
+ int polarity;
|
|
+ u32 count;
|
|
+ bool isa_irq;
|
|
+};
|
|
+
|
|
+struct mp_ioapic_gsi {
|
|
+ u32 gsi_base;
|
|
+ u32 gsi_end;
|
|
+};
|
|
+
|
|
+struct ioapic {
|
|
+ int nr_registers;
|
|
+ struct IO_APIC_route_entry *saved_registers;
|
|
+ struct mpc_ioapic mp_config;
|
|
+ struct mp_ioapic_gsi gsi_config;
|
|
+ struct ioapic_domain_cfg irqdomain_cfg;
|
|
+ struct irq_domain *irqdomain;
|
|
+ struct resource *iomem_res;
|
|
+};
|
|
+
|
|
+struct io_apic {
|
|
+ unsigned int index;
|
|
+ unsigned int unused[3];
|
|
+ unsigned int data;
|
|
+ unsigned int unused2[11];
|
|
+ unsigned int eoi;
|
|
+};
|
|
+
|
|
+union entry_union {
|
|
+ struct {
|
|
+ u32 w1;
|
|
+ u32 w2;
|
|
+ };
|
|
+ struct IO_APIC_route_entry entry;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IRQ_DOMAIN_FLAG_HIERARCHY = 1,
|
|
+ IRQ_DOMAIN_NAME_ALLOCATED = 2,
|
|
+ IRQ_DOMAIN_FLAG_IPI_PER_CPU = 4,
|
|
+ IRQ_DOMAIN_FLAG_IPI_SINGLE = 8,
|
|
+ IRQ_DOMAIN_FLAG_MSI = 16,
|
|
+ IRQ_DOMAIN_FLAG_MSI_REMAP = 32,
|
|
+ IRQ_DOMAIN_MSI_NOMASK_QUIRK = 64,
|
|
+ IRQ_DOMAIN_FLAG_NONCORE = 65536,
|
|
+};
|
|
+
|
|
+typedef struct irq_alloc_info msi_alloc_info_t;
|
|
+
|
|
+struct msi_domain_info;
|
|
+
|
|
+struct msi_domain_ops {
|
|
+ irq_hw_number_t (*get_hwirq)(struct msi_domain_info *, msi_alloc_info_t *);
|
|
+ int (*msi_init)(struct irq_domain *, struct msi_domain_info *, unsigned int, irq_hw_number_t, msi_alloc_info_t *);
|
|
+ void (*msi_free)(struct irq_domain *, struct msi_domain_info *, unsigned int);
|
|
+ int (*msi_check)(struct irq_domain *, struct msi_domain_info *, struct device *);
|
|
+ int (*msi_prepare)(struct irq_domain *, struct device *, int, msi_alloc_info_t *);
|
|
+ void (*msi_finish)(msi_alloc_info_t *, int);
|
|
+ void (*set_desc)(msi_alloc_info_t *, struct msi_desc *);
|
|
+ int (*handle_error)(struct irq_domain *, struct msi_desc *, int);
|
|
+};
|
|
+
|
|
+struct msi_domain_info {
|
|
+ u32 flags;
|
|
+ struct msi_domain_ops *ops;
|
|
+ struct irq_chip *chip;
|
|
+ void *chip_data;
|
|
+ irq_flow_handler_t handler;
|
|
+ void *handler_data;
|
|
+ const char *handler_name;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ MSI_FLAG_USE_DEF_DOM_OPS = 1,
|
|
+ MSI_FLAG_USE_DEF_CHIP_OPS = 2,
|
|
+ MSI_FLAG_MULTI_PCI_MSI = 4,
|
|
+ MSI_FLAG_PCI_MSIX = 8,
|
|
+ MSI_FLAG_ACTIVATE_EARLY = 16,
|
|
+ MSI_FLAG_MUST_REACTIVATE = 32,
|
|
+ MSI_FLAG_LEVEL_CAPABLE = 64,
|
|
+};
|
|
+
|
|
+struct hpet_dev;
|
|
+
|
|
+struct semaphore {
|
|
+ raw_spinlock_t lock;
|
|
+ unsigned int count;
|
|
+ struct list_head wait_list;
|
|
+};
|
|
+
|
|
+struct uvh_node_id_s {
|
|
+ long unsigned int force1: 1;
|
|
+ long unsigned int manufacturer: 11;
|
|
+ long unsigned int part_number: 16;
|
|
+ long unsigned int revision: 4;
|
|
+ long unsigned int node_id: 15;
|
|
+ long unsigned int rsvd_47_63: 17;
|
|
+};
|
|
+
|
|
+struct uv1h_node_id_s {
|
|
+ long unsigned int force1: 1;
|
|
+ long unsigned int manufacturer: 11;
|
|
+ long unsigned int part_number: 16;
|
|
+ long unsigned int revision: 4;
|
|
+ long unsigned int node_id: 15;
|
|
+ long unsigned int rsvd_47: 1;
|
|
+ long unsigned int nodes_per_bit: 7;
|
|
+ long unsigned int rsvd_55: 1;
|
|
+ long unsigned int ni_port: 4;
|
|
+ long unsigned int rsvd_60_63: 4;
|
|
+};
|
|
+
|
|
+struct uvxh_node_id_s {
|
|
+ long unsigned int force1: 1;
|
|
+ long unsigned int manufacturer: 11;
|
|
+ long unsigned int part_number: 16;
|
|
+ long unsigned int revision: 4;
|
|
+ long unsigned int node_id: 15;
|
|
+ long unsigned int rsvd_47_49: 3;
|
|
+ long unsigned int nodes_per_bit: 7;
|
|
+ long unsigned int ni_port: 5;
|
|
+ long unsigned int rsvd_62_63: 2;
|
|
+};
|
|
+
|
|
+struct uv2h_node_id_s {
|
|
+ long unsigned int force1: 1;
|
|
+ long unsigned int manufacturer: 11;
|
|
+ long unsigned int part_number: 16;
|
|
+ long unsigned int revision: 4;
|
|
+ long unsigned int node_id: 15;
|
|
+ long unsigned int rsvd_47_49: 3;
|
|
+ long unsigned int nodes_per_bit: 7;
|
|
+ long unsigned int ni_port: 5;
|
|
+ long unsigned int rsvd_62_63: 2;
|
|
+};
|
|
+
|
|
+struct uv3h_node_id_s {
|
|
+ long unsigned int force1: 1;
|
|
+ long unsigned int manufacturer: 11;
|
|
+ long unsigned int part_number: 16;
|
|
+ long unsigned int revision: 4;
|
|
+ long unsigned int node_id: 15;
|
|
+ long unsigned int rsvd_47: 1;
|
|
+ long unsigned int router_select: 1;
|
|
+ long unsigned int rsvd_49: 1;
|
|
+ long unsigned int nodes_per_bit: 7;
|
|
+ long unsigned int ni_port: 5;
|
|
+ long unsigned int rsvd_62_63: 2;
|
|
+};
|
|
+
|
|
+struct uv4h_node_id_s {
|
|
+ long unsigned int force1: 1;
|
|
+ long unsigned int manufacturer: 11;
|
|
+ long unsigned int part_number: 16;
|
|
+ long unsigned int revision: 4;
|
|
+ long unsigned int node_id: 15;
|
|
+ long unsigned int rsvd_47: 1;
|
|
+ long unsigned int router_select: 1;
|
|
+ long unsigned int rsvd_49: 1;
|
|
+ long unsigned int nodes_per_bit: 7;
|
|
+ long unsigned int ni_port: 5;
|
|
+ long unsigned int rsvd_62_63: 2;
|
|
+};
|
|
+
|
|
+union uvh_node_id_u {
|
|
+ long unsigned int v;
|
|
+ struct uvh_node_id_s s;
|
|
+ struct uv1h_node_id_s s1;
|
|
+ struct uvxh_node_id_s sx;
|
|
+ struct uv2h_node_id_s s2;
|
|
+ struct uv3h_node_id_s s3;
|
|
+ struct uv4h_node_id_s s4;
|
|
+};
|
|
+
|
|
+struct uvh_rh_gam_alias210_overlay_config_2_mmr_s {
|
|
+ long unsigned int rsvd_0_23: 24;
|
|
+ long unsigned int base: 8;
|
|
+ long unsigned int rsvd_32_47: 16;
|
|
+ long unsigned int m_alias: 5;
|
|
+ long unsigned int rsvd_53_62: 10;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+struct uv1h_rh_gam_alias210_overlay_config_2_mmr_s {
|
|
+ long unsigned int rsvd_0_23: 24;
|
|
+ long unsigned int base: 8;
|
|
+ long unsigned int rsvd_32_47: 16;
|
|
+ long unsigned int m_alias: 5;
|
|
+ long unsigned int rsvd_53_62: 10;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+struct uvxh_rh_gam_alias210_overlay_config_2_mmr_s {
|
|
+ long unsigned int rsvd_0_23: 24;
|
|
+ long unsigned int base: 8;
|
|
+ long unsigned int rsvd_32_47: 16;
|
|
+ long unsigned int m_alias: 5;
|
|
+ long unsigned int rsvd_53_62: 10;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+struct uv2h_rh_gam_alias210_overlay_config_2_mmr_s {
|
|
+ long unsigned int rsvd_0_23: 24;
|
|
+ long unsigned int base: 8;
|
|
+ long unsigned int rsvd_32_47: 16;
|
|
+ long unsigned int m_alias: 5;
|
|
+ long unsigned int rsvd_53_62: 10;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+struct uv3h_rh_gam_alias210_overlay_config_2_mmr_s {
|
|
+ long unsigned int rsvd_0_23: 24;
|
|
+ long unsigned int base: 8;
|
|
+ long unsigned int rsvd_32_47: 16;
|
|
+ long unsigned int m_alias: 5;
|
|
+ long unsigned int rsvd_53_62: 10;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+struct uv4h_rh_gam_alias210_overlay_config_2_mmr_s {
|
|
+ long unsigned int rsvd_0_23: 24;
|
|
+ long unsigned int base: 8;
|
|
+ long unsigned int rsvd_32_47: 16;
|
|
+ long unsigned int m_alias: 5;
|
|
+ long unsigned int rsvd_53_62: 10;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
|
|
+ long unsigned int v;
|
|
+ struct uvh_rh_gam_alias210_overlay_config_2_mmr_s s;
|
|
+ struct uv1h_rh_gam_alias210_overlay_config_2_mmr_s s1;
|
|
+ struct uvxh_rh_gam_alias210_overlay_config_2_mmr_s sx;
|
|
+ struct uv2h_rh_gam_alias210_overlay_config_2_mmr_s s2;
|
|
+ struct uv3h_rh_gam_alias210_overlay_config_2_mmr_s s3;
|
|
+ struct uv4h_rh_gam_alias210_overlay_config_2_mmr_s s4;
|
|
+};
|
|
+
|
|
+struct uvh_rh_gam_alias210_redirect_config_2_mmr_s {
|
|
+ long unsigned int rsvd_0_23: 24;
|
|
+ long unsigned int dest_base: 22;
|
|
+ long unsigned int rsvd_46_63: 18;
|
|
+};
|
|
+
|
|
+struct uv1h_rh_gam_alias210_redirect_config_2_mmr_s {
|
|
+ long unsigned int rsvd_0_23: 24;
|
|
+ long unsigned int dest_base: 22;
|
|
+ long unsigned int rsvd_46_63: 18;
|
|
+};
|
|
+
|
|
+struct uvxh_rh_gam_alias210_redirect_config_2_mmr_s {
|
|
+ long unsigned int rsvd_0_23: 24;
|
|
+ long unsigned int dest_base: 22;
|
|
+ long unsigned int rsvd_46_63: 18;
|
|
+};
|
|
+
|
|
+struct uv2h_rh_gam_alias210_redirect_config_2_mmr_s {
|
|
+ long unsigned int rsvd_0_23: 24;
|
|
+ long unsigned int dest_base: 22;
|
|
+ long unsigned int rsvd_46_63: 18;
|
|
+};
|
|
+
|
|
+struct uv3h_rh_gam_alias210_redirect_config_2_mmr_s {
|
|
+ long unsigned int rsvd_0_23: 24;
|
|
+ long unsigned int dest_base: 22;
|
|
+ long unsigned int rsvd_46_63: 18;
|
|
+};
|
|
+
|
|
+struct uv4h_rh_gam_alias210_redirect_config_2_mmr_s {
|
|
+ long unsigned int rsvd_0_23: 24;
|
|
+ long unsigned int dest_base: 22;
|
|
+ long unsigned int rsvd_46_63: 18;
|
|
+};
|
|
+
|
|
+union uvh_rh_gam_alias210_redirect_config_2_mmr_u {
|
|
+ long unsigned int v;
|
|
+ struct uvh_rh_gam_alias210_redirect_config_2_mmr_s s;
|
|
+ struct uv1h_rh_gam_alias210_redirect_config_2_mmr_s s1;
|
|
+ struct uvxh_rh_gam_alias210_redirect_config_2_mmr_s sx;
|
|
+ struct uv2h_rh_gam_alias210_redirect_config_2_mmr_s s2;
|
|
+ struct uv3h_rh_gam_alias210_redirect_config_2_mmr_s s3;
|
|
+ struct uv4h_rh_gam_alias210_redirect_config_2_mmr_s s4;
|
|
+};
|
|
+
|
|
+struct uvh_rh_gam_config_mmr_s {
|
|
+ long unsigned int rsvd_0_5: 6;
|
|
+ long unsigned int n_skt: 4;
|
|
+ long unsigned int rsvd_10_63: 54;
|
|
+};
|
|
+
|
|
+struct uv1h_rh_gam_config_mmr_s {
|
|
+ long unsigned int m_skt: 6;
|
|
+ long unsigned int n_skt: 4;
|
|
+ long unsigned int rsvd_10_11: 2;
|
|
+ long unsigned int mmiol_cfg: 1;
|
|
+ long unsigned int rsvd_13_63: 51;
|
|
+};
|
|
+
|
|
+struct uvxh_rh_gam_config_mmr_s {
|
|
+ long unsigned int rsvd_0_5: 6;
|
|
+ long unsigned int n_skt: 4;
|
|
+ long unsigned int rsvd_10_63: 54;
|
|
+};
|
|
+
|
|
+struct uv2h_rh_gam_config_mmr_s {
|
|
+ long unsigned int m_skt: 6;
|
|
+ long unsigned int n_skt: 4;
|
|
+ long unsigned int rsvd_10_63: 54;
|
|
+};
|
|
+
|
|
+struct uv3h_rh_gam_config_mmr_s {
|
|
+ long unsigned int m_skt: 6;
|
|
+ long unsigned int n_skt: 4;
|
|
+ long unsigned int rsvd_10_63: 54;
|
|
+};
|
|
+
|
|
+struct uv4h_rh_gam_config_mmr_s {
|
|
+ long unsigned int rsvd_0_5: 6;
|
|
+ long unsigned int n_skt: 4;
|
|
+ long unsigned int rsvd_10_63: 54;
|
|
+};
|
|
+
|
|
+union uvh_rh_gam_config_mmr_u {
|
|
+ long unsigned int v;
|
|
+ struct uvh_rh_gam_config_mmr_s s;
|
|
+ struct uv1h_rh_gam_config_mmr_s s1;
|
|
+ struct uvxh_rh_gam_config_mmr_s sx;
|
|
+ struct uv2h_rh_gam_config_mmr_s s2;
|
|
+ struct uv3h_rh_gam_config_mmr_s s3;
|
|
+ struct uv4h_rh_gam_config_mmr_s s4;
|
|
+};
|
|
+
|
|
+struct uvh_rh_gam_gru_overlay_config_mmr_s {
|
|
+ long unsigned int rsvd_0_51: 52;
|
|
+ long unsigned int n_gru: 4;
|
|
+ long unsigned int rsvd_56_62: 7;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+struct uv1h_rh_gam_gru_overlay_config_mmr_s {
|
|
+ long unsigned int rsvd_0_27: 28;
|
|
+ long unsigned int base: 18;
|
|
+ long unsigned int rsvd_46_47: 2;
|
|
+ long unsigned int gr4: 1;
|
|
+ long unsigned int rsvd_49_51: 3;
|
|
+ long unsigned int n_gru: 4;
|
|
+ long unsigned int rsvd_56_62: 7;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+struct uvxh_rh_gam_gru_overlay_config_mmr_s {
|
|
+ long unsigned int rsvd_0_45: 46;
|
|
+ long unsigned int rsvd_46_51: 6;
|
|
+ long unsigned int n_gru: 4;
|
|
+ long unsigned int rsvd_56_62: 7;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+struct uv2h_rh_gam_gru_overlay_config_mmr_s {
|
|
+ long unsigned int rsvd_0_27: 28;
|
|
+ long unsigned int base: 18;
|
|
+ long unsigned int rsvd_46_51: 6;
|
|
+ long unsigned int n_gru: 4;
|
|
+ long unsigned int rsvd_56_62: 7;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+struct uv3h_rh_gam_gru_overlay_config_mmr_s {
|
|
+ long unsigned int rsvd_0_27: 28;
|
|
+ long unsigned int base: 18;
|
|
+ long unsigned int rsvd_46_51: 6;
|
|
+ long unsigned int n_gru: 4;
|
|
+ long unsigned int rsvd_56_61: 6;
|
|
+ long unsigned int mode: 1;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+struct uv4h_rh_gam_gru_overlay_config_mmr_s {
|
|
+ long unsigned int rsvd_0_24: 25;
|
|
+ long unsigned int undef_25: 1;
|
|
+ long unsigned int base: 20;
|
|
+ long unsigned int rsvd_46_51: 6;
|
|
+ long unsigned int n_gru: 4;
|
|
+ long unsigned int rsvd_56_62: 7;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+union uvh_rh_gam_gru_overlay_config_mmr_u {
|
|
+ long unsigned int v;
|
|
+ struct uvh_rh_gam_gru_overlay_config_mmr_s s;
|
|
+ struct uv1h_rh_gam_gru_overlay_config_mmr_s s1;
|
|
+ struct uvxh_rh_gam_gru_overlay_config_mmr_s sx;
|
|
+ struct uv2h_rh_gam_gru_overlay_config_mmr_s s2;
|
|
+ struct uv3h_rh_gam_gru_overlay_config_mmr_s s3;
|
|
+ struct uv4h_rh_gam_gru_overlay_config_mmr_s s4;
|
|
+};
|
|
+
|
|
+struct uv1h_rh_gam_mmioh_overlay_config_mmr_s {
|
|
+ long unsigned int rsvd_0_29: 30;
|
|
+ long unsigned int base: 16;
|
|
+ long unsigned int m_io: 6;
|
|
+ long unsigned int n_io: 4;
|
|
+ long unsigned int rsvd_56_62: 7;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+struct uv2h_rh_gam_mmioh_overlay_config_mmr_s {
|
|
+ long unsigned int rsvd_0_26: 27;
|
|
+ long unsigned int base: 19;
|
|
+ long unsigned int m_io: 6;
|
|
+ long unsigned int n_io: 4;
|
|
+ long unsigned int rsvd_56_62: 7;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+union uvh_rh_gam_mmioh_overlay_config_mmr_u {
|
|
+ long unsigned int v;
|
|
+ struct uv1h_rh_gam_mmioh_overlay_config_mmr_s s1;
|
|
+ struct uv2h_rh_gam_mmioh_overlay_config_mmr_s s2;
|
|
+};
|
|
+
|
|
+struct uvh_rh_gam_mmr_overlay_config_mmr_s {
|
|
+ long unsigned int rsvd_0_25: 26;
|
|
+ long unsigned int base: 20;
|
|
+ long unsigned int rsvd_46_62: 17;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+struct uv1h_rh_gam_mmr_overlay_config_mmr_s {
|
|
+ long unsigned int rsvd_0_25: 26;
|
|
+ long unsigned int base: 20;
|
|
+ long unsigned int dual_hub: 1;
|
|
+ long unsigned int rsvd_47_62: 16;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+struct uvxh_rh_gam_mmr_overlay_config_mmr_s {
|
|
+ long unsigned int rsvd_0_25: 26;
|
|
+ long unsigned int base: 20;
|
|
+ long unsigned int rsvd_46_62: 17;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+struct uv2h_rh_gam_mmr_overlay_config_mmr_s {
|
|
+ long unsigned int rsvd_0_25: 26;
|
|
+ long unsigned int base: 20;
|
|
+ long unsigned int rsvd_46_62: 17;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+struct uv3h_rh_gam_mmr_overlay_config_mmr_s {
|
|
+ long unsigned int rsvd_0_25: 26;
|
|
+ long unsigned int base: 20;
|
|
+ long unsigned int rsvd_46_62: 17;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+struct uv4h_rh_gam_mmr_overlay_config_mmr_s {
|
|
+ long unsigned int rsvd_0_25: 26;
|
|
+ long unsigned int base: 20;
|
|
+ long unsigned int rsvd_46_62: 17;
|
|
+ long unsigned int enable: 1;
|
|
+};
|
|
+
|
|
+union uvh_rh_gam_mmr_overlay_config_mmr_u {
|
|
+ long unsigned int v;
|
|
+ struct uvh_rh_gam_mmr_overlay_config_mmr_s s;
|
|
+ struct uv1h_rh_gam_mmr_overlay_config_mmr_s s1;
|
|
+ struct uvxh_rh_gam_mmr_overlay_config_mmr_s sx;
|
|
+ struct uv2h_rh_gam_mmr_overlay_config_mmr_s s2;
|
|
+ struct uv3h_rh_gam_mmr_overlay_config_mmr_s s3;
|
|
+ struct uv4h_rh_gam_mmr_overlay_config_mmr_s s4;
|
|
+};
|
|
+
|
|
+struct uv1h_lb_target_physical_apic_id_mask_s {
|
|
+ long unsigned int bit_enables: 32;
|
|
+ long unsigned int rsvd_32_63: 32;
|
|
+};
|
|
+
|
|
+union uv1h_lb_target_physical_apic_id_mask_u {
|
|
+ long unsigned int v;
|
|
+ struct uv1h_lb_target_physical_apic_id_mask_s s1;
|
|
+};
|
|
+
|
|
+struct uv3h_gr0_gam_gr_config_s {
|
|
+ long unsigned int m_skt: 6;
|
|
+ long unsigned int undef_6_9: 4;
|
|
+ long unsigned int subspace: 1;
|
|
+ long unsigned int reserved: 53;
|
|
+};
|
|
+
|
|
+union uv3h_gr0_gam_gr_config_u {
|
|
+ long unsigned int v;
|
|
+ struct uv3h_gr0_gam_gr_config_s s3;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ BIOS_STATUS_MORE_PASSES = 1,
|
|
+ BIOS_STATUS_SUCCESS = 0,
|
|
+ BIOS_STATUS_UNIMPLEMENTED = -38,
|
|
+ BIOS_STATUS_EINVAL = -22,
|
|
+ BIOS_STATUS_UNAVAIL = -16,
|
|
+ BIOS_STATUS_ABORT = -4,
|
|
+};
|
|
+
|
|
+struct uv_gam_parameters {
|
|
+ u64 mmr_base;
|
|
+ u64 gru_base;
|
|
+ u8 mmr_shift;
|
|
+ u8 gru_shift;
|
|
+ u8 gpa_shift;
|
|
+ u8 unused1;
|
|
+};
|
|
+
|
|
+struct uv_gam_range_entry {
|
|
+ char type;
|
|
+ char unused1;
|
|
+ u16 nasid;
|
|
+ u16 sockid;
|
|
+ u16 pnode;
|
|
+ u32 unused2;
|
|
+ u32 limit;
|
|
+};
|
|
+
|
|
+struct uv_systab {
|
|
+ char signature[4];
|
|
+ u32 revision;
|
|
+ u64 function;
|
|
+ u32 size;
|
|
+ struct {
|
|
+ u32 type: 8;
|
|
+ u32 offset: 24;
|
|
+ } entry[1];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ BIOS_FREQ_BASE_PLATFORM = 0,
|
|
+ BIOS_FREQ_BASE_INTERVAL_TIMER = 1,
|
|
+ BIOS_FREQ_BASE_REALTIME_CLOCK = 2,
|
|
+};
|
|
+
|
|
+struct uv_scir_s {
|
|
+ struct timer_list timer;
|
|
+ long unsigned int offset;
|
|
+ long unsigned int last;
|
|
+ long unsigned int idle_on;
|
|
+ long unsigned int idle_off;
|
|
+ unsigned char state;
|
|
+ unsigned char enabled;
|
|
+};
|
|
+
|
|
+struct uv_gam_range_s {
|
|
+ u32 limit;
|
|
+ u16 nasid;
|
|
+ s8 base;
|
|
+ u8 reserved;
|
|
+};
|
|
+
|
|
+struct uv_hub_info_s {
|
|
+ long unsigned int global_mmr_base;
|
|
+ long unsigned int global_mmr_shift;
|
|
+ long unsigned int gpa_mask;
|
|
+ short unsigned int *socket_to_node;
|
|
+ short unsigned int *socket_to_pnode;
|
|
+ short unsigned int *pnode_to_socket;
|
|
+ struct uv_gam_range_s *gr_table;
|
|
+ short unsigned int min_socket;
|
|
+ short unsigned int min_pnode;
|
|
+ unsigned char m_val;
|
|
+ unsigned char n_val;
|
|
+ unsigned char gr_table_len;
|
|
+ unsigned char hub_revision;
|
|
+ unsigned char apic_pnode_shift;
|
|
+ unsigned char gpa_shift;
|
|
+ unsigned char m_shift;
|
|
+ unsigned char n_lshift;
|
|
+ unsigned int gnode_extra;
|
|
+ long unsigned int gnode_upper;
|
|
+ long unsigned int lowmem_remap_top;
|
|
+ long unsigned int lowmem_remap_base;
|
|
+ long unsigned int global_gru_base;
|
|
+ long unsigned int global_gru_shift;
|
|
+ short unsigned int pnode;
|
|
+ short unsigned int pnode_mask;
|
|
+ short unsigned int coherency_domain_number;
|
|
+ short unsigned int numa_blade_id;
|
|
+ short unsigned int nr_possible_cpus;
|
|
+ short unsigned int nr_online_cpus;
|
|
+ short int memory_nid;
|
|
+};
|
|
+
|
|
+struct uv_cpu_info_s {
|
|
+ void *p_uv_hub_info;
|
|
+ unsigned char blade_cpu_id;
|
|
+ struct uv_scir_s scir;
|
|
+};
|
|
+
|
|
+struct uvh_apicid_s {
|
|
+ long unsigned int local_apic_mask: 24;
|
|
+ long unsigned int local_apic_shift: 5;
|
|
+ long unsigned int unused1: 3;
|
|
+ long unsigned int pnode_mask: 24;
|
|
+ long unsigned int pnode_shift: 5;
|
|
+ long unsigned int unused2: 3;
|
|
+};
|
|
+
|
|
+union uvh_apicid {
|
|
+ long unsigned int v;
|
|
+ struct uvh_apicid_s s;
|
|
+};
|
|
+
|
|
+struct uv_hub_nmi_s {
|
|
+ raw_spinlock_t nmi_lock;
|
|
+ atomic_t in_nmi;
|
|
+ atomic_t cpu_owner;
|
|
+ atomic_t read_mmr_count;
|
|
+ atomic_t nmi_count;
|
|
+ long unsigned int nmi_value;
|
|
+ bool hub_present;
|
|
+ bool pch_owner;
|
|
+};
|
|
+
|
|
+struct uv_cpu_nmi_s {
|
|
+ struct uv_hub_nmi_s *hub;
|
|
+ int state;
|
|
+ int pinging;
|
|
+ int queries;
|
|
+ int pings;
|
|
+};
|
|
+
|
|
+enum uv_system_type {
|
|
+ UV_NONE = 0,
|
|
+ UV_LEGACY_APIC = 1,
|
|
+ UV_X2APIC = 2,
|
|
+ UV_NON_UNIQUE_APIC = 3,
|
|
+};
|
|
+
|
|
+enum map_type {
|
|
+ map_wb = 0,
|
|
+ map_uc = 1,
|
|
+};
|
|
+
|
|
+struct mn {
|
|
+ unsigned char m_val;
|
|
+ unsigned char n_val;
|
|
+ unsigned char m_shift;
|
|
+ unsigned char n_lshift;
|
|
+};
|
|
+
|
|
+struct cluster_mask {
|
|
+ unsigned int clusterid;
|
|
+ int node;
|
|
+ struct cpumask mask;
|
|
+};
|
|
+
|
|
+typedef u32 pto_T_____13;
|
|
+
|
|
+typedef struct cluster_mask *pto_T_____14;
|
|
+
|
|
+struct dyn_arch_ftrace {};
|
|
+
|
|
+enum {
|
|
+ FTRACE_OPS_FL_ENABLED = 1,
|
|
+ FTRACE_OPS_FL_DYNAMIC = 2,
|
|
+ FTRACE_OPS_FL_SAVE_REGS = 4,
|
|
+ FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 8,
|
|
+ FTRACE_OPS_FL_RECURSION_SAFE = 16,
|
|
+ FTRACE_OPS_FL_STUB = 32,
|
|
+ FTRACE_OPS_FL_INITIALIZED = 64,
|
|
+ FTRACE_OPS_FL_DELETED = 128,
|
|
+ FTRACE_OPS_FL_ADDING = 256,
|
|
+ FTRACE_OPS_FL_REMOVING = 512,
|
|
+ FTRACE_OPS_FL_MODIFYING = 1024,
|
|
+ FTRACE_OPS_FL_ALLOC_TRAMP = 2048,
|
|
+ FTRACE_OPS_FL_IPMODIFY = 4096,
|
|
+ FTRACE_OPS_FL_PID = 8192,
|
|
+ FTRACE_OPS_FL_RCU = 16384,
|
|
+ FTRACE_OPS_FL_TRACE_ARRAY = 32768,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ FTRACE_FL_ENABLED = -2147483648,
|
|
+ FTRACE_FL_REGS = 1073741824,
|
|
+ FTRACE_FL_REGS_EN = 536870912,
|
|
+ FTRACE_FL_TRAMP = 268435456,
|
|
+ FTRACE_FL_TRAMP_EN = 134217728,
|
|
+ FTRACE_FL_IPMODIFY = 67108864,
|
|
+ FTRACE_FL_DISABLED = 33554432,
|
|
+};
|
|
+
|
|
+struct dyn_ftrace {
|
|
+ long unsigned int ip;
|
|
+ long unsigned int flags;
|
|
+ struct dyn_arch_ftrace arch;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ FTRACE_UPDATE_IGNORE = 0,
|
|
+ FTRACE_UPDATE_MAKE_CALL = 1,
|
|
+ FTRACE_UPDATE_MODIFY_CALL = 2,
|
|
+ FTRACE_UPDATE_MAKE_NOP = 3,
|
|
+};
|
|
+
|
|
+union ftrace_code_union {
|
|
+ char code[5];
|
|
+ struct {
|
|
+ unsigned char op;
|
|
+ int offset;
|
|
+ } __attribute__((packed));
|
|
+};
|
|
+
|
|
+union ftrace_op_code_union {
|
|
+ char code[7];
|
|
+ struct {
|
|
+ char op[3];
|
|
+ int offset;
|
|
+ } __attribute__((packed));
|
|
+};
|
|
+
|
|
+struct ftrace_rec_iter;
|
|
+
|
|
+struct klp_func {
|
|
+ const char *old_name;
|
|
+ void *new_func;
|
|
+ long unsigned int old_sympos;
|
|
+ int force;
|
|
+ long unsigned int old_addr;
|
|
+ struct kobject kobj;
|
|
+ struct list_head stack_node;
|
|
+ long unsigned int old_size;
|
|
+ long unsigned int new_size;
|
|
+ bool patched;
|
|
+};
|
|
+
|
|
+struct klp_hook {
|
|
+ void (*hook)();
|
|
+};
|
|
+
|
|
+struct klp_object;
|
|
+
|
|
+struct klp_callbacks {
|
|
+ int (*pre_patch)(struct klp_object *);
|
|
+ void (*post_patch)(struct klp_object *);
|
|
+ void (*pre_unpatch)(struct klp_object *);
|
|
+ void (*post_unpatch)(struct klp_object *);
|
|
+ bool post_unpatch_enabled;
|
|
+};
|
|
+
|
|
+struct klp_object {
|
|
+ const char *name;
|
|
+ struct klp_func *funcs;
|
|
+ struct klp_hook *hooks_load;
|
|
+ struct klp_hook *hooks_unload;
|
|
+ struct klp_callbacks callbacks;
|
|
+ struct kobject kobj;
|
|
+ struct module *mod;
|
|
+ bool patched;
|
|
+};
|
|
+
|
|
+struct klp_patch {
|
|
+ struct module *mod;
|
|
+ struct klp_object *objs;
|
|
+ struct list_head list;
|
|
+ struct kobject kobj;
|
|
+ bool enabled;
|
|
+ struct completion finish;
|
|
+};
|
|
+
|
|
+union klp_code_union {
|
|
+ char code[5];
|
|
+ struct {
|
|
+ unsigned char e9;
|
|
+ int offset;
|
|
+ } __attribute__((packed));
|
|
+};
|
|
+
|
|
+struct klp_func_node {
|
|
+ struct list_head node;
|
|
+ struct list_head func_stack;
|
|
+ long unsigned int old_addr;
|
|
+ unsigned char old_code[5];
|
|
+};
|
|
+
|
|
+struct elf64_rela {
|
|
+ Elf64_Addr r_offset;
|
|
+ Elf64_Xword r_info;
|
|
+ Elf64_Sxword r_addend;
|
|
+};
|
|
+
|
|
+typedef struct elf64_rela Elf64_Rela;
|
|
+
|
|
+struct x86_mapping_info {
|
|
+ void * (*alloc_pgt_page)(void *);
|
|
+ void *context;
|
|
+ long unsigned int page_flag;
|
|
+ long unsigned int offset;
|
|
+ bool direct_gbpages;
|
|
+ long unsigned int kernpg_flag;
|
|
+};
|
|
+
|
|
+struct init_pgtable_data {
|
|
+ struct x86_mapping_info *info;
|
|
+ pgd_t *level4p;
|
|
+};
|
|
+
|
|
+struct elf64_phdr {
|
|
+ Elf64_Word p_type;
|
|
+ Elf64_Word p_flags;
|
|
+ Elf64_Off p_offset;
|
|
+ Elf64_Addr p_vaddr;
|
|
+ Elf64_Addr p_paddr;
|
|
+ Elf64_Xword p_filesz;
|
|
+ Elf64_Xword p_memsz;
|
|
+ Elf64_Xword p_align;
|
|
+};
|
|
+
|
|
+typedef struct elf64_phdr Elf64_Phdr;
|
|
+
|
|
+struct kexec_buf {
|
|
+ struct kimage *image;
|
|
+ void *buffer;
|
|
+ long unsigned int bufsz;
|
|
+ long unsigned int mem;
|
|
+ long unsigned int memsz;
|
|
+ long unsigned int buf_align;
|
|
+ long unsigned int buf_min;
|
|
+ long unsigned int buf_max;
|
|
+ bool top_down;
|
|
+};
|
|
+
|
|
+struct crash_mem_range {
|
|
+ u64 start;
|
|
+ u64 end;
|
|
+};
|
|
+
|
|
+struct crash_mem {
|
|
+ unsigned int max_nr_ranges;
|
|
+ unsigned int nr_ranges;
|
|
+ struct crash_mem_range ranges[0];
|
|
+};
|
|
+
|
|
+struct crash_memmap_data {
|
|
+ struct boot_params *params;
|
|
+ unsigned int type;
|
|
+};
|
|
+
|
|
+struct kexec_entry64_regs {
|
|
+ uint64_t rax;
|
|
+ uint64_t rcx;
|
|
+ uint64_t rdx;
|
|
+ uint64_t rbx;
|
|
+ uint64_t rsp;
|
|
+ uint64_t rbp;
|
|
+ uint64_t rsi;
|
|
+ uint64_t rdi;
|
|
+ uint64_t r8;
|
|
+ uint64_t r9;
|
|
+ uint64_t r10;
|
|
+ uint64_t r11;
|
|
+ uint64_t r12;
|
|
+ uint64_t r13;
|
|
+ uint64_t r14;
|
|
+ uint64_t r15;
|
|
+ uint64_t rip;
|
|
+};
|
|
+
|
|
+enum key_being_used_for {
|
|
+ VERIFYING_MODULE_SIGNATURE = 0,
|
|
+ VERIFYING_FIRMWARE_SIGNATURE = 1,
|
|
+ VERIFYING_KEXEC_PE_SIGNATURE = 2,
|
|
+ VERIFYING_KEY_SIGNATURE = 3,
|
|
+ VERIFYING_KEY_SELF_SIGNATURE = 4,
|
|
+ VERIFYING_UNSPECIFIED_SIGNATURE = 5,
|
|
+ NR__KEY_BEING_USED_FOR = 6,
|
|
+};
|
|
+
|
|
+struct efi_setup_data {
|
|
+ u64 fw_vendor;
|
|
+ u64 runtime;
|
|
+ u64 tables;
|
|
+ u64 smbios;
|
|
+ u64 reserved[8];
|
|
+};
|
|
+
|
|
+struct bzimage64_data {
|
|
+ void *bootparams_buf;
|
|
+};
|
|
+
|
|
+struct kretprobe_instance;
|
|
+
|
|
+typedef int (*kretprobe_handler_t)(struct kretprobe_instance *, struct pt_regs *);
|
|
+
|
|
+struct kretprobe;
|
|
+
|
|
+struct kretprobe_instance {
|
|
+ struct hlist_node hlist;
|
|
+ struct kretprobe *rp;
|
|
+ kprobe_opcode_t *ret_addr;
|
|
+ struct task_struct *task;
|
|
+ void *fp;
|
|
+ char data[0];
|
|
+};
|
|
+
|
|
+struct kretprobe {
|
|
+ struct kprobe kp;
|
|
+ kretprobe_handler_t handler;
|
|
+ kretprobe_handler_t entry_handler;
|
|
+ int maxactive;
|
|
+ int nmissed;
|
|
+ size_t data_size;
|
|
+ struct hlist_head free_instances;
|
|
+ raw_spinlock_t lock;
|
|
+};
|
|
+
|
|
+typedef struct kprobe *pto_T_____15;
|
|
+
|
|
+struct __arch_relative_insn {
|
|
+ u8 op;
|
|
+ s32 raddr;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct arch_optimized_insn {
|
|
+ kprobe_opcode_t copied_insn[4];
|
|
+ kprobe_opcode_t *insn;
|
|
+ size_t size;
|
|
+};
|
|
+
|
|
+struct optimized_kprobe {
|
|
+ struct kprobe kp;
|
|
+ struct list_head list;
|
|
+ struct arch_optimized_insn optinsn;
|
|
+};
|
|
+
|
|
+enum regnames {
|
|
+ GDB_AX = 0,
|
|
+ GDB_BX = 1,
|
|
+ GDB_CX = 2,
|
|
+ GDB_DX = 3,
|
|
+ GDB_SI = 4,
|
|
+ GDB_DI = 5,
|
|
+ GDB_BP = 6,
|
|
+ GDB_SP = 7,
|
|
+ GDB_R8 = 8,
|
|
+ GDB_R9 = 9,
|
|
+ GDB_R10 = 10,
|
|
+ GDB_R11 = 11,
|
|
+ GDB_R12 = 12,
|
|
+ GDB_R13 = 13,
|
|
+ GDB_R14 = 14,
|
|
+ GDB_R15 = 15,
|
|
+ GDB_PC = 16,
|
|
+ GDB_PS = 17,
|
|
+ GDB_CS = 18,
|
|
+ GDB_SS = 19,
|
|
+ GDB_DS = 20,
|
|
+ GDB_ES = 21,
|
|
+ GDB_FS = 22,
|
|
+ GDB_GS = 23,
|
|
+};
|
|
+
|
|
+enum kgdb_bpstate {
|
|
+ BP_UNDEFINED = 0,
|
|
+ BP_REMOVED = 1,
|
|
+ BP_SET = 2,
|
|
+ BP_ACTIVE = 3,
|
|
+};
|
|
+
|
|
+struct kgdb_bkpt {
|
|
+ long unsigned int bpt_addr;
|
|
+ unsigned char saved_instr[1];
|
|
+ enum kgdb_bptype type;
|
|
+ enum kgdb_bpstate state;
|
|
+};
|
|
+
|
|
+struct hw_breakpoint {
|
|
+ unsigned int enabled;
|
|
+ long unsigned int addr;
|
|
+ int len;
|
|
+ int type;
|
|
+ struct perf_event **pev;
|
|
+};
|
|
+
|
|
+struct hpet_timer {
|
|
+ u64 hpet_config;
|
|
+ union {
|
|
+ u64 _hpet_hc64;
|
|
+ u32 _hpet_hc32;
|
|
+ long unsigned int _hpet_compare;
|
|
+ } _u1;
|
|
+ u64 hpet_fsb[2];
|
|
+};
|
|
+
|
|
+struct hpet {
|
|
+ u64 hpet_cap;
|
|
+ u64 res0;
|
|
+ u64 hpet_config;
|
|
+ u64 res1;
|
|
+ u64 hpet_isr;
|
|
+ u64 res2[25];
|
|
+ union {
|
|
+ u64 _hpet_mc64;
|
|
+ u32 _hpet_mc32;
|
|
+ long unsigned int _hpet_mc;
|
|
+ } _u0;
|
|
+ u64 res3;
|
|
+ struct hpet_timer hpet_timers[1];
|
|
+};
|
|
+
|
|
+struct hpet_data {
|
|
+ long unsigned int hd_phys_address;
|
|
+ void *hd_address;
|
|
+ short unsigned int hd_nirqs;
|
|
+ unsigned int hd_state;
|
|
+ unsigned int hd_irq[32];
|
|
+};
|
|
+
|
|
+typedef irqreturn_t (*rtc_irq_handler)(int, void *);
|
|
+
|
|
+struct hpet_dev___2 {
|
|
+ struct clock_event_device evt;
|
|
+ unsigned int num;
|
|
+ int cpu;
|
|
+ unsigned int irq;
|
|
+ unsigned int flags;
|
|
+ char name[10];
|
|
+ long: 48;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct hpet_work_struct {
|
|
+ struct delayed_work work;
|
|
+ struct completion complete;
|
|
+};
|
|
+
|
|
+union hpet_lock {
|
|
+ struct {
|
|
+ arch_spinlock_t lock;
|
|
+ u32 value;
|
|
+ };
|
|
+ u64 lockval;
|
|
+};
|
|
+
|
|
+struct amd_northbridge_info {
|
|
+ u16 num;
|
|
+ u64 flags;
|
|
+ struct amd_northbridge *nb;
|
|
+};
|
|
+
|
|
+struct kvm_steal_time {
|
|
+ __u64 steal;
|
|
+ __u32 version;
|
|
+ __u32 flags;
|
|
+ __u8 preempted;
|
|
+ __u8 u8_pad[3];
|
|
+ __u32 pad[11];
|
|
+};
|
|
+
|
|
+struct kvm_vcpu_pv_apf_data {
|
|
+ __u32 reason;
|
|
+ __u8 pad[60];
|
|
+ __u32 enabled;
|
|
+};
|
|
+
|
|
+struct swait_queue_head {
|
|
+ raw_spinlock_t lock;
|
|
+ struct list_head task_list;
|
|
+};
|
|
+
|
|
+struct swait_queue {
|
|
+ struct task_struct *task;
|
|
+ struct list_head task_list;
|
|
+};
|
|
+
|
|
+struct kvm_task_sleep_node {
|
|
+ struct hlist_node link;
|
|
+ struct swait_queue_head wq;
|
|
+ u32 token;
|
|
+ int cpu;
|
|
+ bool halted;
|
|
+};
|
|
+
|
|
+struct kvm_task_sleep_head {
|
|
+ raw_spinlock_t lock;
|
|
+ struct hlist_head list;
|
|
+};
|
|
+
|
|
+typedef __u32 pto_T_____16;
|
|
+
|
|
+typedef struct pvclock_vsyscall_time_info *pto_T_____17;
|
|
+
|
|
+typedef struct ldttss_desc ldt_desc;
|
|
+
|
|
+struct paravirt_patch_template {
|
|
+ struct pv_init_ops pv_init_ops;
|
|
+ struct pv_time_ops pv_time_ops;
|
|
+ struct pv_cpu_ops pv_cpu_ops;
|
|
+ struct pv_irq_ops pv_irq_ops;
|
|
+ struct pv_mmu_ops pv_mmu_ops;
|
|
+ struct pv_lock_ops pv_lock_ops;
|
|
+};
|
|
+
|
|
+struct branch {
|
|
+ unsigned char opcode;
|
|
+ u32 delta;
|
|
+} __attribute__((packed));
|
|
+
|
|
+typedef enum paravirt_lazy_mode pto_T_____18;
|
|
+
|
|
+typedef long unsigned int ulong;
|
|
+
|
|
+struct scan_area {
|
|
+ u64 addr;
|
|
+ u64 size;
|
|
+};
|
|
+
|
|
+struct uprobe_xol_ops;
|
|
+
|
|
+struct arch_uprobe {
|
|
+ union {
|
|
+ u8 insn[16];
|
|
+ u8 ixol[16];
|
|
+ };
|
|
+ const struct uprobe_xol_ops *ops;
|
|
+ union {
|
|
+ struct {
|
|
+ s32 offs;
|
|
+ u8 ilen;
|
|
+ u8 opc1;
|
|
+ } branch;
|
|
+ struct {
|
|
+ u8 fixups;
|
|
+ u8 ilen;
|
|
+ } defparam;
|
|
+ struct {
|
|
+ u8 reg_offset;
|
|
+ u8 ilen;
|
|
+ } push;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct uprobe_xol_ops {
|
|
+ bool (*emulate)(struct arch_uprobe *, struct pt_regs *);
|
|
+ int (*pre_xol)(struct arch_uprobe *, struct pt_regs *);
|
|
+ int (*post_xol)(struct arch_uprobe *, struct pt_regs *);
|
|
+ void (*abort)(struct arch_uprobe *, struct pt_regs *);
|
|
+};
|
|
+
|
|
+enum rp_check {
|
|
+ RP_CHECK_CALL = 0,
|
|
+ RP_CHECK_CHAIN_CALL = 1,
|
|
+ RP_CHECK_RET = 2,
|
|
+};
|
|
+
|
|
+enum dev_prop_type {
|
|
+ DEV_PROP_U8 = 0,
|
|
+ DEV_PROP_U16 = 1,
|
|
+ DEV_PROP_U32 = 2,
|
|
+ DEV_PROP_U64 = 3,
|
|
+ DEV_PROP_STRING = 4,
|
|
+ DEV_PROP_MAX = 5,
|
|
+};
|
|
+
|
|
+struct property_entry {
|
|
+ const char *name;
|
|
+ size_t length;
|
|
+ bool is_array;
|
|
+ enum dev_prop_type type;
|
|
+ union {
|
|
+ union {
|
|
+ const u8 *u8_data;
|
|
+ const u16 *u16_data;
|
|
+ const u32 *u32_data;
|
|
+ const u64 *u64_data;
|
|
+ const char * const *str;
|
|
+ } pointer;
|
|
+ union {
|
|
+ u8 u8_data;
|
|
+ u16 u16_data;
|
|
+ u32 u32_data;
|
|
+ u64 u64_data;
|
|
+ const char *str;
|
|
+ } value;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct fb_fix_screeninfo {
|
|
+ char id[16];
|
|
+ long unsigned int smem_start;
|
|
+ __u32 smem_len;
|
|
+ __u32 type;
|
|
+ __u32 type_aux;
|
|
+ __u32 visual;
|
|
+ __u16 xpanstep;
|
|
+ __u16 ypanstep;
|
|
+ __u16 ywrapstep;
|
|
+ __u32 line_length;
|
|
+ long unsigned int mmio_start;
|
|
+ __u32 mmio_len;
|
|
+ __u32 accel;
|
|
+ __u16 capabilities;
|
|
+ __u16 reserved[2];
|
|
+};
|
|
+
|
|
+struct fb_bitfield {
|
|
+ __u32 offset;
|
|
+ __u32 length;
|
|
+ __u32 msb_right;
|
|
+};
|
|
+
|
|
+struct fb_var_screeninfo {
|
|
+ __u32 xres;
|
|
+ __u32 yres;
|
|
+ __u32 xres_virtual;
|
|
+ __u32 yres_virtual;
|
|
+ __u32 xoffset;
|
|
+ __u32 yoffset;
|
|
+ __u32 bits_per_pixel;
|
|
+ __u32 grayscale;
|
|
+ struct fb_bitfield red;
|
|
+ struct fb_bitfield green;
|
|
+ struct fb_bitfield blue;
|
|
+ struct fb_bitfield transp;
|
|
+ __u32 nonstd;
|
|
+ __u32 activate;
|
|
+ __u32 height;
|
|
+ __u32 width;
|
|
+ __u32 accel_flags;
|
|
+ __u32 pixclock;
|
|
+ __u32 left_margin;
|
|
+ __u32 right_margin;
|
|
+ __u32 upper_margin;
|
|
+ __u32 lower_margin;
|
|
+ __u32 hsync_len;
|
|
+ __u32 vsync_len;
|
|
+ __u32 sync;
|
|
+ __u32 vmode;
|
|
+ __u32 rotate;
|
|
+ __u32 colorspace;
|
|
+ __u32 reserved[4];
|
|
+};
|
|
+
|
|
+struct fb_cmap {
|
|
+ __u32 start;
|
|
+ __u32 len;
|
|
+ __u16 *red;
|
|
+ __u16 *green;
|
|
+ __u16 *blue;
|
|
+ __u16 *transp;
|
|
+};
|
|
+
|
|
+struct fb_copyarea {
|
|
+ __u32 dx;
|
|
+ __u32 dy;
|
|
+ __u32 width;
|
|
+ __u32 height;
|
|
+ __u32 sx;
|
|
+ __u32 sy;
|
|
+};
|
|
+
|
|
+struct fb_fillrect {
|
|
+ __u32 dx;
|
|
+ __u32 dy;
|
|
+ __u32 width;
|
|
+ __u32 height;
|
|
+ __u32 color;
|
|
+ __u32 rop;
|
|
+};
|
|
+
|
|
+struct fb_image {
|
|
+ __u32 dx;
|
|
+ __u32 dy;
|
|
+ __u32 width;
|
|
+ __u32 height;
|
|
+ __u32 fg_color;
|
|
+ __u32 bg_color;
|
|
+ __u8 depth;
|
|
+ const char *data;
|
|
+ struct fb_cmap cmap;
|
|
+};
|
|
+
|
|
+struct fbcurpos {
|
|
+ __u16 x;
|
|
+ __u16 y;
|
|
+};
|
|
+
|
|
+struct fb_cursor {
|
|
+ __u16 set;
|
|
+ __u16 enable;
|
|
+ __u16 rop;
|
|
+ const char *mask;
|
|
+ struct fbcurpos hot;
|
|
+ struct fb_image image;
|
|
+};
|
|
+
|
|
+enum backlight_type {
|
|
+ BACKLIGHT_RAW = 1,
|
|
+ BACKLIGHT_PLATFORM = 2,
|
|
+ BACKLIGHT_FIRMWARE = 3,
|
|
+ BACKLIGHT_TYPE_MAX = 4,
|
|
+};
|
|
+
|
|
+struct backlight_device;
|
|
+
|
|
+struct fb_info;
|
|
+
|
|
+struct backlight_ops {
|
|
+ unsigned int options;
|
|
+ int (*update_status)(struct backlight_device *);
|
|
+ int (*get_brightness)(struct backlight_device *);
|
|
+ int (*check_fb)(struct backlight_device *, struct fb_info *);
|
|
+};
|
|
+
|
|
+struct backlight_properties {
|
|
+ int brightness;
|
|
+ int max_brightness;
|
|
+ int power;
|
|
+ int fb_blank;
|
|
+ enum backlight_type type;
|
|
+ unsigned int state;
|
|
+};
|
|
+
|
|
+struct backlight_device {
|
|
+ struct backlight_properties props;
|
|
+ struct mutex update_lock;
|
|
+ struct mutex ops_lock;
|
|
+ const struct backlight_ops *ops;
|
|
+ struct notifier_block fb_notif;
|
|
+ struct list_head entry;
|
|
+ struct device dev;
|
|
+ bool fb_bl_on[32];
|
|
+ int use_count;
|
|
+};
|
|
+
|
|
+struct fb_chroma {
|
|
+ __u32 redx;
|
|
+ __u32 greenx;
|
|
+ __u32 bluex;
|
|
+ __u32 whitex;
|
|
+ __u32 redy;
|
|
+ __u32 greeny;
|
|
+ __u32 bluey;
|
|
+ __u32 whitey;
|
|
+};
|
|
+
|
|
+struct fb_videomode;
|
|
+
|
|
+struct fb_monspecs {
|
|
+ struct fb_chroma chroma;
|
|
+ struct fb_videomode *modedb;
|
|
+ __u8 manufacturer[4];
|
|
+ __u8 monitor[14];
|
|
+ __u8 serial_no[14];
|
|
+ __u8 ascii[14];
|
|
+ __u32 modedb_len;
|
|
+ __u32 model;
|
|
+ __u32 serial;
|
|
+ __u32 year;
|
|
+ __u32 week;
|
|
+ __u32 hfmin;
|
|
+ __u32 hfmax;
|
|
+ __u32 dclkmin;
|
|
+ __u32 dclkmax;
|
|
+ __u16 input;
|
|
+ __u16 dpms;
|
|
+ __u16 signal;
|
|
+ __u16 vfmin;
|
|
+ __u16 vfmax;
|
|
+ __u16 gamma;
|
|
+ __u16 gtf: 1;
|
|
+ __u16 misc;
|
|
+ __u8 version;
|
|
+ __u8 revision;
|
|
+ __u8 max_x;
|
|
+ __u8 max_y;
|
|
+};
|
|
+
|
|
+struct fb_pixmap {
|
|
+ u8 *addr;
|
|
+ u32 size;
|
|
+ u32 offset;
|
|
+ u32 buf_align;
|
|
+ u32 scan_align;
|
|
+ u32 access_align;
|
|
+ u32 flags;
|
|
+ u32 blit_x;
|
|
+ u32 blit_y;
|
|
+ void (*writeio)(struct fb_info *, void *, void *, unsigned int);
|
|
+ void (*readio)(struct fb_info *, void *, void *, unsigned int);
|
|
+};
|
|
+
|
|
+struct fb_deferred_io;
|
|
+
|
|
+struct fb_ops;
|
|
+
|
|
+struct fb_tile_ops;
|
|
+
|
|
+struct apertures_struct;
|
|
+
|
|
+struct fb_info {
|
|
+ atomic_t count;
|
|
+ int node;
|
|
+ int flags;
|
|
+ int fbcon_rotate_hint;
|
|
+ struct mutex lock;
|
|
+ struct mutex mm_lock;
|
|
+ struct fb_var_screeninfo var;
|
|
+ struct fb_fix_screeninfo fix;
|
|
+ struct fb_monspecs monspecs;
|
|
+ struct work_struct queue;
|
|
+ struct fb_pixmap pixmap;
|
|
+ struct fb_pixmap sprite;
|
|
+ struct fb_cmap cmap;
|
|
+ struct list_head modelist;
|
|
+ struct fb_videomode *mode;
|
|
+ struct backlight_device *bl_dev;
|
|
+ struct mutex bl_curve_mutex;
|
|
+ u8 bl_curve[128];
|
|
+ struct delayed_work deferred_work;
|
|
+ struct fb_deferred_io *fbdefio;
|
|
+ struct fb_ops *fbops;
|
|
+ struct device *device;
|
|
+ struct device *dev;
|
|
+ int class_flag;
|
|
+ struct fb_tile_ops *tileops;
|
|
+ union {
|
|
+ char *screen_base;
|
|
+ char *screen_buffer;
|
|
+ };
|
|
+ long unsigned int screen_size;
|
|
+ void *pseudo_palette;
|
|
+ u32 state;
|
|
+ void *fbcon_par;
|
|
+ void *par;
|
|
+ struct apertures_struct *apertures;
|
|
+ bool skip_vt_switch;
|
|
+};
|
|
+
|
|
+struct fb_videomode {
|
|
+ const char *name;
|
|
+ u32 refresh;
|
|
+ u32 xres;
|
|
+ u32 yres;
|
|
+ u32 pixclock;
|
|
+ u32 left_margin;
|
|
+ u32 right_margin;
|
|
+ u32 upper_margin;
|
|
+ u32 lower_margin;
|
|
+ u32 hsync_len;
|
|
+ u32 vsync_len;
|
|
+ u32 sync;
|
|
+ u32 vmode;
|
|
+ u32 flag;
|
|
+};
|
|
+
|
|
+struct fb_blit_caps {
|
|
+ u32 x;
|
|
+ u32 y;
|
|
+ u32 len;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+struct fb_deferred_io {
|
|
+ long unsigned int delay;
|
|
+ struct mutex lock;
|
|
+ struct list_head pagelist;
|
|
+ void (*first_io)(struct fb_info *);
|
|
+ void (*deferred_io)(struct fb_info *, struct list_head *);
|
|
+};
|
|
+
|
|
+struct fb_ops {
|
|
+ struct module *owner;
|
|
+ int (*fb_open)(struct fb_info *, int);
|
|
+ int (*fb_release)(struct fb_info *, int);
|
|
+ ssize_t (*fb_read)(struct fb_info *, char *, size_t, loff_t *);
|
|
+ ssize_t (*fb_write)(struct fb_info *, const char *, size_t, loff_t *);
|
|
+ int (*fb_check_var)(struct fb_var_screeninfo *, struct fb_info *);
|
|
+ int (*fb_set_par)(struct fb_info *);
|
|
+ int (*fb_setcolreg)(unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, struct fb_info *);
|
|
+ int (*fb_setcmap)(struct fb_cmap *, struct fb_info *);
|
|
+ int (*fb_blank)(int, struct fb_info *);
|
|
+ int (*fb_pan_display)(struct fb_var_screeninfo *, struct fb_info *);
|
|
+ void (*fb_fillrect)(struct fb_info *, const struct fb_fillrect *);
|
|
+ void (*fb_copyarea)(struct fb_info *, const struct fb_copyarea *);
|
|
+ void (*fb_imageblit)(struct fb_info *, const struct fb_image *);
|
|
+ int (*fb_cursor)(struct fb_info *, struct fb_cursor *);
|
|
+ int (*fb_sync)(struct fb_info *);
|
|
+ int (*fb_ioctl)(struct fb_info *, unsigned int, long unsigned int);
|
|
+ int (*fb_compat_ioctl)(struct fb_info *, unsigned int, long unsigned int);
|
|
+ int (*fb_mmap)(struct fb_info *, struct vm_area_struct *);
|
|
+ void (*fb_get_caps)(struct fb_info *, struct fb_blit_caps *, struct fb_var_screeninfo *);
|
|
+ void (*fb_destroy)(struct fb_info *);
|
|
+ int (*fb_debug_enter)(struct fb_info *);
|
|
+ int (*fb_debug_leave)(struct fb_info *);
|
|
+};
|
|
+
|
|
+struct fb_tilemap {
|
|
+ __u32 width;
|
|
+ __u32 height;
|
|
+ __u32 depth;
|
|
+ __u32 length;
|
|
+ const __u8 *data;
|
|
+};
|
|
+
|
|
+struct fb_tilerect {
|
|
+ __u32 sx;
|
|
+ __u32 sy;
|
|
+ __u32 width;
|
|
+ __u32 height;
|
|
+ __u32 index;
|
|
+ __u32 fg;
|
|
+ __u32 bg;
|
|
+ __u32 rop;
|
|
+};
|
|
+
|
|
+struct fb_tilearea {
|
|
+ __u32 sx;
|
|
+ __u32 sy;
|
|
+ __u32 dx;
|
|
+ __u32 dy;
|
|
+ __u32 width;
|
|
+ __u32 height;
|
|
+};
|
|
+
|
|
+struct fb_tileblit {
|
|
+ __u32 sx;
|
|
+ __u32 sy;
|
|
+ __u32 width;
|
|
+ __u32 height;
|
|
+ __u32 fg;
|
|
+ __u32 bg;
|
|
+ __u32 length;
|
|
+ __u32 *indices;
|
|
+};
|
|
+
|
|
+struct fb_tilecursor {
|
|
+ __u32 sx;
|
|
+ __u32 sy;
|
|
+ __u32 mode;
|
|
+ __u32 shape;
|
|
+ __u32 fg;
|
|
+ __u32 bg;
|
|
+};
|
|
+
|
|
+struct fb_tile_ops {
|
|
+ void (*fb_settile)(struct fb_info *, struct fb_tilemap *);
|
|
+ void (*fb_tilecopy)(struct fb_info *, struct fb_tilearea *);
|
|
+ void (*fb_tilefill)(struct fb_info *, struct fb_tilerect *);
|
|
+ void (*fb_tileblit)(struct fb_info *, struct fb_tileblit *);
|
|
+ void (*fb_tilecursor)(struct fb_info *, struct fb_tilecursor *);
|
|
+ int (*fb_get_tilemax)(struct fb_info *);
|
|
+};
|
|
+
|
|
+struct aperture {
|
|
+ resource_size_t base;
|
|
+ resource_size_t size;
|
|
+};
|
|
+
|
|
+struct apertures_struct {
|
|
+ unsigned int count;
|
|
+ struct aperture ranges[0];
|
|
+};
|
|
+
|
|
+struct dmt_videomode {
|
|
+ u32 dmt_id;
|
|
+ u32 std_2byte_code;
|
|
+ u32 cvt_3byte_code;
|
|
+ const struct fb_videomode *mode;
|
|
+};
|
|
+
|
|
+struct simplefb_platform_data {
|
|
+ u32 width;
|
|
+ u32 height;
|
|
+ u32 stride;
|
|
+ const char *format;
|
|
+};
|
|
+
|
|
+struct efifb_dmi_info {
|
|
+ char *optname;
|
|
+ long unsigned int base;
|
|
+ int stride;
|
|
+ int width;
|
|
+ int height;
|
|
+ int flags;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ M_I17 = 0,
|
|
+ M_I20 = 1,
|
|
+ M_I20_SR = 2,
|
|
+ M_I24 = 3,
|
|
+ M_I24_8_1 = 4,
|
|
+ M_I24_10_1 = 5,
|
|
+ M_I27_11_1 = 6,
|
|
+ M_MINI = 7,
|
|
+ M_MINI_3_1 = 8,
|
|
+ M_MINI_4_1 = 9,
|
|
+ M_MB = 10,
|
|
+ M_MB_2 = 11,
|
|
+ M_MB_3 = 12,
|
|
+ M_MB_5_1 = 13,
|
|
+ M_MB_6_1 = 14,
|
|
+ M_MB_7_1 = 15,
|
|
+ M_MB_SR = 16,
|
|
+ M_MBA = 17,
|
|
+ M_MBA_3 = 18,
|
|
+ M_MBP = 19,
|
|
+ M_MBP_2 = 20,
|
|
+ M_MBP_2_2 = 21,
|
|
+ M_MBP_SR = 22,
|
|
+ M_MBP_4 = 23,
|
|
+ M_MBP_5_1 = 24,
|
|
+ M_MBP_5_2 = 25,
|
|
+ M_MBP_5_3 = 26,
|
|
+ M_MBP_6_1 = 27,
|
|
+ M_MBP_6_2 = 28,
|
|
+ M_MBP_7_1 = 29,
|
|
+ M_MBP_8_2 = 30,
|
|
+ M_UNKNOWN = 31,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ OVERRIDE_NONE = 0,
|
|
+ OVERRIDE_BASE = 1,
|
|
+ OVERRIDE_STRIDE = 2,
|
|
+ OVERRIDE_HEIGHT = 4,
|
|
+ OVERRIDE_WIDTH = 8,
|
|
+};
|
|
+
|
|
+enum perf_sample_regs_abi {
|
|
+ PERF_SAMPLE_REGS_ABI_NONE = 0,
|
|
+ PERF_SAMPLE_REGS_ABI_32 = 1,
|
|
+ PERF_SAMPLE_REGS_ABI_64 = 2,
|
|
+};
|
|
+
|
|
+struct __va_list_tag {
|
|
+ unsigned int gp_offset;
|
|
+ unsigned int fp_offset;
|
|
+ void *overflow_arg_area;
|
|
+ void *reg_save_area;
|
|
+};
|
|
+
|
|
+typedef __builtin_va_list __gnuc_va_list;
|
|
+
|
|
+typedef __gnuc_va_list va_list;
|
|
+
|
|
+struct va_format {
|
|
+ const char *fmt;
|
|
+ va_list *va;
|
|
+};
|
|
+
|
|
+enum x86_pf_error_code {
|
|
+ X86_PF_PROT = 1,
|
|
+ X86_PF_WRITE = 2,
|
|
+ X86_PF_USER = 4,
|
|
+ X86_PF_RSVD = 8,
|
|
+ X86_PF_INSTR = 16,
|
|
+ X86_PF_PK = 32,
|
|
+};
|
|
+
|
|
+struct pci_hostbridge_probe {
|
|
+ u32 bus;
|
|
+ u32 slot;
|
|
+ u32 vendor;
|
|
+ u32 device;
|
|
+};
|
|
+
|
|
+enum pg_level {
|
|
+ PG_LEVEL_NONE = 0,
|
|
+ PG_LEVEL_4K = 1,
|
|
+ PG_LEVEL_2M = 2,
|
|
+ PG_LEVEL_1G = 3,
|
|
+ PG_LEVEL_512G = 4,
|
|
+ PG_LEVEL_NUM = 5,
|
|
+};
|
|
+
|
|
+struct trace_print_flags {
|
|
+ long unsigned int mask;
|
|
+ const char *name;
|
|
+};
|
|
+
|
|
+enum tlb_flush_reason {
|
|
+ TLB_FLUSH_ON_TASK_SWITCH = 0,
|
|
+ TLB_REMOTE_SHOOTDOWN = 1,
|
|
+ TLB_LOCAL_SHOOTDOWN = 2,
|
|
+ TLB_LOCAL_MM_SHOOTDOWN = 3,
|
|
+ TLB_REMOTE_SEND_IPI = 4,
|
|
+ NR_TLB_FLUSH_REASONS = 5,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ REGION_INTERSECTS = 0,
|
|
+ REGION_DISJOINT = 1,
|
|
+ REGION_MIXED = 2,
|
|
+};
|
|
+
|
|
+struct trace_event_raw_tlb_flush {
|
|
+ struct trace_entry ent;
|
|
+ int reason;
|
|
+ long unsigned int pages;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_tlb_flush {};
|
|
+
|
|
+struct map_range {
|
|
+ long unsigned int start;
|
|
+ long unsigned int end;
|
|
+ unsigned int page_size_mask;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12,
|
|
+ SECTION_INFO = 12,
|
|
+ MIX_SECTION_INFO = 13,
|
|
+ NODE_INFO = 14,
|
|
+ MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE = 14,
|
|
+};
|
|
+
|
|
+enum kcore_type {
|
|
+ KCORE_TEXT = 0,
|
|
+ KCORE_VMALLOC = 1,
|
|
+ KCORE_RAM = 2,
|
|
+ KCORE_VMEMMAP = 3,
|
|
+ KCORE_USER = 4,
|
|
+ KCORE_OTHER = 5,
|
|
+ KCORE_REMAP = 6,
|
|
+};
|
|
+
|
|
+struct kcore_list {
|
|
+ struct list_head list;
|
|
+ long unsigned int addr;
|
|
+ long unsigned int vaddr;
|
|
+ size_t size;
|
|
+ int type;
|
|
+};
|
|
+
|
|
+struct hstate {
|
|
+ int next_nid_to_alloc;
|
|
+ int next_nid_to_free;
|
|
+ unsigned int order;
|
|
+ long unsigned int mask;
|
|
+ long unsigned int max_huge_pages;
|
|
+ long unsigned int nr_huge_pages;
|
|
+ long unsigned int free_huge_pages;
|
|
+ long unsigned int resv_huge_pages;
|
|
+ long unsigned int surplus_huge_pages;
|
|
+ long unsigned int nr_overcommit_huge_pages;
|
|
+ struct list_head hugepage_activelist;
|
|
+ struct list_head hugepage_freelists[1024];
|
|
+ unsigned int nr_huge_pages_node[1024];
|
|
+ unsigned int free_huge_pages_node[1024];
|
|
+ unsigned int surplus_huge_pages_node[1024];
|
|
+ unsigned int resv_huge_pages_node[1024];
|
|
+ struct cftype cgroup_files[5];
|
|
+ char name[32];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_x86_exceptions {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int address;
|
|
+ long unsigned int ip;
|
|
+ long unsigned int error_code;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_x86_exceptions {};
|
|
+
|
|
+struct ioremap_mem_flags {
|
|
+ bool system_ram;
|
|
+ bool desc_other;
|
|
+};
|
|
+
|
|
+typedef bool (*ex_handler_t)(const struct exception_table_entry *, struct pt_regs *, int);
|
|
+
|
|
+struct cpa_data {
|
|
+ long unsigned int *vaddr;
|
|
+ pgd_t *pgd;
|
|
+ pgprot_t mask_set;
|
|
+ pgprot_t mask_clr;
|
|
+ long unsigned int numpages;
|
|
+ int flags;
|
|
+ long unsigned int pfn;
|
|
+ unsigned int force_split: 1;
|
|
+ int curpage;
|
|
+ struct page **pages;
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ u64 val;
|
|
+} pfn_t;
|
|
+
|
|
+struct memtype {
|
|
+ u64 start;
|
|
+ u64 end;
|
|
+ u64 subtree_max_end;
|
|
+ enum page_cache_mode type;
|
|
+ struct rb_node rb;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ PAT_UC = 0,
|
|
+ PAT_WC = 1,
|
|
+ PAT_WT = 4,
|
|
+ PAT_WP = 5,
|
|
+ PAT_WB = 6,
|
|
+ PAT_UC_MINUS = 7,
|
|
+};
|
|
+
|
|
+struct pagerange_state {
|
|
+ long unsigned int cur_pfn;
|
|
+ int ram;
|
|
+ int not_ram;
|
|
+};
|
|
+
|
|
+typedef u16 pto_T_____19;
|
|
+
|
|
+typedef struct mm_struct *pto_T_____20;
|
|
+
|
|
+struct rb_augment_callbacks {
|
|
+ void (*propagate)(struct rb_node *, struct rb_node *);
|
|
+ void (*copy)(struct rb_node *, struct rb_node *);
|
|
+ void (*rotate)(struct rb_node *, struct rb_node *);
|
|
+};
|
|
+
|
|
+enum {
|
|
+ MEMTYPE_EXACT_MATCH = 0,
|
|
+ MEMTYPE_END_MATCH = 1,
|
|
+};
|
|
+
|
|
+struct hugepage_subpool {
|
|
+ spinlock_t lock;
|
|
+ long int count;
|
|
+ long int max_hpages;
|
|
+ long int used_hpages;
|
|
+ struct hstate *hstate;
|
|
+ long int min_hpages;
|
|
+ long int rsv_hpages;
|
|
+};
|
|
+
|
|
+struct hugetlbfs_sb_info {
|
|
+ long int max_inodes;
|
|
+ long int free_inodes;
|
|
+ spinlock_t stat_lock;
|
|
+ struct hstate *hstate;
|
|
+ struct hugepage_subpool *spool;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ umode_t mode;
|
|
+};
|
|
+
|
|
+struct numa_memblk {
|
|
+ u64 start;
|
|
+ u64 end;
|
|
+ int nid;
|
|
+};
|
|
+
|
|
+struct numa_meminfo {
|
|
+ int nr_blks;
|
|
+ struct numa_memblk blk[2048];
|
|
+};
|
|
+
|
|
+struct acpi_srat_cpu_affinity {
|
|
+ struct acpi_subtable_header header;
|
|
+ u8 proximity_domain_lo;
|
|
+ u8 apic_id;
|
|
+ u32 flags;
|
|
+ u8 local_sapic_eid;
|
|
+ u8 proximity_domain_hi[3];
|
|
+ u32 clock_domain;
|
|
+};
|
|
+
|
|
+struct acpi_srat_x2apic_cpu_affinity {
|
|
+ struct acpi_subtable_header header;
|
|
+ u16 reserved;
|
|
+ u32 proximity_domain;
|
|
+ u32 apic_id;
|
|
+ u32 flags;
|
|
+ u32 clock_domain;
|
|
+ u32 reserved2;
|
|
+};
|
|
+
|
|
+struct rnd_state {
|
|
+ __u32 s1;
|
|
+ __u32 s2;
|
|
+ __u32 s3;
|
|
+ __u32 s4;
|
|
+};
|
|
+
|
|
+struct kaslr_memory_region {
|
|
+ long unsigned int *base;
|
|
+ long unsigned int size_tb;
|
|
+};
|
|
+
|
|
+enum pti_mode {
|
|
+ PTI_AUTO = 0,
|
|
+ PTI_FORCE_OFF = 1,
|
|
+ PTI_FORCE_ON = 2,
|
|
+};
|
|
+
|
|
+enum pti_clone_level {
|
|
+ PTI_CLONE_PMD = 0,
|
|
+ PTI_CLONE_PTE = 1,
|
|
+};
|
|
+
|
|
+struct sme_populate_pgd_data {
|
|
+ void *pgtable_area;
|
|
+ pgd_t *pgd;
|
|
+ pmdval_t pmd_flags;
|
|
+ pteval_t pte_flags;
|
|
+ long unsigned int paddr;
|
|
+ long unsigned int vaddr;
|
|
+ long unsigned int vaddr_end;
|
|
+};
|
|
+
|
|
+typedef __u64 __le64;
|
|
+
|
|
+typedef struct {
|
|
+ u64 a;
|
|
+ u64 b;
|
|
+} u128;
|
|
+
|
|
+typedef struct {
|
|
+ __be64 a;
|
|
+ __be64 b;
|
|
+} be128;
|
|
+
|
|
+typedef struct {
|
|
+ __le64 b;
|
|
+ __le64 a;
|
|
+} le128;
|
|
+
|
|
+struct crypto_async_request;
|
|
+
|
|
+typedef void (*crypto_completion_t)(struct crypto_async_request *, int);
|
|
+
|
|
+struct crypto_tfm;
|
|
+
|
|
+struct crypto_async_request {
|
|
+ struct list_head list;
|
|
+ crypto_completion_t complete;
|
|
+ void *data;
|
|
+ struct crypto_tfm *tfm;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+struct crypto_ablkcipher;
|
|
+
|
|
+struct ablkcipher_request;
|
|
+
|
|
+struct ablkcipher_tfm {
|
|
+ int (*setkey)(struct crypto_ablkcipher *, const u8 *, unsigned int);
|
|
+ int (*encrypt)(struct ablkcipher_request *);
|
|
+ int (*decrypt)(struct ablkcipher_request *);
|
|
+ struct crypto_ablkcipher *base;
|
|
+ unsigned int ivsize;
|
|
+ unsigned int reqsize;
|
|
+};
|
|
+
|
|
+struct blkcipher_desc;
|
|
+
|
|
+struct blkcipher_tfm {
|
|
+ void *iv;
|
|
+ int (*setkey)(struct crypto_tfm *, const u8 *, unsigned int);
|
|
+ int (*encrypt)(struct blkcipher_desc *, struct scatterlist *, struct scatterlist *, unsigned int);
|
|
+ int (*decrypt)(struct blkcipher_desc *, struct scatterlist *, struct scatterlist *, unsigned int);
|
|
+};
|
|
+
|
|
+struct cipher_tfm {
|
|
+ int (*cit_setkey)(struct crypto_tfm *, const u8 *, unsigned int);
|
|
+ void (*cit_encrypt_one)(struct crypto_tfm *, u8 *, const u8 *);
|
|
+ void (*cit_decrypt_one)(struct crypto_tfm *, u8 *, const u8 *);
|
|
+};
|
|
+
|
|
+struct compress_tfm {
|
|
+ int (*cot_compress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *);
|
|
+ int (*cot_decompress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *);
|
|
+};
|
|
+
|
|
+struct crypto_alg;
|
|
+
|
|
+struct crypto_tfm {
|
|
+ u32 crt_flags;
|
|
+ union {
|
|
+ struct ablkcipher_tfm ablkcipher;
|
|
+ struct blkcipher_tfm blkcipher;
|
|
+ struct cipher_tfm cipher;
|
|
+ struct compress_tfm compress;
|
|
+ } crt_u;
|
|
+ void (*exit)(struct crypto_tfm *);
|
|
+ struct crypto_alg *__crt_alg;
|
|
+ void *__crt_ctx[0];
|
|
+};
|
|
+
|
|
+struct ablkcipher_request {
|
|
+ struct crypto_async_request base;
|
|
+ unsigned int nbytes;
|
|
+ void *info;
|
|
+ struct scatterlist *src;
|
|
+ struct scatterlist *dst;
|
|
+ void *__ctx[0];
|
|
+};
|
|
+
|
|
+struct crypto_blkcipher;
|
|
+
|
|
+struct blkcipher_desc {
|
|
+ struct crypto_blkcipher *tfm;
|
|
+ void *info;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+struct crypto_blkcipher {
|
|
+ struct crypto_tfm base;
|
|
+};
|
|
+
|
|
+struct skcipher_givcrypt_request;
|
|
+
|
|
+struct ablkcipher_alg {
|
|
+ int (*setkey)(struct crypto_ablkcipher *, const u8 *, unsigned int);
|
|
+ int (*encrypt)(struct ablkcipher_request *);
|
|
+ int (*decrypt)(struct ablkcipher_request *);
|
|
+ int (*givencrypt)(struct skcipher_givcrypt_request *);
|
|
+ int (*givdecrypt)(struct skcipher_givcrypt_request *);
|
|
+ const char *geniv;
|
|
+ unsigned int min_keysize;
|
|
+ unsigned int max_keysize;
|
|
+ unsigned int ivsize;
|
|
+};
|
|
+
|
|
+struct crypto_ablkcipher {
|
|
+ struct crypto_tfm base;
|
|
+};
|
|
+
|
|
+struct skcipher_givcrypt_request {
|
|
+ u64 seq;
|
|
+ u8 *giv;
|
|
+ struct ablkcipher_request creq;
|
|
+};
|
|
+
|
|
+struct blkcipher_alg {
|
|
+ int (*setkey)(struct crypto_tfm *, const u8 *, unsigned int);
|
|
+ int (*encrypt)(struct blkcipher_desc *, struct scatterlist *, struct scatterlist *, unsigned int);
|
|
+ int (*decrypt)(struct blkcipher_desc *, struct scatterlist *, struct scatterlist *, unsigned int);
|
|
+ const char *geniv;
|
|
+ unsigned int min_keysize;
|
|
+ unsigned int max_keysize;
|
|
+ unsigned int ivsize;
|
|
+};
|
|
+
|
|
+struct cipher_alg {
|
|
+ unsigned int cia_min_keysize;
|
|
+ unsigned int cia_max_keysize;
|
|
+ int (*cia_setkey)(struct crypto_tfm *, const u8 *, unsigned int);
|
|
+ void (*cia_encrypt)(struct crypto_tfm *, u8 *, const u8 *);
|
|
+ void (*cia_decrypt)(struct crypto_tfm *, u8 *, const u8 *);
|
|
+};
|
|
+
|
|
+struct compress_alg {
|
|
+ int (*coa_compress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *);
|
|
+ int (*coa_decompress)(struct crypto_tfm *, const u8 *, unsigned int, u8 *, unsigned int *);
|
|
+};
|
|
+
|
|
+struct crypto_type;
|
|
+
|
|
+struct crypto_alg {
|
|
+ struct list_head cra_list;
|
|
+ struct list_head cra_users;
|
|
+ u32 cra_flags;
|
|
+ unsigned int cra_blocksize;
|
|
+ unsigned int cra_ctxsize;
|
|
+ unsigned int cra_alignmask;
|
|
+ int cra_priority;
|
|
+ refcount_t cra_refcnt;
|
|
+ char cra_name[128];
|
|
+ char cra_driver_name[128];
|
|
+ const struct crypto_type *cra_type;
|
|
+ union {
|
|
+ struct ablkcipher_alg ablkcipher;
|
|
+ struct blkcipher_alg blkcipher;
|
|
+ struct cipher_alg cipher;
|
|
+ struct compress_alg compress;
|
|
+ } cra_u;
|
|
+ int (*cra_init)(struct crypto_tfm *);
|
|
+ void (*cra_exit)(struct crypto_tfm *);
|
|
+ void (*cra_destroy)(struct crypto_alg *);
|
|
+ struct module *cra_module;
|
|
+};
|
|
+
|
|
+struct crypto_instance;
|
|
+
|
|
+struct crypto_type {
|
|
+ unsigned int (*ctxsize)(struct crypto_alg *, u32, u32);
|
|
+ unsigned int (*extsize)(struct crypto_alg *);
|
|
+ int (*init)(struct crypto_tfm *, u32, u32);
|
|
+ int (*init_tfm)(struct crypto_tfm *);
|
|
+ void (*show)(struct seq_file *, struct crypto_alg *);
|
|
+ int (*report)(struct sk_buff *, struct crypto_alg *);
|
|
+ void (*free)(struct crypto_instance *);
|
|
+ unsigned int type;
|
|
+ unsigned int maskclear;
|
|
+ unsigned int maskset;
|
|
+ unsigned int tfmsize;
|
|
+};
|
|
+
|
|
+struct crypto_template;
|
|
+
|
|
+struct crypto_instance {
|
|
+ struct crypto_alg alg;
|
|
+ struct crypto_template *tmpl;
|
|
+ struct hlist_node list;
|
|
+ void *__ctx[0];
|
|
+};
|
|
+
|
|
+struct rtattr;
|
|
+
|
|
+struct crypto_template {
|
|
+ struct list_head list;
|
|
+ struct hlist_head instances;
|
|
+ struct module *module;
|
|
+ struct crypto_instance * (*alloc)(struct rtattr **);
|
|
+ void (*free)(struct crypto_instance *);
|
|
+ int (*create)(struct crypto_template *, struct rtattr **);
|
|
+ char name[128];
|
|
+};
|
|
+
|
|
+struct scatter_walk {
|
|
+ struct scatterlist *sg;
|
|
+ unsigned int offset;
|
|
+};
|
|
+
|
|
+struct skcipher_request {
|
|
+ unsigned int cryptlen;
|
|
+ u8 *iv;
|
|
+ struct scatterlist *src;
|
|
+ struct scatterlist *dst;
|
|
+ struct crypto_async_request base;
|
|
+ void *__ctx[0];
|
|
+};
|
|
+
|
|
+struct crypto_skcipher {
|
|
+ int (*setkey)(struct crypto_skcipher *, const u8 *, unsigned int);
|
|
+ int (*encrypt)(struct skcipher_request *);
|
|
+ int (*decrypt)(struct skcipher_request *);
|
|
+ unsigned int ivsize;
|
|
+ unsigned int reqsize;
|
|
+ unsigned int keysize;
|
|
+ struct crypto_tfm base;
|
|
+};
|
|
+
|
|
+struct skcipher_walk {
|
|
+ union {
|
|
+ struct {
|
|
+ struct page *page;
|
|
+ long unsigned int offset;
|
|
+ } phys;
|
|
+ struct {
|
|
+ u8 *page;
|
|
+ void *addr;
|
|
+ } virt;
|
|
+ } src;
|
|
+ union {
|
|
+ struct {
|
|
+ struct page *page;
|
|
+ long unsigned int offset;
|
|
+ } phys;
|
|
+ struct {
|
|
+ u8 *page;
|
|
+ void *addr;
|
|
+ } virt;
|
|
+ } dst;
|
|
+ struct scatter_walk in;
|
|
+ unsigned int nbytes;
|
|
+ struct scatter_walk out;
|
|
+ unsigned int total;
|
|
+ struct list_head buffers;
|
|
+ u8 *page;
|
|
+ u8 *buffer;
|
|
+ u8 *oiv;
|
|
+ void *iv;
|
|
+ unsigned int ivsize;
|
|
+ int flags;
|
|
+ unsigned int blocksize;
|
|
+ unsigned int stride;
|
|
+ unsigned int alignmask;
|
|
+};
|
|
+
|
|
+typedef void (*common_glue_func_t)(void *, u8 *, const u8 *);
|
|
+
|
|
+typedef void (*common_glue_cbc_func_t)(void *, u128 *, const u128 *);
|
|
+
|
|
+typedef void (*common_glue_ctr_func_t)(void *, u128 *, const u128 *, le128 *);
|
|
+
|
|
+typedef void (*common_glue_xts_func_t)(void *, u128 *, const u128 *, le128 *);
|
|
+
|
|
+struct common_glue_func_entry {
|
|
+ unsigned int num_blocks;
|
|
+ union {
|
|
+ common_glue_func_t ecb;
|
|
+ common_glue_cbc_func_t cbc;
|
|
+ common_glue_ctr_func_t ctr;
|
|
+ common_glue_xts_func_t xts;
|
|
+ } fn_u;
|
|
+};
|
|
+
|
|
+struct common_glue_ctx {
|
|
+ unsigned int num_funcs;
|
|
+ int fpu_blocks_limit;
|
|
+ struct common_glue_func_entry funcs[0];
|
|
+};
|
|
+
|
|
+struct crypto_aes_ctx {
|
|
+ u32 key_enc[60];
|
|
+ u32 key_dec[60];
|
|
+ u32 key_length;
|
|
+};
|
|
+
|
|
+struct crypto_cipher {
|
|
+ struct crypto_tfm base;
|
|
+};
|
|
+
|
|
+struct aead_request {
|
|
+ struct crypto_async_request base;
|
|
+ unsigned int assoclen;
|
|
+ unsigned int cryptlen;
|
|
+ u8 *iv;
|
|
+ struct scatterlist *src;
|
|
+ struct scatterlist *dst;
|
|
+ void *__ctx[0];
|
|
+};
|
|
+
|
|
+struct crypto_aead;
|
|
+
|
|
+struct aead_alg {
|
|
+ int (*setkey)(struct crypto_aead *, const u8 *, unsigned int);
|
|
+ int (*setauthsize)(struct crypto_aead *, unsigned int);
|
|
+ int (*encrypt)(struct aead_request *);
|
|
+ int (*decrypt)(struct aead_request *);
|
|
+ int (*init)(struct crypto_aead *);
|
|
+ void (*exit)(struct crypto_aead *);
|
|
+ const char *geniv;
|
|
+ unsigned int ivsize;
|
|
+ unsigned int maxauthsize;
|
|
+ unsigned int chunksize;
|
|
+ struct crypto_alg base;
|
|
+};
|
|
+
|
|
+struct crypto_aead {
|
|
+ unsigned int authsize;
|
|
+ unsigned int reqsize;
|
|
+ struct crypto_tfm base;
|
|
+};
|
|
+
|
|
+struct skcipher_alg {
|
|
+ int (*setkey)(struct crypto_skcipher *, const u8 *, unsigned int);
|
|
+ int (*encrypt)(struct skcipher_request *);
|
|
+ int (*decrypt)(struct skcipher_request *);
|
|
+ int (*init)(struct crypto_skcipher *);
|
|
+ void (*exit)(struct crypto_skcipher *);
|
|
+ unsigned int min_keysize;
|
|
+ unsigned int max_keysize;
|
|
+ unsigned int ivsize;
|
|
+ unsigned int chunksize;
|
|
+ unsigned int walksize;
|
|
+ struct crypto_alg base;
|
|
+};
|
|
+
|
|
+struct cryptd_aead {
|
|
+ struct crypto_aead base;
|
|
+};
|
|
+
|
|
+struct aesni_rfc4106_gcm_ctx {
|
|
+ u8 hash_subkey[16];
|
|
+ struct crypto_aes_ctx aes_key_expanded;
|
|
+ u8 nonce[4];
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct generic_gcmaes_ctx {
|
|
+ u8 hash_subkey[16];
|
|
+ struct crypto_aes_ctx aes_key_expanded;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct aesni_xts_ctx {
|
|
+ u8 raw_tweak_ctx[484];
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ u8 raw_crypt_ctx[484];
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct gcm_context_data {
|
|
+ u8 aad_hash[16];
|
|
+ u64 aad_length;
|
|
+ u64 in_length;
|
|
+ u8 partial_block_enc_key[16];
|
|
+ u8 orig_IV[16];
|
|
+ u8 current_counter[16];
|
|
+ u64 partial_block_len;
|
|
+ u64 unused;
|
|
+ u8 hash_keys[128];
|
|
+};
|
|
+
|
|
+struct simd_skcipher_alg;
|
|
+
|
|
+struct crypto_attr_type {
|
|
+ u32 type;
|
|
+ u32 mask;
|
|
+};
|
|
+
|
|
+struct crypto_spawn {
|
|
+ struct list_head list;
|
|
+ struct crypto_alg *alg;
|
|
+ struct crypto_instance *inst;
|
|
+ const struct crypto_type *frontend;
|
|
+ u32 mask;
|
|
+};
|
|
+
|
|
+struct skcipher_instance {
|
|
+ void (*free)(struct skcipher_instance *);
|
|
+ union {
|
|
+ struct {
|
|
+ char head[64];
|
|
+ struct crypto_instance base;
|
|
+ } s;
|
|
+ struct skcipher_alg alg;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct crypto_skcipher_spawn {
|
|
+ struct crypto_spawn base;
|
|
+};
|
|
+
|
|
+struct crypto_fpu_ctx {
|
|
+ struct crypto_skcipher *child;
|
|
+};
|
|
+
|
|
+struct crypto_shash;
|
|
+
|
|
+struct shash_desc {
|
|
+ struct crypto_shash *tfm;
|
|
+ u32 flags;
|
|
+ void *__ctx[0];
|
|
+};
|
|
+
|
|
+struct crypto_shash {
|
|
+ unsigned int descsize;
|
|
+ struct crypto_tfm base;
|
|
+};
|
|
+
|
|
+struct shash_alg {
|
|
+ int (*init)(struct shash_desc *);
|
|
+ int (*update)(struct shash_desc *, const u8 *, unsigned int);
|
|
+ int (*final)(struct shash_desc *, u8 *);
|
|
+ int (*finup)(struct shash_desc *, const u8 *, unsigned int, u8 *);
|
|
+ int (*digest)(struct shash_desc *, const u8 *, unsigned int, u8 *);
|
|
+ int (*export)(struct shash_desc *, void *);
|
|
+ int (*import)(struct shash_desc *, const void *);
|
|
+ int (*setkey)(struct crypto_shash *, const u8 *, unsigned int);
|
|
+ unsigned int descsize;
|
|
+ int: 32;
|
|
+ unsigned int digestsize;
|
|
+ unsigned int statesize;
|
|
+ struct crypto_alg base;
|
|
+};
|
|
+
|
|
+struct sha1_state {
|
|
+ u32 state[5];
|
|
+ u64 count;
|
|
+ u8 buffer[64];
|
|
+};
|
|
+
|
|
+typedef void sha1_block_fn(struct sha1_state *, const u8 *, int);
|
|
+
|
|
+typedef void sha1_transform_fn(u32 *, const char *, unsigned int);
|
|
+
|
|
+struct sha256_state {
|
|
+ u32 state[8];
|
|
+ u64 count;
|
|
+ u8 buf[64];
|
|
+};
|
|
+
|
|
+typedef void sha256_block_fn(struct sha256_state *, const u8 *, int);
|
|
+
|
|
+typedef void sha256_transform_fn(u32 *, const char *, u64);
|
|
+
|
|
+typedef short unsigned int __kernel_old_uid_t;
|
|
+
|
|
+typedef short unsigned int __kernel_old_gid_t;
|
|
+
|
|
+typedef struct {
|
|
+ int val[2];
|
|
+} __kernel_fsid_t;
|
|
+
|
|
+typedef __kernel_old_uid_t old_uid_t;
|
|
+
|
|
+typedef __kernel_old_gid_t old_gid_t;
|
|
+
|
|
+struct kstatfs {
|
|
+ long int f_type;
|
|
+ long int f_bsize;
|
|
+ u64 f_blocks;
|
|
+ u64 f_bfree;
|
|
+ u64 f_bavail;
|
|
+ u64 f_files;
|
|
+ u64 f_ffree;
|
|
+ __kernel_fsid_t f_fsid;
|
|
+ long int f_namelen;
|
|
+ long int f_frsize;
|
|
+ long int f_flags;
|
|
+ long int f_spare[4];
|
|
+};
|
|
+
|
|
+struct stat64 {
|
|
+ long long unsigned int st_dev;
|
|
+ unsigned char __pad0[4];
|
|
+ unsigned int __st_ino;
|
|
+ unsigned int st_mode;
|
|
+ unsigned int st_nlink;
|
|
+ unsigned int st_uid;
|
|
+ unsigned int st_gid;
|
|
+ long long unsigned int st_rdev;
|
|
+ unsigned char __pad3[4];
|
|
+ long long int st_size;
|
|
+ unsigned int st_blksize;
|
|
+ long long int st_blocks;
|
|
+ unsigned int st_atime;
|
|
+ unsigned int st_atime_nsec;
|
|
+ unsigned int st_mtime;
|
|
+ unsigned int st_mtime_nsec;
|
|
+ unsigned int st_ctime;
|
|
+ unsigned int st_ctime_nsec;
|
|
+ long long unsigned int st_ino;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct mmap_arg_struct32 {
|
|
+ unsigned int addr;
|
|
+ unsigned int len;
|
|
+ unsigned int prot;
|
|
+ unsigned int flags;
|
|
+ unsigned int fd;
|
|
+ unsigned int offset;
|
|
+};
|
|
+
|
|
+struct sigcontext_32 {
|
|
+ __u16 gs;
|
|
+ __u16 __gsh;
|
|
+ __u16 fs;
|
|
+ __u16 __fsh;
|
|
+ __u16 es;
|
|
+ __u16 __esh;
|
|
+ __u16 ds;
|
|
+ __u16 __dsh;
|
|
+ __u32 di;
|
|
+ __u32 si;
|
|
+ __u32 bp;
|
|
+ __u32 sp;
|
|
+ __u32 bx;
|
|
+ __u32 dx;
|
|
+ __u32 cx;
|
|
+ __u32 ax;
|
|
+ __u32 trapno;
|
|
+ __u32 err;
|
|
+ __u32 ip;
|
|
+ __u16 cs;
|
|
+ __u16 __csh;
|
|
+ __u32 flags;
|
|
+ __u32 sp_at_signal;
|
|
+ __u16 ss;
|
|
+ __u16 __ssh;
|
|
+ __u32 fpstate;
|
|
+ __u32 oldmask;
|
|
+ __u32 cr2;
|
|
+};
|
|
+
|
|
+typedef u32 compat_size_t;
|
|
+
|
|
+struct compat_sigaltstack {
|
|
+ compat_uptr_t ss_sp;
|
|
+ int ss_flags;
|
|
+ compat_size_t ss_size;
|
|
+};
|
|
+
|
|
+typedef struct compat_sigaltstack compat_stack_t;
|
|
+
|
|
+struct ucontext_ia32 {
|
|
+ unsigned int uc_flags;
|
|
+ unsigned int uc_link;
|
|
+ compat_stack_t uc_stack;
|
|
+ struct sigcontext_32 uc_mcontext;
|
|
+ compat_sigset_t uc_sigmask;
|
|
+};
|
|
+
|
|
+struct sigframe_ia32 {
|
|
+ u32 pretcode;
|
|
+ int sig;
|
|
+ struct sigcontext_32 sc;
|
|
+ struct _fpstate_32 fpstate_unused;
|
|
+ unsigned int extramask[1];
|
|
+ char retcode[8];
|
|
+};
|
|
+
|
|
+struct rt_sigframe_ia32 {
|
|
+ u32 pretcode;
|
|
+ int sig;
|
|
+ u32 pinfo;
|
|
+ u32 puc;
|
|
+ compat_siginfo_t info;
|
|
+ struct ucontext_ia32 uc;
|
|
+ char retcode[8];
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ efi_guid_t guid;
|
|
+ u64 table;
|
|
+} efi_config_table_64_t;
|
|
+
|
|
+struct efi_mem_range {
|
|
+ struct range range;
|
|
+ u64 attribute;
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ efi_table_hdr_t hdr;
|
|
+ u32 get_time;
|
|
+ u32 set_time;
|
|
+ u32 get_wakeup_time;
|
|
+ u32 set_wakeup_time;
|
|
+ u32 set_virtual_address_map;
|
|
+ u32 convert_pointer;
|
|
+ u32 get_variable;
|
|
+ u32 get_next_variable;
|
|
+ u32 set_variable;
|
|
+ u32 get_next_high_mono_count;
|
|
+ u32 reset_system;
|
|
+ u32 update_capsule;
|
|
+ u32 query_capsule_caps;
|
|
+ u32 query_variable_info;
|
|
+} efi_runtime_services_32_t;
|
|
+
|
|
+typedef struct {
|
|
+ efi_table_hdr_t hdr;
|
|
+ u64 get_time;
|
|
+ u64 set_time;
|
|
+ u64 get_wakeup_time;
|
|
+ u64 set_wakeup_time;
|
|
+ u64 set_virtual_address_map;
|
|
+ u64 convert_pointer;
|
|
+ u64 get_variable;
|
|
+ u64 get_next_variable;
|
|
+ u64 set_variable;
|
|
+ u64 get_next_high_mono_count;
|
|
+ u64 reset_system;
|
|
+ u64 update_capsule;
|
|
+ u64 query_capsule_caps;
|
|
+ u64 query_variable_info;
|
|
+} efi_runtime_services_64_t;
|
|
+
|
|
+typedef struct {
|
|
+ efi_guid_t guid;
|
|
+ const char *name;
|
|
+ long unsigned int *ptr;
|
|
+} efi_config_table_type_t;
|
|
+
|
|
+typedef struct {
|
|
+ efi_table_hdr_t hdr;
|
|
+ u64 fw_vendor;
|
|
+ u32 fw_revision;
|
|
+ u32 __pad1;
|
|
+ u64 con_in_handle;
|
|
+ u64 con_in;
|
|
+ u64 con_out_handle;
|
|
+ u64 con_out;
|
|
+ u64 stderr_handle;
|
|
+ u64 stderr;
|
|
+ u64 runtime;
|
|
+ u64 boottime;
|
|
+ u32 nr_tables;
|
|
+ u32 __pad2;
|
|
+ u64 tables;
|
|
+} efi_system_table_64_t;
|
|
+
|
|
+typedef struct {
|
|
+ efi_table_hdr_t hdr;
|
|
+ u32 fw_vendor;
|
|
+ u32 fw_revision;
|
|
+ u32 con_in_handle;
|
|
+ u32 con_in;
|
|
+ u32 con_out_handle;
|
|
+ u32 con_out;
|
|
+ u32 stderr_handle;
|
|
+ u32 stderr;
|
|
+ u32 runtime;
|
|
+ u32 boottime;
|
|
+ u32 nr_tables;
|
|
+ u32 tables;
|
|
+} efi_system_table_32_t;
|
|
+
|
|
+struct efi_memory_map_data {
|
|
+ phys_addr_t phys_map;
|
|
+ long unsigned int size;
|
|
+ long unsigned int desc_version;
|
|
+ long unsigned int desc_size;
|
|
+};
|
|
+
|
|
+struct font_desc {
|
|
+ int idx;
|
|
+ const char *name;
|
|
+ int width;
|
|
+ int height;
|
|
+ const void *data;
|
|
+ int pref;
|
|
+};
|
|
+
|
|
+struct sfi_table_header {
|
|
+ char sig[4];
|
|
+ u32 len;
|
|
+ u8 rev;
|
|
+ u8 csum;
|
|
+ char oem_id[6];
|
|
+ char oem_table_id[8];
|
|
+};
|
|
+
|
|
+struct sfi_table_simple {
|
|
+ struct sfi_table_header header;
|
|
+ u64 pentry[1];
|
|
+};
|
|
+
|
|
+struct sfi_cpu_table_entry {
|
|
+ u32 apic_id;
|
|
+};
|
|
+
|
|
+struct sfi_apic_table_entry {
|
|
+ u64 phys_addr;
|
|
+};
|
|
+
|
|
+enum uv_bau_version {
|
|
+ UV_BAU_V1 = 1,
|
|
+ UV_BAU_V2 = 2,
|
|
+ UV_BAU_V3 = 3,
|
|
+ UV_BAU_V4 = 4,
|
|
+};
|
|
+
|
|
+struct pnmask {
|
|
+ long unsigned int bits[4];
|
|
+};
|
|
+
|
|
+struct uv1_2_3_bau_msg_payload {
|
|
+ u64 address;
|
|
+ u16 sending_cpu;
|
|
+ u16 acknowledge_count;
|
|
+};
|
|
+
|
|
+struct uv4_bau_msg_payload {
|
|
+ u64 address;
|
|
+ u16 sending_cpu;
|
|
+ u16 acknowledge_count;
|
|
+ u32 reserved: 8;
|
|
+ u32 qualifier: 24;
|
|
+};
|
|
+
|
|
+struct uv1_bau_msg_header {
|
|
+ unsigned int dest_subnodeid: 6;
|
|
+ unsigned int base_dest_nasid: 15;
|
|
+ unsigned int command: 8;
|
|
+ unsigned int rsvd_1: 3;
|
|
+ unsigned int rsvd_2: 9;
|
|
+ unsigned int sequence: 16;
|
|
+ unsigned int rsvd_3: 1;
|
|
+ unsigned int replied_to: 1;
|
|
+ unsigned int msg_type: 3;
|
|
+ unsigned int canceled: 1;
|
|
+ unsigned int payload_1a: 1;
|
|
+ unsigned int payload_1b: 2;
|
|
+ unsigned int payload_1ca: 6;
|
|
+ unsigned int payload_1c: 2;
|
|
+ unsigned int payload_1d: 6;
|
|
+ unsigned int payload_1e: 2;
|
|
+ unsigned int rsvd_4: 7;
|
|
+ unsigned int swack_flag: 1;
|
|
+ unsigned int rsvd_5: 6;
|
|
+ unsigned int rsvd_6: 5;
|
|
+ unsigned int int_both: 1;
|
|
+ unsigned int fairness: 3;
|
|
+ unsigned int multilevel: 1;
|
|
+ unsigned int chaining: 1;
|
|
+ unsigned int rsvd_7: 21;
|
|
+};
|
|
+
|
|
+struct uv2_3_bau_msg_header {
|
|
+ unsigned int base_dest_nasid: 15;
|
|
+ unsigned int dest_subnodeid: 5;
|
|
+ unsigned int rsvd_1: 1;
|
|
+ unsigned int replied_to: 1;
|
|
+ unsigned int msg_type: 3;
|
|
+ unsigned int canceled: 1;
|
|
+ unsigned int payload_1: 3;
|
|
+ unsigned int payload_2a: 3;
|
|
+ unsigned int payload_2b: 5;
|
|
+ unsigned int payload_3: 8;
|
|
+ unsigned int rsvd_2: 7;
|
|
+ unsigned int swack_flag: 1;
|
|
+ unsigned int rsvd_3a: 3;
|
|
+ unsigned int rsvd_3b: 8;
|
|
+ unsigned int rsvd_3c: 8;
|
|
+ unsigned int rsvd_3d: 3;
|
|
+ unsigned int fairness: 3;
|
|
+ unsigned int sequence: 16;
|
|
+ unsigned int chaining: 1;
|
|
+ unsigned int multilevel: 1;
|
|
+ unsigned int rsvd_4: 24;
|
|
+ unsigned int command: 8;
|
|
+};
|
|
+
|
|
+union bau_msg_header {
|
|
+ struct uv1_bau_msg_header uv1_hdr;
|
|
+ struct uv2_3_bau_msg_header uv2_3_hdr;
|
|
+};
|
|
+
|
|
+union bau_payload_header {
|
|
+ struct uv1_2_3_bau_msg_payload uv1_2_3;
|
|
+ struct uv4_bau_msg_payload uv4;
|
|
+};
|
|
+
|
|
+struct bau_desc {
|
|
+ struct pnmask distribution;
|
|
+ union bau_msg_header header;
|
|
+ union bau_payload_header payload;
|
|
+};
|
|
+
|
|
+struct bau_pq_entry {
|
|
+ long unsigned int address;
|
|
+ short unsigned int sending_cpu;
|
|
+ short unsigned int acknowledge_count;
|
|
+ short unsigned int replied_to: 1;
|
|
+ short unsigned int msg_type: 3;
|
|
+ short unsigned int canceled: 1;
|
|
+ short unsigned int unused1: 3;
|
|
+ unsigned char unused2a;
|
|
+ unsigned char unused2;
|
|
+ unsigned char swack_vec;
|
|
+ short unsigned int sequence;
|
|
+ unsigned char unused4[2];
|
|
+ int number_of_cpus;
|
|
+ unsigned char unused5[8];
|
|
+};
|
|
+
|
|
+struct msg_desc {
|
|
+ struct bau_pq_entry *msg;
|
|
+ int msg_slot;
|
|
+ struct bau_pq_entry *queue_first;
|
|
+ struct bau_pq_entry *queue_last;
|
|
+};
|
|
+
|
|
+struct reset_args {
|
|
+ int sender;
|
|
+};
|
|
+
|
|
+struct ptc_stats {
|
|
+ long unsigned int s_giveup;
|
|
+ long unsigned int s_requestor;
|
|
+ long unsigned int s_stimeout;
|
|
+ long unsigned int s_dtimeout;
|
|
+ long unsigned int s_strongnacks;
|
|
+ long unsigned int s_time;
|
|
+ long unsigned int s_retriesok;
|
|
+ long unsigned int s_ntargcpu;
|
|
+ long unsigned int s_ntargself;
|
|
+ long unsigned int s_ntarglocals;
|
|
+ long unsigned int s_ntargremotes;
|
|
+ long unsigned int s_ntarglocaluvhub;
|
|
+ long unsigned int s_ntargremoteuvhub;
|
|
+ long unsigned int s_ntarguvhub;
|
|
+ long unsigned int s_ntarguvhub16;
|
|
+ long unsigned int s_ntarguvhub8;
|
|
+ long unsigned int s_ntarguvhub4;
|
|
+ long unsigned int s_ntarguvhub2;
|
|
+ long unsigned int s_ntarguvhub1;
|
|
+ long unsigned int s_resets_plug;
|
|
+ long unsigned int s_resets_timeout;
|
|
+ long unsigned int s_busy;
|
|
+ long unsigned int s_throttles;
|
|
+ long unsigned int s_retry_messages;
|
|
+ long unsigned int s_bau_reenabled;
|
|
+ long unsigned int s_bau_disabled;
|
|
+ long unsigned int s_uv2_wars;
|
|
+ long unsigned int s_uv2_wars_hw;
|
|
+ long unsigned int s_uv2_war_waits;
|
|
+ long unsigned int s_overipilimit;
|
|
+ long unsigned int s_giveuplimit;
|
|
+ long unsigned int s_enters;
|
|
+ long unsigned int s_ipifordisabled;
|
|
+ long unsigned int s_plugged;
|
|
+ long unsigned int s_congested;
|
|
+ long unsigned int d_alltlb;
|
|
+ long unsigned int d_onetlb;
|
|
+ long unsigned int d_multmsg;
|
|
+ long unsigned int d_nomsg;
|
|
+ long unsigned int d_time;
|
|
+ long unsigned int d_requestee;
|
|
+ long unsigned int d_retries;
|
|
+ long unsigned int d_canceled;
|
|
+ long unsigned int d_nocanceled;
|
|
+ long unsigned int d_resets;
|
|
+ long unsigned int d_rcanceled;
|
|
+};
|
|
+
|
|
+struct tunables {
|
|
+ int *tunp;
|
|
+ int deflt;
|
|
+};
|
|
+
|
|
+struct hub_and_pnode {
|
|
+ short int uvhub;
|
|
+ short int pnode;
|
|
+};
|
|
+
|
|
+struct socket_desc {
|
|
+ short int num_cpus;
|
|
+ short int cpu_number[64];
|
|
+};
|
|
+
|
|
+struct uvhub_desc {
|
|
+ short unsigned int socket_mask;
|
|
+ short int num_cpus;
|
|
+ short int uvhub;
|
|
+ short int pnode;
|
|
+ struct socket_desc socket[2];
|
|
+};
|
|
+
|
|
+struct bau_control {
|
|
+ struct bau_desc *descriptor_base;
|
|
+ struct bau_pq_entry *queue_first;
|
|
+ struct bau_pq_entry *queue_last;
|
|
+ struct bau_pq_entry *bau_msg_head;
|
|
+ struct bau_control *uvhub_master;
|
|
+ struct bau_control *socket_master;
|
|
+ struct ptc_stats *statp;
|
|
+ cpumask_t *cpumask;
|
|
+ long unsigned int timeout_interval;
|
|
+ long unsigned int set_bau_on_time;
|
|
+ atomic_t active_descriptor_count;
|
|
+ int plugged_tries;
|
|
+ int timeout_tries;
|
|
+ int ipi_attempts;
|
|
+ int conseccompletes;
|
|
+ u64 status_mmr;
|
|
+ int status_index;
|
|
+ bool nobau;
|
|
+ short int baudisabled;
|
|
+ short int cpu;
|
|
+ short int osnode;
|
|
+ short int uvhub_cpu;
|
|
+ short int uvhub;
|
|
+ short int uvhub_version;
|
|
+ short int cpus_in_socket;
|
|
+ short int cpus_in_uvhub;
|
|
+ short int partition_base_pnode;
|
|
+ short int busy;
|
|
+ short unsigned int message_number;
|
|
+ short unsigned int uvhub_quiesce;
|
|
+ short int socket_acknowledge_count[20];
|
|
+ cycles_t send_message;
|
|
+ cycles_t period_end;
|
|
+ cycles_t period_time;
|
|
+ spinlock_t uvhub_lock;
|
|
+ spinlock_t queue_lock;
|
|
+ spinlock_t disable_lock;
|
|
+ int max_concurr;
|
|
+ int max_concurr_const;
|
|
+ int plugged_delay;
|
|
+ int plugsb4reset;
|
|
+ int timeoutsb4reset;
|
|
+ int ipi_reset_limit;
|
|
+ int complete_threshold;
|
|
+ int cong_response_us;
|
|
+ int cong_reps;
|
|
+ cycles_t disabled_period;
|
|
+ int period_giveups;
|
|
+ int giveup_limit;
|
|
+ long int period_requests;
|
|
+ struct hub_and_pnode *thp;
|
|
+};
|
|
+
|
|
+struct bau_operations {
|
|
+ long unsigned int (*read_l_sw_ack)();
|
|
+ long unsigned int (*read_g_sw_ack)(int);
|
|
+ long unsigned int (*bau_gpa_to_offset)(long unsigned int);
|
|
+ void (*write_l_sw_ack)(long unsigned int);
|
|
+ void (*write_g_sw_ack)(int, long unsigned int);
|
|
+ void (*write_payload_first)(int, long unsigned int);
|
|
+ void (*write_payload_last)(int, long unsigned int);
|
|
+ int (*wait_completion)(struct bau_desc *, struct bau_control *, long int);
|
|
+};
|
|
+
|
|
+struct atomic_short {
|
|
+ short int counter;
|
|
+};
|
|
+
|
|
+enum uv_bios_cmd {
|
|
+ UV_BIOS_COMMON = 0,
|
|
+ UV_BIOS_GET_SN_INFO = 1,
|
|
+ UV_BIOS_FREQ_BASE = 2,
|
|
+ UV_BIOS_WATCHLIST_ALLOC = 3,
|
|
+ UV_BIOS_WATCHLIST_FREE = 4,
|
|
+ UV_BIOS_MEMPROTECT = 5,
|
|
+ UV_BIOS_GET_PARTITION_ADDR = 6,
|
|
+ UV_BIOS_SET_LEGACY_VGA_TARGET = 7,
|
|
+};
|
|
+
|
|
+union partition_info_u {
|
|
+ u64 val;
|
|
+ struct {
|
|
+ u64 hub_version: 8;
|
|
+ u64 partition_id: 16;
|
|
+ u64 coherence_id: 16;
|
|
+ u64 region_size: 24;
|
|
+ };
|
|
+};
|
|
+
|
|
+enum uv_memprotect {
|
|
+ UV_MEMPROT_RESTRICT_ACCESS = 0,
|
|
+ UV_MEMPROT_ALLOW_AMO = 1,
|
|
+ UV_MEMPROT_ALLOW_RW = 2,
|
|
+};
|
|
+
|
|
+struct uv_IO_APIC_route_entry {
|
|
+ __u64 vector: 8;
|
|
+ __u64 delivery_mode: 3;
|
|
+ __u64 dest_mode: 1;
|
|
+ __u64 delivery_status: 1;
|
|
+ __u64 polarity: 1;
|
|
+ __u64 __reserved_1: 1;
|
|
+ __u64 trigger: 1;
|
|
+ __u64 mask: 1;
|
|
+ __u64 __reserved_2: 15;
|
|
+ __u64 dest: 32;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ UV_AFFINITY_ALL = 0,
|
|
+ UV_AFFINITY_NODE = 1,
|
|
+ UV_AFFINITY_CPU = 2,
|
|
+};
|
|
+
|
|
+struct uv_irq_2_mmr_pnode {
|
|
+ long unsigned int offset;
|
|
+ int pnode;
|
|
+};
|
|
+
|
|
+struct uv_rtc_timer_head {
|
|
+ spinlock_t lock;
|
|
+ int next_cpu;
|
|
+ int ncpus;
|
|
+ struct {
|
|
+ int lcpu;
|
|
+ u64 expires;
|
|
+ } cpu[1];
|
|
+};
|
|
+
|
|
+typedef int (*get_char_func)();
|
|
+
|
|
+struct nmi_action {
|
|
+ char *action;
|
|
+ char *desc;
|
|
+};
|
|
+
|
|
+typedef char action_t[16];
|
|
+
|
|
+struct init_nmi {
|
|
+ unsigned int offset;
|
|
+ unsigned int mask;
|
|
+ unsigned int data;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ BPF_REG_0 = 0,
|
|
+ BPF_REG_1 = 1,
|
|
+ BPF_REG_2 = 2,
|
|
+ BPF_REG_3 = 3,
|
|
+ BPF_REG_4 = 4,
|
|
+ BPF_REG_5 = 5,
|
|
+ BPF_REG_6 = 6,
|
|
+ BPF_REG_7 = 7,
|
|
+ BPF_REG_8 = 8,
|
|
+ BPF_REG_9 = 9,
|
|
+ BPF_REG_10 = 10,
|
|
+ __MAX_BPF_REG = 11,
|
|
+};
|
|
+
|
|
+struct bpf_cgroup_storage_key {
|
|
+ __u64 cgroup_inode_id;
|
|
+ __u32 attach_type;
|
|
+};
|
|
+
|
|
+enum bpf_map_type {
|
|
+ BPF_MAP_TYPE_UNSPEC = 0,
|
|
+ BPF_MAP_TYPE_HASH = 1,
|
|
+ BPF_MAP_TYPE_ARRAY = 2,
|
|
+ BPF_MAP_TYPE_PROG_ARRAY = 3,
|
|
+ BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4,
|
|
+ BPF_MAP_TYPE_PERCPU_HASH = 5,
|
|
+ BPF_MAP_TYPE_PERCPU_ARRAY = 6,
|
|
+ BPF_MAP_TYPE_STACK_TRACE = 7,
|
|
+ BPF_MAP_TYPE_CGROUP_ARRAY = 8,
|
|
+ BPF_MAP_TYPE_LRU_HASH = 9,
|
|
+ BPF_MAP_TYPE_LRU_PERCPU_HASH = 10,
|
|
+ BPF_MAP_TYPE_LPM_TRIE = 11,
|
|
+ BPF_MAP_TYPE_ARRAY_OF_MAPS = 12,
|
|
+ BPF_MAP_TYPE_HASH_OF_MAPS = 13,
|
|
+ BPF_MAP_TYPE_DEVMAP = 14,
|
|
+ BPF_MAP_TYPE_SOCKMAP = 15,
|
|
+ BPF_MAP_TYPE_CPUMAP = 16,
|
|
+ BPF_MAP_TYPE_XSKMAP = 17,
|
|
+ BPF_MAP_TYPE_SOCKHASH = 18,
|
|
+ BPF_MAP_TYPE_CGROUP_STORAGE = 19,
|
|
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20,
|
|
+};
|
|
+
|
|
+union bpf_attr {
|
|
+ struct {
|
|
+ __u32 map_type;
|
|
+ __u32 key_size;
|
|
+ __u32 value_size;
|
|
+ __u32 max_entries;
|
|
+ __u32 map_flags;
|
|
+ __u32 inner_map_fd;
|
|
+ __u32 numa_node;
|
|
+ char map_name[16];
|
|
+ __u32 map_ifindex;
|
|
+ __u32 btf_fd;
|
|
+ __u32 btf_key_type_id;
|
|
+ __u32 btf_value_type_id;
|
|
+ };
|
|
+ struct {
|
|
+ __u32 map_fd;
|
|
+ __u64 key;
|
|
+ union {
|
|
+ __u64 value;
|
|
+ __u64 next_key;
|
|
+ };
|
|
+ __u64 flags;
|
|
+ };
|
|
+ struct {
|
|
+ __u32 prog_type;
|
|
+ __u32 insn_cnt;
|
|
+ __u64 insns;
|
|
+ __u64 license;
|
|
+ __u32 log_level;
|
|
+ __u32 log_size;
|
|
+ __u64 log_buf;
|
|
+ __u32 kern_version;
|
|
+ __u32 prog_flags;
|
|
+ char prog_name[16];
|
|
+ __u32 prog_ifindex;
|
|
+ __u32 expected_attach_type;
|
|
+ };
|
|
+ struct {
|
|
+ __u64 pathname;
|
|
+ __u32 bpf_fd;
|
|
+ __u32 file_flags;
|
|
+ };
|
|
+ struct {
|
|
+ __u32 target_fd;
|
|
+ __u32 attach_bpf_fd;
|
|
+ __u32 attach_type;
|
|
+ __u32 attach_flags;
|
|
+ };
|
|
+ struct {
|
|
+ __u32 prog_fd;
|
|
+ __u32 retval;
|
|
+ __u32 data_size_in;
|
|
+ __u32 data_size_out;
|
|
+ __u64 data_in;
|
|
+ __u64 data_out;
|
|
+ __u32 repeat;
|
|
+ __u32 duration;
|
|
+ } test;
|
|
+ struct {
|
|
+ union {
|
|
+ __u32 start_id;
|
|
+ __u32 prog_id;
|
|
+ __u32 map_id;
|
|
+ __u32 btf_id;
|
|
+ };
|
|
+ __u32 next_id;
|
|
+ __u32 open_flags;
|
|
+ };
|
|
+ struct {
|
|
+ __u32 bpf_fd;
|
|
+ __u32 info_len;
|
|
+ __u64 info;
|
|
+ } info;
|
|
+ struct {
|
|
+ __u32 target_fd;
|
|
+ __u32 attach_type;
|
|
+ __u32 query_flags;
|
|
+ __u32 attach_flags;
|
|
+ __u64 prog_ids;
|
|
+ __u32 prog_cnt;
|
|
+ } query;
|
|
+ struct {
|
|
+ __u64 name;
|
|
+ __u32 prog_fd;
|
|
+ } raw_tracepoint;
|
|
+ struct {
|
|
+ __u64 btf;
|
|
+ __u64 btf_log_buf;
|
|
+ __u32 btf_size;
|
|
+ __u32 btf_log_size;
|
|
+ __u32 btf_log_level;
|
|
+ };
|
|
+ struct {
|
|
+ __u32 pid;
|
|
+ __u32 fd;
|
|
+ __u32 flags;
|
|
+ __u32 buf_len;
|
|
+ __u64 buf;
|
|
+ __u32 prog_id;
|
|
+ __u32 fd_type;
|
|
+ __u64 probe_offset;
|
|
+ __u64 probe_addr;
|
|
+ } task_fd_query;
|
|
+};
|
|
+
|
|
+enum bpf_func_id {
|
|
+ BPF_FUNC_unspec = 0,
|
|
+ BPF_FUNC_map_lookup_elem = 1,
|
|
+ BPF_FUNC_map_update_elem = 2,
|
|
+ BPF_FUNC_map_delete_elem = 3,
|
|
+ BPF_FUNC_probe_read = 4,
|
|
+ BPF_FUNC_ktime_get_ns = 5,
|
|
+ BPF_FUNC_trace_printk = 6,
|
|
+ BPF_FUNC_get_prandom_u32 = 7,
|
|
+ BPF_FUNC_get_smp_processor_id = 8,
|
|
+ BPF_FUNC_skb_store_bytes = 9,
|
|
+ BPF_FUNC_l3_csum_replace = 10,
|
|
+ BPF_FUNC_l4_csum_replace = 11,
|
|
+ BPF_FUNC_tail_call = 12,
|
|
+ BPF_FUNC_clone_redirect = 13,
|
|
+ BPF_FUNC_get_current_pid_tgid = 14,
|
|
+ BPF_FUNC_get_current_uid_gid = 15,
|
|
+ BPF_FUNC_get_current_comm = 16,
|
|
+ BPF_FUNC_get_cgroup_classid = 17,
|
|
+ BPF_FUNC_skb_vlan_push = 18,
|
|
+ BPF_FUNC_skb_vlan_pop = 19,
|
|
+ BPF_FUNC_skb_get_tunnel_key = 20,
|
|
+ BPF_FUNC_skb_set_tunnel_key = 21,
|
|
+ BPF_FUNC_perf_event_read = 22,
|
|
+ BPF_FUNC_redirect = 23,
|
|
+ BPF_FUNC_get_route_realm = 24,
|
|
+ BPF_FUNC_perf_event_output = 25,
|
|
+ BPF_FUNC_skb_load_bytes = 26,
|
|
+ BPF_FUNC_get_stackid = 27,
|
|
+ BPF_FUNC_csum_diff = 28,
|
|
+ BPF_FUNC_skb_get_tunnel_opt = 29,
|
|
+ BPF_FUNC_skb_set_tunnel_opt = 30,
|
|
+ BPF_FUNC_skb_change_proto = 31,
|
|
+ BPF_FUNC_skb_change_type = 32,
|
|
+ BPF_FUNC_skb_under_cgroup = 33,
|
|
+ BPF_FUNC_get_hash_recalc = 34,
|
|
+ BPF_FUNC_get_current_task = 35,
|
|
+ BPF_FUNC_probe_write_user = 36,
|
|
+ BPF_FUNC_current_task_under_cgroup = 37,
|
|
+ BPF_FUNC_skb_change_tail = 38,
|
|
+ BPF_FUNC_skb_pull_data = 39,
|
|
+ BPF_FUNC_csum_update = 40,
|
|
+ BPF_FUNC_set_hash_invalid = 41,
|
|
+ BPF_FUNC_get_numa_node_id = 42,
|
|
+ BPF_FUNC_skb_change_head = 43,
|
|
+ BPF_FUNC_xdp_adjust_head = 44,
|
|
+ BPF_FUNC_probe_read_str = 45,
|
|
+ BPF_FUNC_get_socket_cookie = 46,
|
|
+ BPF_FUNC_get_socket_uid = 47,
|
|
+ BPF_FUNC_set_hash = 48,
|
|
+ BPF_FUNC_setsockopt = 49,
|
|
+ BPF_FUNC_skb_adjust_room = 50,
|
|
+ BPF_FUNC_redirect_map = 51,
|
|
+ BPF_FUNC_sk_redirect_map = 52,
|
|
+ BPF_FUNC_sock_map_update = 53,
|
|
+ BPF_FUNC_xdp_adjust_meta = 54,
|
|
+ BPF_FUNC_perf_event_read_value = 55,
|
|
+ BPF_FUNC_perf_prog_read_value = 56,
|
|
+ BPF_FUNC_getsockopt = 57,
|
|
+ BPF_FUNC_override_return = 58,
|
|
+ BPF_FUNC_sock_ops_cb_flags_set = 59,
|
|
+ BPF_FUNC_msg_redirect_map = 60,
|
|
+ BPF_FUNC_msg_apply_bytes = 61,
|
|
+ BPF_FUNC_msg_cork_bytes = 62,
|
|
+ BPF_FUNC_msg_pull_data = 63,
|
|
+ BPF_FUNC_bind = 64,
|
|
+ BPF_FUNC_xdp_adjust_tail = 65,
|
|
+ BPF_FUNC_skb_get_xfrm_state = 66,
|
|
+ BPF_FUNC_get_stack = 67,
|
|
+ BPF_FUNC_skb_load_bytes_relative = 68,
|
|
+ BPF_FUNC_fib_lookup = 69,
|
|
+ BPF_FUNC_sock_hash_update = 70,
|
|
+ BPF_FUNC_msg_redirect_hash = 71,
|
|
+ BPF_FUNC_sk_redirect_hash = 72,
|
|
+ BPF_FUNC_lwt_push_encap = 73,
|
|
+ BPF_FUNC_lwt_seg6_store_bytes = 74,
|
|
+ BPF_FUNC_lwt_seg6_adjust_srh = 75,
|
|
+ BPF_FUNC_lwt_seg6_action = 76,
|
|
+ BPF_FUNC_rc_repeat = 77,
|
|
+ BPF_FUNC_rc_keydown = 78,
|
|
+ BPF_FUNC_skb_cgroup_id = 79,
|
|
+ BPF_FUNC_get_current_cgroup_id = 80,
|
|
+ BPF_FUNC_get_local_storage = 81,
|
|
+ BPF_FUNC_sk_select_reuseport = 82,
|
|
+ BPF_FUNC_skb_ancestor_cgroup_id = 83,
|
|
+ __BPF_FUNC_MAX_ID = 84,
|
|
+};
|
|
+
|
|
+struct bpf_storage_buffer {
|
|
+ struct callback_head rcu;
|
|
+ char data[0];
|
|
+};
|
|
+
|
|
+struct bpf_cgroup_storage_map;
|
|
+
|
|
+struct bpf_cgroup_storage {
|
|
+ struct bpf_storage_buffer *buf;
|
|
+ struct bpf_cgroup_storage_map *map;
|
|
+ struct bpf_cgroup_storage_key key;
|
|
+ struct list_head list;
|
|
+ struct rb_node node;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct bpf_prog_array_item {
|
|
+ struct bpf_prog *prog;
|
|
+ struct bpf_cgroup_storage *cgroup_storage;
|
|
+};
|
|
+
|
|
+struct bpf_prog_array {
|
|
+ struct callback_head rcu;
|
|
+ struct bpf_prog_array_item items[0];
|
|
+};
|
|
+
|
|
+struct bpf_map_ops;
|
|
+
|
|
+struct btf;
|
|
+
|
|
+struct bpf_map {
|
|
+ const struct bpf_map_ops *ops;
|
|
+ struct bpf_map *inner_map_meta;
|
|
+ void *security;
|
|
+ enum bpf_map_type map_type;
|
|
+ u32 key_size;
|
|
+ u32 value_size;
|
|
+ u32 max_entries;
|
|
+ u32 map_flags;
|
|
+ u32 pages;
|
|
+ u32 id;
|
|
+ int numa_node;
|
|
+ u32 btf_key_type_id;
|
|
+ u32 btf_value_type_id;
|
|
+ struct btf *btf;
|
|
+ bool unpriv_array;
|
|
+ long: 56;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct user_struct *user;
|
|
+ atomic_t refcnt;
|
|
+ atomic_t usercnt;
|
|
+ struct work_struct work;
|
|
+ char name[16];
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct bpf_map_dev_ops;
|
|
+
|
|
+struct bpf_offloaded_map {
|
|
+ struct bpf_map map;
|
|
+ struct net_device *netdev;
|
|
+ const struct bpf_map_dev_ops *dev_ops;
|
|
+ void *dev_priv;
|
|
+ struct list_head offloads;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct bpf_binary_header {
|
|
+ u32 pages;
|
|
+ u8 image[0];
|
|
+};
|
|
+
|
|
+struct bpf_prog_ops;
|
|
+
|
|
+struct bpf_prog_offload;
|
|
+
|
|
+struct bpf_prog_aux {
|
|
+ atomic_t refcnt;
|
|
+ u32 used_map_cnt;
|
|
+ u32 max_ctx_offset;
|
|
+ u32 stack_depth;
|
|
+ u32 id;
|
|
+ u32 func_cnt;
|
|
+ bool offload_requested;
|
|
+ struct bpf_prog **func;
|
|
+ void *jit_data;
|
|
+ struct latch_tree_node ksym_tnode;
|
|
+ struct list_head ksym_lnode;
|
|
+ const struct bpf_prog_ops *ops;
|
|
+ struct bpf_map **used_maps;
|
|
+ struct bpf_prog *prog;
|
|
+ struct user_struct *user;
|
|
+ u64 load_time;
|
|
+ struct bpf_map *cgroup_storage;
|
|
+ char name[16];
|
|
+ void *security;
|
|
+ struct bpf_prog_offload *offload;
|
|
+ union {
|
|
+ struct work_struct work;
|
|
+ struct callback_head rcu;
|
|
+ };
|
|
+ u32 max_tp_access;
|
|
+};
|
|
+
|
|
+struct btf_type;
|
|
+
|
|
+struct bpf_map_ops {
|
|
+ int (*map_alloc_check)(union bpf_attr *);
|
|
+ struct bpf_map * (*map_alloc)(union bpf_attr *);
|
|
+ void (*map_release)(struct bpf_map *, struct file *);
|
|
+ void (*map_free)(struct bpf_map *);
|
|
+ int (*map_get_next_key)(struct bpf_map *, void *, void *);
|
|
+ void (*map_release_uref)(struct bpf_map *);
|
|
+ void * (*map_lookup_elem_sys_only)(struct bpf_map *, void *);
|
|
+ void * (*map_lookup_elem)(struct bpf_map *, void *);
|
|
+ int (*map_update_elem)(struct bpf_map *, void *, void *, u64);
|
|
+ int (*map_delete_elem)(struct bpf_map *, void *);
|
|
+ void * (*map_fd_get_ptr)(struct bpf_map *, struct file *, int);
|
|
+ void (*map_fd_put_ptr)(void *);
|
|
+ u32 (*map_gen_lookup)(struct bpf_map *, struct bpf_insn *);
|
|
+ u32 (*map_fd_sys_lookup_elem)(void *);
|
|
+ void (*map_seq_show_elem)(struct bpf_map *, void *, struct seq_file *);
|
|
+ int (*map_check_btf)(const struct bpf_map *, const struct btf_type *, const struct btf_type *);
|
|
+};
|
|
+
|
|
+struct btf_type {
|
|
+ __u32 name_off;
|
|
+ __u32 info;
|
|
+ union {
|
|
+ __u32 size;
|
|
+ __u32 type;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct bpf_map_dev_ops {
|
|
+ int (*map_get_next_key)(struct bpf_offloaded_map *, void *, void *);
|
|
+ int (*map_lookup_elem)(struct bpf_offloaded_map *, void *, void *);
|
|
+ int (*map_update_elem)(struct bpf_offloaded_map *, void *, void *, u64);
|
|
+ int (*map_delete_elem)(struct bpf_offloaded_map *, void *);
|
|
+};
|
|
+
|
|
+enum bpf_arg_type {
|
|
+ ARG_DONTCARE = 0,
|
|
+ ARG_CONST_MAP_PTR = 1,
|
|
+ ARG_PTR_TO_MAP_KEY = 2,
|
|
+ ARG_PTR_TO_MAP_VALUE = 3,
|
|
+ ARG_PTR_TO_MEM = 4,
|
|
+ ARG_PTR_TO_MEM_OR_NULL = 5,
|
|
+ ARG_PTR_TO_UNINIT_MEM = 6,
|
|
+ ARG_CONST_SIZE = 7,
|
|
+ ARG_CONST_SIZE_OR_ZERO = 8,
|
|
+ ARG_PTR_TO_CTX = 9,
|
|
+ ARG_ANYTHING = 10,
|
|
+};
|
|
+
|
|
+enum bpf_return_type {
|
|
+ RET_INTEGER = 0,
|
|
+ RET_VOID = 1,
|
|
+ RET_PTR_TO_MAP_VALUE = 2,
|
|
+ RET_PTR_TO_MAP_VALUE_OR_NULL = 3,
|
|
+};
|
|
+
|
|
+struct bpf_func_proto {
|
|
+ u64 (*func)(u64, u64, u64, u64, u64);
|
|
+ bool gpl_only;
|
|
+ bool pkt_access;
|
|
+ enum bpf_return_type ret_type;
|
|
+ enum bpf_arg_type arg1_type;
|
|
+ enum bpf_arg_type arg2_type;
|
|
+ enum bpf_arg_type arg3_type;
|
|
+ enum bpf_arg_type arg4_type;
|
|
+ enum bpf_arg_type arg5_type;
|
|
+};
|
|
+
|
|
+enum bpf_access_type {
|
|
+ BPF_READ = 1,
|
|
+ BPF_WRITE = 2,
|
|
+};
|
|
+
|
|
+enum bpf_reg_type {
|
|
+ NOT_INIT = 0,
|
|
+ SCALAR_VALUE = 1,
|
|
+ PTR_TO_CTX = 2,
|
|
+ CONST_PTR_TO_MAP = 3,
|
|
+ PTR_TO_MAP_VALUE = 4,
|
|
+ PTR_TO_MAP_VALUE_OR_NULL = 5,
|
|
+ PTR_TO_STACK = 6,
|
|
+ PTR_TO_PACKET_META = 7,
|
|
+ PTR_TO_PACKET = 8,
|
|
+ PTR_TO_PACKET_END = 9,
|
|
+ PTR_TO_FLOW_KEYS = 10,
|
|
+ PTR_TO_SOCKET = 11,
|
|
+ PTR_TO_SOCKET_OR_NULL = 12,
|
|
+ PTR_TO_SOCK_COMMON = 13,
|
|
+ PTR_TO_SOCK_COMMON_OR_NULL = 14,
|
|
+ PTR_TO_TCP_SOCK = 15,
|
|
+ PTR_TO_TCP_SOCK_OR_NULL = 16,
|
|
+ PTR_TO_TP_BUFFER = 17,
|
|
+};
|
|
+
|
|
+struct bpf_insn_access_aux {
|
|
+ enum bpf_reg_type reg_type;
|
|
+ int ctx_field_size;
|
|
+};
|
|
+
|
|
+struct bpf_prog_ops {
|
|
+ int (*test_run)(struct bpf_prog *, const union bpf_attr *, union bpf_attr *);
|
|
+};
|
|
+
|
|
+struct bpf_verifier_ops {
|
|
+ const struct bpf_func_proto * (*get_func_proto)(enum bpf_func_id, const struct bpf_prog *);
|
|
+ bool (*is_valid_access)(int, int, enum bpf_access_type, const struct bpf_prog *, struct bpf_insn_access_aux *);
|
|
+ int (*gen_prologue)(struct bpf_insn *, bool, const struct bpf_prog *);
|
|
+ int (*gen_ld_abs)(const struct bpf_insn *, struct bpf_insn *);
|
|
+ u32 (*convert_ctx_access)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog *, u32 *);
|
|
+};
|
|
+
|
|
+struct bpf_prog_offload {
|
|
+ struct bpf_prog *prog;
|
|
+ struct net_device *netdev;
|
|
+ void *dev_priv;
|
|
+ struct list_head offloads;
|
|
+ bool dev_state;
|
|
+ const struct bpf_prog_offload_ops *dev_ops;
|
|
+ void *jited_image;
|
|
+ u32 jited_len;
|
|
+};
|
|
+
|
|
+struct jit_context {
|
|
+ int cleanup_addr;
|
|
+};
|
|
+
|
|
+struct x64_jit_data {
|
|
+ struct bpf_binary_header *header;
|
|
+ int *addrs;
|
|
+ u8 *image;
|
|
+ int proglen;
|
|
+ struct jit_context ctx;
|
|
+};
|
|
+
|
|
+enum tk_offsets {
|
|
+ TK_OFFS_REAL = 0,
|
|
+ TK_OFFS_BOOT = 1,
|
|
+ TK_OFFS_TAI = 2,
|
|
+ TK_OFFS_MAX = 3,
|
|
+};
|
|
+
|
|
+struct fdtable {
|
|
+ unsigned int max_fds;
|
|
+ struct file **fd;
|
|
+ long unsigned int *close_on_exec;
|
|
+ long unsigned int *open_fds;
|
|
+ long unsigned int *full_fds_bits;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct files_cgroup;
|
|
+
|
|
+struct files_struct {
|
|
+ atomic_t count;
|
|
+ bool resize_in_progress;
|
|
+ wait_queue_head_t resize_wait;
|
|
+ struct fdtable *fdt;
|
|
+ struct fdtable fdtab;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ spinlock_t file_lock;
|
|
+ unsigned int next_fd;
|
|
+ long unsigned int close_on_exec_init[1];
|
|
+ long unsigned int open_fds_init[1];
|
|
+ long unsigned int full_fds_bits_init[1];
|
|
+ struct file *fd_array[64];
|
|
+ struct files_cgroup *files_cgroup;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct robust_list {
|
|
+ struct robust_list *next;
|
|
+};
|
|
+
|
|
+struct robust_list_head {
|
|
+ struct robust_list list;
|
|
+ long int futex_offset;
|
|
+ struct robust_list *list_op_pending;
|
|
+};
|
|
+
|
|
+struct multiprocess_signals {
|
|
+ sigset_t signal;
|
|
+ struct hlist_node node;
|
|
+};
|
|
+
|
|
+typedef int (*proc_visitor)(struct task_struct *, void *);
|
|
+
|
|
+enum {
|
|
+ IOPRIO_CLASS_NONE = 0,
|
|
+ IOPRIO_CLASS_RT = 1,
|
|
+ IOPRIO_CLASS_BE = 2,
|
|
+ IOPRIO_CLASS_IDLE = 3,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ FUTEX_STATE_OK = 0,
|
|
+ FUTEX_STATE_EXITING = 1,
|
|
+ FUTEX_STATE_DEAD = 2,
|
|
+};
|
|
+
|
|
+struct trace_event_raw_task_newtask {
|
|
+ struct trace_entry ent;
|
|
+ pid_t pid;
|
|
+ char comm[16];
|
|
+ long unsigned int clone_flags;
|
|
+ short int oom_score_adj;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_task_rename {
|
|
+ struct trace_entry ent;
|
|
+ pid_t pid;
|
|
+ char oldcomm[16];
|
|
+ char newcomm[16];
|
|
+ short int oom_score_adj;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_task_newtask {};
|
|
+
|
|
+struct trace_event_data_offsets_task_rename {};
|
|
+
|
|
+enum kmsg_dump_reason {
|
|
+ KMSG_DUMP_UNDEF = 0,
|
|
+ KMSG_DUMP_PANIC = 1,
|
|
+ KMSG_DUMP_OOPS = 2,
|
|
+ KMSG_DUMP_EMERG = 3,
|
|
+ KMSG_DUMP_RESTART = 4,
|
|
+ KMSG_DUMP_HALT = 5,
|
|
+ KMSG_DUMP_POWEROFF = 6,
|
|
+};
|
|
+
|
|
+struct vt_mode {
|
|
+ char mode;
|
|
+ char waitv;
|
|
+ short int relsig;
|
|
+ short int acqsig;
|
|
+ short int frsig;
|
|
+};
|
|
+
|
|
+struct console_font {
|
|
+ unsigned int width;
|
|
+ unsigned int height;
|
|
+ unsigned int charcount;
|
|
+ unsigned char *data;
|
|
+};
|
|
+
|
|
+struct uni_pagedir;
|
|
+
|
|
+struct uni_screen;
|
|
+
|
|
+struct vc_data {
|
|
+ struct tty_port port;
|
|
+ short unsigned int vc_num;
|
|
+ unsigned int vc_cols;
|
|
+ unsigned int vc_rows;
|
|
+ unsigned int vc_size_row;
|
|
+ unsigned int vc_scan_lines;
|
|
+ long unsigned int vc_origin;
|
|
+ long unsigned int vc_scr_end;
|
|
+ long unsigned int vc_visible_origin;
|
|
+ unsigned int vc_top;
|
|
+ unsigned int vc_bottom;
|
|
+ const struct consw *vc_sw;
|
|
+ short unsigned int *vc_screenbuf;
|
|
+ unsigned int vc_screenbuf_size;
|
|
+ unsigned char vc_mode;
|
|
+ unsigned char vc_attr;
|
|
+ unsigned char vc_def_color;
|
|
+ unsigned char vc_color;
|
|
+ unsigned char vc_s_color;
|
|
+ unsigned char vc_ulcolor;
|
|
+ unsigned char vc_itcolor;
|
|
+ unsigned char vc_halfcolor;
|
|
+ unsigned int vc_cursor_type;
|
|
+ short unsigned int vc_complement_mask;
|
|
+ short unsigned int vc_s_complement_mask;
|
|
+ unsigned int vc_x;
|
|
+ unsigned int vc_y;
|
|
+ unsigned int vc_saved_x;
|
|
+ unsigned int vc_saved_y;
|
|
+ long unsigned int vc_pos;
|
|
+ short unsigned int vc_hi_font_mask;
|
|
+ struct console_font vc_font;
|
|
+ short unsigned int vc_video_erase_char;
|
|
+ unsigned int vc_state;
|
|
+ unsigned int vc_npar;
|
|
+ unsigned int vc_par[16];
|
|
+ struct vt_mode vt_mode;
|
|
+ struct pid *vt_pid;
|
|
+ int vt_newvt;
|
|
+ wait_queue_head_t paste_wait;
|
|
+ unsigned int vc_charset: 1;
|
|
+ unsigned int vc_s_charset: 1;
|
|
+ unsigned int vc_disp_ctrl: 1;
|
|
+ unsigned int vc_toggle_meta: 1;
|
|
+ unsigned int vc_decscnm: 1;
|
|
+ unsigned int vc_decom: 1;
|
|
+ unsigned int vc_decawm: 1;
|
|
+ unsigned int vc_deccm: 1;
|
|
+ unsigned int vc_decim: 1;
|
|
+ unsigned int vc_intensity: 2;
|
|
+ unsigned int vc_italic: 1;
|
|
+ unsigned int vc_underline: 1;
|
|
+ unsigned int vc_blink: 1;
|
|
+ unsigned int vc_reverse: 1;
|
|
+ unsigned int vc_s_intensity: 2;
|
|
+ unsigned int vc_s_italic: 1;
|
|
+ unsigned int vc_s_underline: 1;
|
|
+ unsigned int vc_s_blink: 1;
|
|
+ unsigned int vc_s_reverse: 1;
|
|
+ unsigned int vc_ques: 1;
|
|
+ unsigned int vc_need_wrap: 1;
|
|
+ unsigned int vc_can_do_color: 1;
|
|
+ unsigned int vc_report_mouse: 2;
|
|
+ unsigned char vc_utf: 1;
|
|
+ unsigned char vc_utf_count;
|
|
+ int vc_utf_char;
|
|
+ unsigned int vc_tab_stop[8];
|
|
+ unsigned char vc_palette[48];
|
|
+ short unsigned int *vc_translate;
|
|
+ unsigned char vc_G0_charset;
|
|
+ unsigned char vc_G1_charset;
|
|
+ unsigned char vc_saved_G0;
|
|
+ unsigned char vc_saved_G1;
|
|
+ unsigned int vc_resize_user;
|
|
+ unsigned int vc_bell_pitch;
|
|
+ unsigned int vc_bell_duration;
|
|
+ short unsigned int vc_cur_blink_ms;
|
|
+ struct vc_data **vc_display_fg;
|
|
+ struct uni_pagedir *vc_uni_pagedir;
|
|
+ struct uni_pagedir **vc_uni_pagedir_loc;
|
|
+ struct uni_screen *vc_uni_screen;
|
|
+ bool vc_panic_force_write;
|
|
+};
|
|
+
|
|
+struct vc {
|
|
+ struct vc_data *d;
|
|
+ struct work_struct SAK_work;
|
|
+};
|
|
+
|
|
+struct vt_spawn_console {
|
|
+ spinlock_t lock;
|
|
+ struct pid *pid;
|
|
+ int sig;
|
|
+};
|
|
+
|
|
+struct warn_args {
|
|
+ const char *fmt;
|
|
+ va_list args;
|
|
+};
|
|
+
|
|
+struct smp_hotplug_thread {
|
|
+ struct task_struct **store;
|
|
+ struct list_head list;
|
|
+ int (*thread_should_run)(unsigned int);
|
|
+ void (*thread_fn)(unsigned int);
|
|
+ void (*create)(unsigned int);
|
|
+ void (*setup)(unsigned int);
|
|
+ void (*cleanup)(unsigned int, bool);
|
|
+ void (*park)(unsigned int);
|
|
+ void (*unpark)(unsigned int);
|
|
+ bool selfparking;
|
|
+ const char *thread_comm;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_cpuhp_enter {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int cpu;
|
|
+ int target;
|
|
+ int idx;
|
|
+ void *fun;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_cpuhp_multi_enter {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int cpu;
|
|
+ int target;
|
|
+ int idx;
|
|
+ void *fun;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_cpuhp_exit {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int cpu;
|
|
+ int state;
|
|
+ int idx;
|
|
+ int ret;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_cpuhp_enter {};
|
|
+
|
|
+struct trace_event_data_offsets_cpuhp_multi_enter {};
|
|
+
|
|
+struct trace_event_data_offsets_cpuhp_exit {};
|
|
+
|
|
+struct cpuhp_cpu_state {
|
|
+ enum cpuhp_state state;
|
|
+ enum cpuhp_state target;
|
|
+ enum cpuhp_state fail;
|
|
+ struct task_struct *thread;
|
|
+ bool should_run;
|
|
+ bool rollback;
|
|
+ bool single;
|
|
+ bool bringup;
|
|
+ bool booted_once;
|
|
+ struct hlist_node *node;
|
|
+ struct hlist_node *last;
|
|
+ enum cpuhp_state cb_state;
|
|
+ int result;
|
|
+ struct completion done_up;
|
|
+ struct completion done_down;
|
|
+};
|
|
+
|
|
+struct cpuhp_step {
|
|
+ const char *name;
|
|
+ union {
|
|
+ int (*single)(unsigned int);
|
|
+ int (*multi)(unsigned int, struct hlist_node *);
|
|
+ } startup;
|
|
+ union {
|
|
+ int (*single)(unsigned int);
|
|
+ int (*multi)(unsigned int, struct hlist_node *);
|
|
+ } teardown;
|
|
+ struct hlist_head list;
|
|
+ bool cant_stop;
|
|
+ bool multi_instance;
|
|
+};
|
|
+
|
|
+enum cpu_mitigations {
|
|
+ CPU_MITIGATIONS_OFF = 0,
|
|
+ CPU_MITIGATIONS_AUTO = 1,
|
|
+ CPU_MITIGATIONS_AUTO_NOSMT = 2,
|
|
+};
|
|
+
|
|
+typedef enum cpuhp_state pto_T_____21;
|
|
+
|
|
+typedef __kernel_long_t __kernel_suseconds_t;
|
|
+
|
|
+struct timeval {
|
|
+ __kernel_time_t tv_sec;
|
|
+ __kernel_suseconds_t tv_usec;
|
|
+};
|
|
+
|
|
+struct wait_queue_entry;
|
|
+
|
|
+typedef int (*wait_queue_func_t)(struct wait_queue_entry *, unsigned int, int, void *);
|
|
+
|
|
+struct wait_queue_entry {
|
|
+ unsigned int flags;
|
|
+ void *private;
|
|
+ wait_queue_func_t func;
|
|
+ struct list_head entry;
|
|
+};
|
|
+
|
|
+typedef struct wait_queue_entry wait_queue_entry_t;
|
|
+
|
|
+struct rusage {
|
|
+ struct timeval ru_utime;
|
|
+ struct timeval ru_stime;
|
|
+ __kernel_long_t ru_maxrss;
|
|
+ __kernel_long_t ru_ixrss;
|
|
+ __kernel_long_t ru_idrss;
|
|
+ __kernel_long_t ru_isrss;
|
|
+ __kernel_long_t ru_minflt;
|
|
+ __kernel_long_t ru_majflt;
|
|
+ __kernel_long_t ru_nswap;
|
|
+ __kernel_long_t ru_inblock;
|
|
+ __kernel_long_t ru_oublock;
|
|
+ __kernel_long_t ru_msgsnd;
|
|
+ __kernel_long_t ru_msgrcv;
|
|
+ __kernel_long_t ru_nsignals;
|
|
+ __kernel_long_t ru_nvcsw;
|
|
+ __kernel_long_t ru_nivcsw;
|
|
+};
|
|
+
|
|
+struct compat_timeval {
|
|
+ compat_time_t tv_sec;
|
|
+ s32 tv_usec;
|
|
+};
|
|
+
|
|
+struct compat_rusage {
|
|
+ struct compat_timeval ru_utime;
|
|
+ struct compat_timeval ru_stime;
|
|
+ compat_long_t ru_maxrss;
|
|
+ compat_long_t ru_ixrss;
|
|
+ compat_long_t ru_idrss;
|
|
+ compat_long_t ru_isrss;
|
|
+ compat_long_t ru_minflt;
|
|
+ compat_long_t ru_majflt;
|
|
+ compat_long_t ru_nswap;
|
|
+ compat_long_t ru_inblock;
|
|
+ compat_long_t ru_oublock;
|
|
+ compat_long_t ru_msgsnd;
|
|
+ compat_long_t ru_msgrcv;
|
|
+ compat_long_t ru_nsignals;
|
|
+ compat_long_t ru_nvcsw;
|
|
+ compat_long_t ru_nivcsw;
|
|
+};
|
|
+
|
|
+struct waitid_info {
|
|
+ pid_t pid;
|
|
+ uid_t uid;
|
|
+ int status;
|
|
+ int cause;
|
|
+};
|
|
+
|
|
+struct wait_opts {
|
|
+ enum pid_type wo_type;
|
|
+ int wo_flags;
|
|
+ struct pid *wo_pid;
|
|
+ struct waitid_info *wo_info;
|
|
+ int wo_stat;
|
|
+ struct rusage *wo_rusage;
|
|
+ wait_queue_entry_t child_wait;
|
|
+ int notask_error;
|
|
+};
|
|
+
|
|
+struct softirq_action {
|
|
+ void (*action)(struct softirq_action *);
|
|
+};
|
|
+
|
|
+struct tasklet_struct {
|
|
+ struct tasklet_struct *next;
|
|
+ long unsigned int state;
|
|
+ atomic_t count;
|
|
+ void (*func)(long unsigned int);
|
|
+ long unsigned int data;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TASKLET_STATE_SCHED = 0,
|
|
+ TASKLET_STATE_RUN = 1,
|
|
+};
|
|
+
|
|
+struct tasklet_hrtimer {
|
|
+ struct hrtimer timer;
|
|
+ struct tasklet_struct tasklet;
|
|
+ enum hrtimer_restart (*function)(struct hrtimer *);
|
|
+};
|
|
+
|
|
+struct trace_event_raw_irq_handler_entry {
|
|
+ struct trace_entry ent;
|
|
+ int irq;
|
|
+ u32 __data_loc_name;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_irq_handler_exit {
|
|
+ struct trace_entry ent;
|
|
+ int irq;
|
|
+ int ret;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_softirq {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int vec;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_irq_handler_entry {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_irq_handler_exit {};
|
|
+
|
|
+struct trace_event_data_offsets_softirq {};
|
|
+
|
|
+struct tasklet_head {
|
|
+ struct tasklet_struct *head;
|
|
+ struct tasklet_struct **tail;
|
|
+};
|
|
+
|
|
+typedef struct tasklet_struct **pto_T_____22;
|
|
+
|
|
+typedef void (*dr_release_t)(struct device *, void *);
|
|
+
|
|
+struct resource_entry {
|
|
+ struct list_head node;
|
|
+ struct resource *res;
|
|
+ resource_size_t offset;
|
|
+ struct resource __res;
|
|
+};
|
|
+
|
|
+struct resource_constraint {
|
|
+ resource_size_t min;
|
|
+ resource_size_t max;
|
|
+ resource_size_t align;
|
|
+ resource_size_t (*alignf)(void *, const struct resource *, resource_size_t, resource_size_t);
|
|
+ void *alignf_data;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ MAX_IORES_LEVEL = 5,
|
|
+};
|
|
+
|
|
+struct region_devres {
|
|
+ struct resource *parent;
|
|
+ resource_size_t start;
|
|
+ resource_size_t n;
|
|
+};
|
|
+
|
|
+enum sysctl_writes_mode {
|
|
+ SYSCTL_WRITES_LEGACY = -1,
|
|
+ SYSCTL_WRITES_WARN = 0,
|
|
+ SYSCTL_WRITES_STRICT = 1,
|
|
+};
|
|
+
|
|
+struct do_proc_dointvec_minmax_conv_param {
|
|
+ int *min;
|
|
+ int *max;
|
|
+};
|
|
+
|
|
+struct do_proc_douintvec_minmax_conv_param {
|
|
+ unsigned int *min;
|
|
+ unsigned int *max;
|
|
+};
|
|
+
|
|
+struct __sysctl_args {
|
|
+ int *name;
|
|
+ int nlen;
|
|
+ void *oldval;
|
|
+ size_t *oldlenp;
|
|
+ void *newval;
|
|
+ size_t newlen;
|
|
+ long unsigned int __unused[4];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CTL_KERN = 1,
|
|
+ CTL_VM = 2,
|
|
+ CTL_NET = 3,
|
|
+ CTL_PROC = 4,
|
|
+ CTL_FS = 5,
|
|
+ CTL_DEBUG = 6,
|
|
+ CTL_DEV = 7,
|
|
+ CTL_BUS = 8,
|
|
+ CTL_ABI = 9,
|
|
+ CTL_CPU = 10,
|
|
+ CTL_ARLAN = 254,
|
|
+ CTL_S390DBF = 5677,
|
|
+ CTL_SUNRPC = 7249,
|
|
+ CTL_PM = 9899,
|
|
+ CTL_FRV = 9898,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ KERN_OSTYPE = 1,
|
|
+ KERN_OSRELEASE = 2,
|
|
+ KERN_OSREV = 3,
|
|
+ KERN_VERSION = 4,
|
|
+ KERN_SECUREMASK = 5,
|
|
+ KERN_PROF = 6,
|
|
+ KERN_NODENAME = 7,
|
|
+ KERN_DOMAINNAME = 8,
|
|
+ KERN_PANIC = 15,
|
|
+ KERN_REALROOTDEV = 16,
|
|
+ KERN_SPARC_REBOOT = 21,
|
|
+ KERN_CTLALTDEL = 22,
|
|
+ KERN_PRINTK = 23,
|
|
+ KERN_NAMETRANS = 24,
|
|
+ KERN_PPC_HTABRECLAIM = 25,
|
|
+ KERN_PPC_ZEROPAGED = 26,
|
|
+ KERN_PPC_POWERSAVE_NAP = 27,
|
|
+ KERN_MODPROBE = 28,
|
|
+ KERN_SG_BIG_BUFF = 29,
|
|
+ KERN_ACCT = 30,
|
|
+ KERN_PPC_L2CR = 31,
|
|
+ KERN_RTSIGNR = 32,
|
|
+ KERN_RTSIGMAX = 33,
|
|
+ KERN_SHMMAX = 34,
|
|
+ KERN_MSGMAX = 35,
|
|
+ KERN_MSGMNB = 36,
|
|
+ KERN_MSGPOOL = 37,
|
|
+ KERN_SYSRQ = 38,
|
|
+ KERN_MAX_THREADS = 39,
|
|
+ KERN_RANDOM = 40,
|
|
+ KERN_SHMALL = 41,
|
|
+ KERN_MSGMNI = 42,
|
|
+ KERN_SEM = 43,
|
|
+ KERN_SPARC_STOP_A = 44,
|
|
+ KERN_SHMMNI = 45,
|
|
+ KERN_OVERFLOWUID = 46,
|
|
+ KERN_OVERFLOWGID = 47,
|
|
+ KERN_SHMPATH = 48,
|
|
+ KERN_HOTPLUG = 49,
|
|
+ KERN_IEEE_EMULATION_WARNINGS = 50,
|
|
+ KERN_S390_USER_DEBUG_LOGGING = 51,
|
|
+ KERN_CORE_USES_PID = 52,
|
|
+ KERN_TAINTED = 53,
|
|
+ KERN_CADPID = 54,
|
|
+ KERN_PIDMAX = 55,
|
|
+ KERN_CORE_PATTERN = 56,
|
|
+ KERN_PANIC_ON_OOPS = 57,
|
|
+ KERN_HPPA_PWRSW = 58,
|
|
+ KERN_HPPA_UNALIGNED = 59,
|
|
+ KERN_PRINTK_RATELIMIT = 60,
|
|
+ KERN_PRINTK_RATELIMIT_BURST = 61,
|
|
+ KERN_PTY = 62,
|
|
+ KERN_NGROUPS_MAX = 63,
|
|
+ KERN_SPARC_SCONS_PWROFF = 64,
|
|
+ KERN_HZ_TIMER = 65,
|
|
+ KERN_UNKNOWN_NMI_PANIC = 66,
|
|
+ KERN_BOOTLOADER_TYPE = 67,
|
|
+ KERN_RANDOMIZE = 68,
|
|
+ KERN_SETUID_DUMPABLE = 69,
|
|
+ KERN_SPIN_RETRY = 70,
|
|
+ KERN_ACPI_VIDEO_FLAGS = 71,
|
|
+ KERN_IA64_UNALIGNED = 72,
|
|
+ KERN_COMPAT_LOG = 73,
|
|
+ KERN_MAX_LOCK_DEPTH = 74,
|
|
+ KERN_NMI_WATCHDOG = 75,
|
|
+ KERN_PANIC_ON_NMI = 76,
|
|
+ KERN_PANIC_ON_WARN = 77,
|
|
+ KERN_PANIC_PRINT = 78,
|
|
+};
|
|
+
|
|
+struct xfs_sysctl_val {
|
|
+ int min;
|
|
+ int val;
|
|
+ int max;
|
|
+};
|
|
+
|
|
+typedef struct xfs_sysctl_val xfs_sysctl_val_t;
|
|
+
|
|
+struct xfs_param {
|
|
+ xfs_sysctl_val_t sgid_inherit;
|
|
+ xfs_sysctl_val_t symlink_mode;
|
|
+ xfs_sysctl_val_t panic_mask;
|
|
+ xfs_sysctl_val_t error_level;
|
|
+ xfs_sysctl_val_t syncd_timer;
|
|
+ xfs_sysctl_val_t stats_clear;
|
|
+ xfs_sysctl_val_t inherit_sync;
|
|
+ xfs_sysctl_val_t inherit_nodump;
|
|
+ xfs_sysctl_val_t inherit_noatim;
|
|
+ xfs_sysctl_val_t xfs_buf_timer;
|
|
+ xfs_sysctl_val_t xfs_buf_age;
|
|
+ xfs_sysctl_val_t inherit_nosym;
|
|
+ xfs_sysctl_val_t rotorstep;
|
|
+ xfs_sysctl_val_t inherit_nodfrg;
|
|
+ xfs_sysctl_val_t fstrm_timer;
|
|
+ xfs_sysctl_val_t eofb_timer;
|
|
+ xfs_sysctl_val_t cowb_timer;
|
|
+};
|
|
+
|
|
+typedef struct xfs_param xfs_param_t;
|
|
+
|
|
+struct xfs_globals {
|
|
+ int log_recovery_delay;
|
|
+ int mount_delay;
|
|
+ bool bug_on_assert;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NAPI_STATE_SCHED = 0,
|
|
+ NAPI_STATE_MISSED = 1,
|
|
+ NAPI_STATE_DISABLE = 2,
|
|
+ NAPI_STATE_NPSVC = 3,
|
|
+ NAPI_STATE_HASHED = 4,
|
|
+ NAPI_STATE_NO_BUSY_POLL = 5,
|
|
+ NAPI_STATE_IN_BUSY_POLL = 6,
|
|
+};
|
|
+
|
|
+struct compat_sysctl_args {
|
|
+ compat_uptr_t name;
|
|
+ int nlen;
|
|
+ compat_uptr_t oldval;
|
|
+ compat_uptr_t oldlenp;
|
|
+ compat_uptr_t newval;
|
|
+ compat_size_t newlen;
|
|
+ compat_ulong_t __unused[4];
|
|
+};
|
|
+
|
|
+struct __user_cap_header_struct {
|
|
+ __u32 version;
|
|
+ int pid;
|
|
+};
|
|
+
|
|
+typedef struct __user_cap_header_struct *cap_user_header_t;
|
|
+
|
|
+struct __user_cap_data_struct {
|
|
+ __u32 effective;
|
|
+ __u32 permitted;
|
|
+ __u32 inheritable;
|
|
+};
|
|
+
|
|
+typedef struct __user_cap_data_struct *cap_user_data_t;
|
|
+
|
|
+struct sigqueue {
|
|
+ struct list_head list;
|
|
+ int flags;
|
|
+ siginfo_t info;
|
|
+ struct user_struct *user;
|
|
+};
|
|
+
|
|
+struct ptrace_peeksiginfo_args {
|
|
+ __u64 off;
|
|
+ __u32 flags;
|
|
+ __s32 nr;
|
|
+};
|
|
+
|
|
+struct compat_iovec {
|
|
+ compat_uptr_t iov_base;
|
|
+ compat_size_t iov_len;
|
|
+};
|
|
+
|
|
+typedef int __kernel_mqd_t;
|
|
+
|
|
+typedef __kernel_mqd_t mqd_t;
|
|
+
|
|
+typedef long unsigned int old_sigset_t;
|
|
+
|
|
+enum audit_state {
|
|
+ AUDIT_DISABLED = 0,
|
|
+ AUDIT_BUILD_CONTEXT = 1,
|
|
+ AUDIT_RECORD_CONTEXT = 2,
|
|
+};
|
|
+
|
|
+struct audit_cap_data {
|
|
+ kernel_cap_t permitted;
|
|
+ kernel_cap_t inheritable;
|
|
+ union {
|
|
+ unsigned int fE;
|
|
+ kernel_cap_t effective;
|
|
+ };
|
|
+ kernel_cap_t ambient;
|
|
+};
|
|
+
|
|
+struct filename;
|
|
+
|
|
+struct audit_names {
|
|
+ struct list_head list;
|
|
+ struct filename *name;
|
|
+ int name_len;
|
|
+ bool hidden;
|
|
+ long unsigned int ino;
|
|
+ dev_t dev;
|
|
+ umode_t mode;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ dev_t rdev;
|
|
+ u32 osid;
|
|
+ struct audit_cap_data fcap;
|
|
+ unsigned int fcap_ver;
|
|
+ unsigned char type;
|
|
+ bool should_free;
|
|
+};
|
|
+
|
|
+struct mq_attr {
|
|
+ __kernel_long_t mq_flags;
|
|
+ __kernel_long_t mq_maxmsg;
|
|
+ __kernel_long_t mq_msgsize;
|
|
+ __kernel_long_t mq_curmsgs;
|
|
+ __kernel_long_t __reserved[4];
|
|
+};
|
|
+
|
|
+struct audit_proctitle {
|
|
+ int len;
|
|
+ char *value;
|
|
+};
|
|
+
|
|
+struct audit_aux_data;
|
|
+
|
|
+struct audit_tree_refs;
|
|
+
|
|
+struct audit_context {
|
|
+ int dummy;
|
|
+ int in_syscall;
|
|
+ enum audit_state state;
|
|
+ enum audit_state current_state;
|
|
+ unsigned int serial;
|
|
+ int major;
|
|
+ struct timespec64 ctime;
|
|
+ long unsigned int argv[4];
|
|
+ long int return_code;
|
|
+ u64 prio;
|
|
+ int return_valid;
|
|
+ struct audit_names preallocated_names[5];
|
|
+ int name_count;
|
|
+ struct list_head names_list;
|
|
+ char *filterkey;
|
|
+ struct path pwd;
|
|
+ struct audit_aux_data *aux;
|
|
+ struct audit_aux_data *aux_pids;
|
|
+ struct __kernel_sockaddr_storage *sockaddr;
|
|
+ size_t sockaddr_len;
|
|
+ pid_t pid;
|
|
+ pid_t ppid;
|
|
+ kuid_t uid;
|
|
+ kuid_t euid;
|
|
+ kuid_t suid;
|
|
+ kuid_t fsuid;
|
|
+ kgid_t gid;
|
|
+ kgid_t egid;
|
|
+ kgid_t sgid;
|
|
+ kgid_t fsgid;
|
|
+ long unsigned int personality;
|
|
+ int arch;
|
|
+ pid_t target_pid;
|
|
+ kuid_t target_auid;
|
|
+ kuid_t target_uid;
|
|
+ unsigned int target_sessionid;
|
|
+ u32 target_sid;
|
|
+ char target_comm[16];
|
|
+ struct audit_tree_refs *trees;
|
|
+ struct audit_tree_refs *first_trees;
|
|
+ struct list_head killed_trees;
|
|
+ int tree_count;
|
|
+ int type;
|
|
+ union {
|
|
+ struct {
|
|
+ int nargs;
|
|
+ long int args[6];
|
|
+ } socketcall;
|
|
+ struct {
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ umode_t mode;
|
|
+ u32 osid;
|
|
+ int has_perm;
|
|
+ uid_t perm_uid;
|
|
+ gid_t perm_gid;
|
|
+ umode_t perm_mode;
|
|
+ long unsigned int qbytes;
|
|
+ } ipc;
|
|
+ struct {
|
|
+ mqd_t mqdes;
|
|
+ struct mq_attr mqstat;
|
|
+ } mq_getsetattr;
|
|
+ struct {
|
|
+ mqd_t mqdes;
|
|
+ int sigev_signo;
|
|
+ } mq_notify;
|
|
+ struct {
|
|
+ mqd_t mqdes;
|
|
+ size_t msg_len;
|
|
+ unsigned int msg_prio;
|
|
+ struct timespec64 abs_timeout;
|
|
+ } mq_sendrecv;
|
|
+ struct {
|
|
+ int oflag;
|
|
+ umode_t mode;
|
|
+ struct mq_attr attr;
|
|
+ } mq_open;
|
|
+ struct {
|
|
+ pid_t pid;
|
|
+ struct audit_cap_data cap;
|
|
+ } capset;
|
|
+ struct {
|
|
+ int fd;
|
|
+ int flags;
|
|
+ } mmap;
|
|
+ struct {
|
|
+ int argc;
|
|
+ } execve;
|
|
+ struct {
|
|
+ char *name;
|
|
+ } module;
|
|
+ };
|
|
+ int fds[2];
|
|
+ struct audit_proctitle proctitle;
|
|
+};
|
|
+
|
|
+enum siginfo_layout {
|
|
+ SIL_KILL = 0,
|
|
+ SIL_TIMER = 1,
|
|
+ SIL_POLL = 2,
|
|
+ SIL_FAULT = 3,
|
|
+ SIL_FAULT_MCEERR = 4,
|
|
+ SIL_FAULT_BNDERR = 5,
|
|
+ SIL_FAULT_PKUERR = 6,
|
|
+ SIL_CHLD = 7,
|
|
+ SIL_RT = 8,
|
|
+ SIL_SYS = 9,
|
|
+};
|
|
+
|
|
+struct filename {
|
|
+ const char *name;
|
|
+ const char *uptr;
|
|
+ int refcnt;
|
|
+ struct audit_names *aname;
|
|
+ const char iname[0];
|
|
+};
|
|
+
|
|
+typedef u32 compat_old_sigset_t;
|
|
+
|
|
+struct compat_sigaction {
|
|
+ compat_uptr_t sa_handler;
|
|
+ compat_ulong_t sa_flags;
|
|
+ compat_uptr_t sa_restorer;
|
|
+ compat_sigset_t sa_mask;
|
|
+};
|
|
+
|
|
+struct compat_old_sigaction {
|
|
+ compat_uptr_t sa_handler;
|
|
+ compat_old_sigset_t sa_mask;
|
|
+ compat_ulong_t sa_flags;
|
|
+ compat_uptr_t sa_restorer;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TRACE_SIGNAL_DELIVERED = 0,
|
|
+ TRACE_SIGNAL_IGNORED = 1,
|
|
+ TRACE_SIGNAL_ALREADY_PENDING = 2,
|
|
+ TRACE_SIGNAL_OVERFLOW_FAIL = 3,
|
|
+ TRACE_SIGNAL_LOSE_INFO = 4,
|
|
+};
|
|
+
|
|
+struct trace_event_raw_signal_generate {
|
|
+ struct trace_entry ent;
|
|
+ int sig;
|
|
+ int errno;
|
|
+ int code;
|
|
+ char comm[16];
|
|
+ pid_t pid;
|
|
+ int group;
|
|
+ int result;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_signal_deliver {
|
|
+ struct trace_entry ent;
|
|
+ int sig;
|
|
+ int errno;
|
|
+ int code;
|
|
+ long unsigned int sa_handler;
|
|
+ long unsigned int sa_flags;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_signal_generate {};
|
|
+
|
|
+struct trace_event_data_offsets_signal_deliver {};
|
|
+
|
|
+typedef __kernel_clock_t clock_t;
|
|
+
|
|
+struct sysinfo {
|
|
+ __kernel_long_t uptime;
|
|
+ __kernel_ulong_t loads[3];
|
|
+ __kernel_ulong_t totalram;
|
|
+ __kernel_ulong_t freeram;
|
|
+ __kernel_ulong_t sharedram;
|
|
+ __kernel_ulong_t bufferram;
|
|
+ __kernel_ulong_t totalswap;
|
|
+ __kernel_ulong_t freeswap;
|
|
+ __u16 procs;
|
|
+ __u16 pad;
|
|
+ __kernel_ulong_t totalhigh;
|
|
+ __kernel_ulong_t freehigh;
|
|
+ __u32 mem_unit;
|
|
+ char _f[0];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ PER_LINUX = 0,
|
|
+ PER_LINUX_32BIT = 8388608,
|
|
+ PER_LINUX_FDPIC = 524288,
|
|
+ PER_SVR4 = 68157441,
|
|
+ PER_SVR3 = 83886082,
|
|
+ PER_SCOSVR3 = 117440515,
|
|
+ PER_OSR5 = 100663299,
|
|
+ PER_WYSEV386 = 83886084,
|
|
+ PER_ISCR4 = 67108869,
|
|
+ PER_BSD = 6,
|
|
+ PER_SUNOS = 67108870,
|
|
+ PER_XENIX = 83886087,
|
|
+ PER_LINUX32 = 8,
|
|
+ PER_LINUX32_3GB = 134217736,
|
|
+ PER_IRIX32 = 67108873,
|
|
+ PER_IRIXN32 = 67108874,
|
|
+ PER_IRIX64 = 67108875,
|
|
+ PER_RISCOS = 12,
|
|
+ PER_SOLARIS = 67108877,
|
|
+ PER_UW7 = 68157454,
|
|
+ PER_OSF4 = 15,
|
|
+ PER_HPUX = 16,
|
|
+ PER_MASK = 255,
|
|
+};
|
|
+
|
|
+struct rlimit64 {
|
|
+ __u64 rlim_cur;
|
|
+ __u64 rlim_max;
|
|
+};
|
|
+
|
|
+struct oldold_utsname {
|
|
+ char sysname[9];
|
|
+ char nodename[9];
|
|
+ char release[9];
|
|
+ char version[9];
|
|
+ char machine[9];
|
|
+};
|
|
+
|
|
+struct old_utsname {
|
|
+ char sysname[65];
|
|
+ char nodename[65];
|
|
+ char release[65];
|
|
+ char version[65];
|
|
+ char machine[65];
|
|
+};
|
|
+
|
|
+enum uts_proc {
|
|
+ UTS_PROC_OSTYPE = 0,
|
|
+ UTS_PROC_OSRELEASE = 1,
|
|
+ UTS_PROC_VERSION = 2,
|
|
+ UTS_PROC_HOSTNAME = 3,
|
|
+ UTS_PROC_DOMAINNAME = 4,
|
|
+};
|
|
+
|
|
+struct prctl_mm_map {
|
|
+ __u64 start_code;
|
|
+ __u64 end_code;
|
|
+ __u64 start_data;
|
|
+ __u64 end_data;
|
|
+ __u64 start_brk;
|
|
+ __u64 brk;
|
|
+ __u64 start_stack;
|
|
+ __u64 arg_start;
|
|
+ __u64 arg_end;
|
|
+ __u64 env_start;
|
|
+ __u64 env_end;
|
|
+ __u64 *auxv;
|
|
+ __u32 auxv_size;
|
|
+ __u32 exe_fd;
|
|
+};
|
|
+
|
|
+struct compat_tms {
|
|
+ compat_clock_t tms_utime;
|
|
+ compat_clock_t tms_stime;
|
|
+ compat_clock_t tms_cutime;
|
|
+ compat_clock_t tms_cstime;
|
|
+};
|
|
+
|
|
+struct compat_rlimit {
|
|
+ compat_ulong_t rlim_cur;
|
|
+ compat_ulong_t rlim_max;
|
|
+};
|
|
+
|
|
+struct tms {
|
|
+ __kernel_clock_t tms_utime;
|
|
+ __kernel_clock_t tms_stime;
|
|
+ __kernel_clock_t tms_cutime;
|
|
+ __kernel_clock_t tms_cstime;
|
|
+};
|
|
+
|
|
+struct getcpu_cache {
|
|
+ long unsigned int blob[16];
|
|
+};
|
|
+
|
|
+struct fd {
|
|
+ struct file *file;
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+struct compat_sysinfo {
|
|
+ s32 uptime;
|
|
+ u32 loads[3];
|
|
+ u32 totalram;
|
|
+ u32 freeram;
|
|
+ u32 sharedram;
|
|
+ u32 bufferram;
|
|
+ u32 totalswap;
|
|
+ u32 freeswap;
|
|
+ u16 procs;
|
|
+ u16 pad;
|
|
+ u32 totalhigh;
|
|
+ u32 freehigh;
|
|
+ u32 mem_unit;
|
|
+ char _f[8];
|
|
+};
|
|
+
|
|
+struct umh_info {
|
|
+ const char *cmdline;
|
|
+ struct file *pipe_to_umh;
|
|
+ struct file *pipe_from_umh;
|
|
+ pid_t pid;
|
|
+};
|
|
+
|
|
+struct wq_flusher;
|
|
+
|
|
+struct worker;
|
|
+
|
|
+struct workqueue_attrs;
|
|
+
|
|
+struct pool_workqueue;
|
|
+
|
|
+struct wq_device;
|
|
+
|
|
+struct workqueue_struct {
|
|
+ struct list_head pwqs;
|
|
+ struct list_head list;
|
|
+ struct mutex mutex;
|
|
+ int work_color;
|
|
+ int flush_color;
|
|
+ atomic_t nr_pwqs_to_flush;
|
|
+ struct wq_flusher *first_flusher;
|
|
+ struct list_head flusher_queue;
|
|
+ struct list_head flusher_overflow;
|
|
+ struct list_head maydays;
|
|
+ struct worker *rescuer;
|
|
+ int nr_drainers;
|
|
+ int saved_max_active;
|
|
+ struct workqueue_attrs *unbound_attrs;
|
|
+ struct pool_workqueue *dfl_pwq;
|
|
+ struct wq_device *wq_dev;
|
|
+ char name[24];
|
|
+ struct callback_head rcu;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ unsigned int flags;
|
|
+ struct pool_workqueue *cpu_pwqs;
|
|
+ struct pool_workqueue *numa_pwq_tbl[0];
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct workqueue_attrs {
|
|
+ int nice;
|
|
+ cpumask_var_t cpumask;
|
|
+ bool no_numa;
|
|
+};
|
|
+
|
|
+struct execute_work {
|
|
+ struct work_struct work;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ WQ_UNBOUND = 2,
|
|
+ WQ_FREEZABLE = 4,
|
|
+ WQ_MEM_RECLAIM = 8,
|
|
+ WQ_HIGHPRI = 16,
|
|
+ WQ_CPU_INTENSIVE = 32,
|
|
+ WQ_SYSFS = 64,
|
|
+ WQ_POWER_EFFICIENT = 128,
|
|
+ __WQ_DRAINING = 65536,
|
|
+ __WQ_ORDERED = 131072,
|
|
+ __WQ_LEGACY = 262144,
|
|
+ __WQ_ORDERED_EXPLICIT = 524288,
|
|
+ __WQ_DYNAMIC = 33554432,
|
|
+ WQ_MAX_ACTIVE = 512,
|
|
+ WQ_MAX_UNBOUND_PER_CPU = 4,
|
|
+ WQ_DFL_ACTIVE = 256,
|
|
+};
|
|
+
|
|
+struct __una_u32 {
|
|
+ u32 x;
|
|
+};
|
|
+
|
|
+enum hk_flags {
|
|
+ HK_FLAG_TIMER = 1,
|
|
+ HK_FLAG_RCU = 2,
|
|
+ HK_FLAG_MISC = 4,
|
|
+ HK_FLAG_SCHED = 8,
|
|
+ HK_FLAG_TICK = 16,
|
|
+ HK_FLAG_DOMAIN = 32,
|
|
+ HK_FLAG_WQ = 64,
|
|
+};
|
|
+
|
|
+struct worker_pool;
|
|
+
|
|
+struct worker {
|
|
+ union {
|
|
+ struct list_head entry;
|
|
+ struct hlist_node hentry;
|
|
+ };
|
|
+ struct work_struct *current_work;
|
|
+ work_func_t current_func;
|
|
+ struct pool_workqueue *current_pwq;
|
|
+ struct list_head scheduled;
|
|
+ struct task_struct *task;
|
|
+ struct worker_pool *pool;
|
|
+ struct list_head node;
|
|
+ long unsigned int last_active;
|
|
+ unsigned int flags;
|
|
+ int id;
|
|
+ int sleeping;
|
|
+ char desc[24];
|
|
+ struct workqueue_struct *rescue_wq;
|
|
+};
|
|
+
|
|
+struct pool_workqueue {
|
|
+ struct worker_pool *pool;
|
|
+ struct workqueue_struct *wq;
|
|
+ int work_color;
|
|
+ int flush_color;
|
|
+ int refcnt;
|
|
+ int nr_in_flight[15];
|
|
+ int nr_active;
|
|
+ int max_active;
|
|
+ struct list_head delayed_works;
|
|
+ struct list_head pwqs_node;
|
|
+ struct list_head mayday_node;
|
|
+ struct work_struct unbound_release_work;
|
|
+ struct callback_head rcu;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct worker_pool {
|
|
+ spinlock_t lock;
|
|
+ int cpu;
|
|
+ int node;
|
|
+ int id;
|
|
+ unsigned int flags;
|
|
+ long unsigned int watchdog_ts;
|
|
+ struct list_head worklist;
|
|
+ int nr_workers;
|
|
+ int nr_idle;
|
|
+ struct list_head idle_list;
|
|
+ struct timer_list idle_timer;
|
|
+ struct timer_list mayday_timer;
|
|
+ struct hlist_head busy_hash[64];
|
|
+ struct worker *manager;
|
|
+ struct list_head workers;
|
|
+ struct completion *detach_completion;
|
|
+ struct ida worker_ida;
|
|
+ struct workqueue_attrs *attrs;
|
|
+ struct hlist_node hash_node;
|
|
+ int refcnt;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ atomic_t nr_running;
|
|
+ struct callback_head rcu;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ POOL_MANAGER_ACTIVE = 1,
|
|
+ POOL_DISASSOCIATED = 4,
|
|
+ WORKER_DIE = 2,
|
|
+ WORKER_IDLE = 4,
|
|
+ WORKER_PREP = 8,
|
|
+ WORKER_CPU_INTENSIVE = 64,
|
|
+ WORKER_UNBOUND = 128,
|
|
+ WORKER_REBOUND = 256,
|
|
+ WORKER_NICED = 512,
|
|
+ WORKER_NOT_RUNNING = 456,
|
|
+ NR_STD_WORKER_POOLS = 2,
|
|
+ UNBOUND_POOL_HASH_ORDER = 6,
|
|
+ BUSY_WORKER_HASH_ORDER = 6,
|
|
+ MAX_IDLE_WORKERS_RATIO = 4,
|
|
+ IDLE_WORKER_TIMEOUT = 300000,
|
|
+ MAYDAY_INITIAL_TIMEOUT = 10,
|
|
+ MAYDAY_INTERVAL = 100,
|
|
+ CREATE_COOLDOWN = 1000,
|
|
+ RESCUER_NICE_LEVEL = -20,
|
|
+ HIGHPRI_NICE_LEVEL = -20,
|
|
+ WQ_NAME_LEN = 24,
|
|
+};
|
|
+
|
|
+struct wq_flusher {
|
|
+ struct list_head list;
|
|
+ int flush_color;
|
|
+ struct completion done;
|
|
+};
|
|
+
|
|
+struct wq_device {
|
|
+ struct workqueue_struct *wq;
|
|
+ struct device dev;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_workqueue_work {
|
|
+ struct trace_entry ent;
|
|
+ void *work;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_workqueue_queue_work {
|
|
+ struct trace_entry ent;
|
|
+ void *work;
|
|
+ void *function;
|
|
+ void *workqueue;
|
|
+ unsigned int req_cpu;
|
|
+ unsigned int cpu;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_workqueue_execute_start {
|
|
+ struct trace_entry ent;
|
|
+ void *work;
|
|
+ void *function;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_workqueue_work {};
|
|
+
|
|
+struct trace_event_data_offsets_workqueue_queue_work {};
|
|
+
|
|
+struct trace_event_data_offsets_workqueue_execute_start {};
|
|
+
|
|
+struct wq_barrier {
|
|
+ struct work_struct work;
|
|
+ struct completion done;
|
|
+ struct task_struct *task;
|
|
+};
|
|
+
|
|
+struct nice_work {
|
|
+ struct work_struct work;
|
|
+ long int nice;
|
|
+};
|
|
+
|
|
+struct cwt_wait {
|
|
+ wait_queue_entry_t wait;
|
|
+ struct work_struct *work;
|
|
+};
|
|
+
|
|
+struct apply_wqattrs_ctx {
|
|
+ struct workqueue_struct *wq;
|
|
+ struct workqueue_attrs *attrs;
|
|
+ struct list_head list;
|
|
+ struct pool_workqueue *dfl_pwq;
|
|
+ struct pool_workqueue *pwq_tbl[0];
|
|
+};
|
|
+
|
|
+struct work_for_cpu {
|
|
+ struct work_struct work;
|
|
+ long int (*fn)(void *);
|
|
+ void *arg;
|
|
+ long int ret;
|
|
+};
|
|
+
|
|
+struct ctl_path {
|
|
+ const char *procname;
|
|
+};
|
|
+
|
|
+typedef void (*task_work_func_t)(struct callback_head *);
|
|
+
|
|
+enum {
|
|
+ KERNEL_PARAM_OPS_FL_NOARG = 1,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ KERNEL_PARAM_FL_UNSAFE = 1,
|
|
+ KERNEL_PARAM_FL_HWPARAM = 2,
|
|
+};
|
|
+
|
|
+struct param_attribute {
|
|
+ struct module_attribute mattr;
|
|
+ const struct kernel_param *param;
|
|
+};
|
|
+
|
|
+struct module_param_attrs {
|
|
+ unsigned int num;
|
|
+ struct attribute_group grp;
|
|
+ struct param_attribute attrs[0];
|
|
+};
|
|
+
|
|
+struct module_version_attribute {
|
|
+ struct module_attribute mattr;
|
|
+ const char *module_name;
|
|
+ const char *version;
|
|
+};
|
|
+
|
|
+struct kmalloced_param {
|
|
+ struct list_head list;
|
|
+ char val[0];
|
|
+};
|
|
+
|
|
+struct sched_param {
|
|
+ int sched_priority;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CSS_NO_REF = 1,
|
|
+ CSS_ONLINE = 2,
|
|
+ CSS_RELEASED = 4,
|
|
+ CSS_VISIBLE = 8,
|
|
+ CSS_DYING = 16,
|
|
+};
|
|
+
|
|
+struct kthread_work;
|
|
+
|
|
+typedef void (*kthread_work_func_t)(struct kthread_work *);
|
|
+
|
|
+struct kthread_worker;
|
|
+
|
|
+struct kthread_work {
|
|
+ struct list_head node;
|
|
+ kthread_work_func_t func;
|
|
+ struct kthread_worker *worker;
|
|
+ int canceling;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ KTW_FREEZABLE = 1,
|
|
+};
|
|
+
|
|
+struct kthread_worker {
|
|
+ unsigned int flags;
|
|
+ spinlock_t lock;
|
|
+ struct list_head work_list;
|
|
+ struct list_head delayed_work_list;
|
|
+ struct task_struct *task;
|
|
+ struct kthread_work *current_work;
|
|
+};
|
|
+
|
|
+struct kthread_delayed_work {
|
|
+ struct kthread_work work;
|
|
+ struct timer_list timer;
|
|
+};
|
|
+
|
|
+struct kthread_create_info {
|
|
+ int (*threadfn)(void *);
|
|
+ void *data;
|
|
+ int node;
|
|
+ struct task_struct *result;
|
|
+ struct completion *done;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct kthread {
|
|
+ long unsigned int flags;
|
|
+ unsigned int cpu;
|
|
+ void *data;
|
|
+ struct completion parked;
|
|
+ struct completion exited;
|
|
+ struct cgroup_subsys_state *blkcg_css;
|
|
+};
|
|
+
|
|
+enum KTHREAD_BITS {
|
|
+ KTHREAD_IS_PER_CPU = 0,
|
|
+ KTHREAD_SHOULD_STOP = 1,
|
|
+ KTHREAD_SHOULD_PARK = 2,
|
|
+};
|
|
+
|
|
+struct kthread_flush_work {
|
|
+ struct kthread_work work;
|
|
+ struct completion done;
|
|
+};
|
|
+
|
|
+struct ipc_ids {
|
|
+ int in_use;
|
|
+ short unsigned int seq;
|
|
+ struct rw_semaphore rwsem;
|
|
+ struct idr ipcs_idr;
|
|
+ int max_idx;
|
|
+ int next_id;
|
|
+ struct rhashtable key_ht;
|
|
+};
|
|
+
|
|
+struct ipc_namespace {
|
|
+ refcount_t count;
|
|
+ struct ipc_ids ids[3];
|
|
+ int sem_ctls[4];
|
|
+ int used_sems;
|
|
+ unsigned int msg_ctlmax;
|
|
+ unsigned int msg_ctlmnb;
|
|
+ unsigned int msg_ctlmni;
|
|
+ atomic_t msg_bytes;
|
|
+ atomic_t msg_hdrs;
|
|
+ size_t shm_ctlmax;
|
|
+ size_t shm_ctlall;
|
|
+ long unsigned int shm_tot;
|
|
+ int shm_ctlmni;
|
|
+ int shm_rmid_forced;
|
|
+ struct notifier_block ipcns_nb;
|
|
+ struct vfsmount *mq_mnt;
|
|
+ unsigned int mq_queues_count;
|
|
+ unsigned int mq_queues_max;
|
|
+ unsigned int mq_msg_max;
|
|
+ unsigned int mq_msgsize_max;
|
|
+ unsigned int mq_msg_default;
|
|
+ unsigned int mq_msgsize_default;
|
|
+ struct user_namespace *user_ns;
|
|
+ struct ucounts *ucounts;
|
|
+ struct ns_common ns;
|
|
+};
|
|
+
|
|
+struct raw_notifier_head {
|
|
+ struct notifier_block *head;
|
|
+};
|
|
+
|
|
+struct srcu_notifier_head {
|
|
+ struct mutex mutex;
|
|
+ struct srcu_struct srcu;
|
|
+ struct notifier_block *head;
|
|
+};
|
|
+
|
|
+enum what {
|
|
+ PROC_EVENT_NONE = 0,
|
|
+ PROC_EVENT_FORK = 1,
|
|
+ PROC_EVENT_EXEC = 2,
|
|
+ PROC_EVENT_UID = 4,
|
|
+ PROC_EVENT_GID = 64,
|
|
+ PROC_EVENT_SID = 128,
|
|
+ PROC_EVENT_PTRACE = 256,
|
|
+ PROC_EVENT_COMM = 512,
|
|
+ PROC_EVENT_COREDUMP = 1073741824,
|
|
+ PROC_EVENT_EXIT = -2147483648,
|
|
+};
|
|
+
|
|
+typedef u64 async_cookie_t;
|
|
+
|
|
+typedef void (*async_func_t)(void *, async_cookie_t);
|
|
+
|
|
+struct async_domain {
|
|
+ struct list_head pending;
|
|
+ unsigned int registered: 1;
|
|
+};
|
|
+
|
|
+struct async_entry {
|
|
+ struct list_head domain_list;
|
|
+ struct list_head global_list;
|
|
+ struct work_struct work;
|
|
+ async_cookie_t cookie;
|
|
+ async_func_t func;
|
|
+ void *data;
|
|
+ struct async_domain *domain;
|
|
+};
|
|
+
|
|
+struct smpboot_thread_data {
|
|
+ unsigned int cpu;
|
|
+ unsigned int status;
|
|
+ struct smp_hotplug_thread *ht;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ HP_THREAD_NONE = 0,
|
|
+ HP_THREAD_ACTIVE = 1,
|
|
+ HP_THREAD_PARKED = 2,
|
|
+};
|
|
+
|
|
+typedef void (*rcu_callback_t)(struct callback_head *);
|
|
+
|
|
+typedef void (*call_rcu_func_t)(struct callback_head *, rcu_callback_t);
|
|
+
|
|
+struct preempt_notifier;
|
|
+
|
|
+struct preempt_ops {
|
|
+ void (*sched_in)(struct preempt_notifier *, int);
|
|
+ void (*sched_out)(struct preempt_notifier *, struct task_struct *);
|
|
+};
|
|
+
|
|
+struct preempt_notifier {
|
|
+ struct hlist_node link;
|
|
+ struct preempt_ops *ops;
|
|
+};
|
|
+
|
|
+struct pin_cookie {};
|
|
+
|
|
+struct cfs_rq {
|
|
+ struct load_weight load;
|
|
+ long unsigned int runnable_weight;
|
|
+ unsigned int nr_running;
|
|
+ unsigned int h_nr_running;
|
|
+ u64 exec_clock;
|
|
+ u64 min_vruntime;
|
|
+ struct rb_root_cached tasks_timeline;
|
|
+ struct sched_entity *curr;
|
|
+ struct sched_entity *next;
|
|
+ struct sched_entity *last;
|
|
+ struct sched_entity *skip;
|
|
+ unsigned int nr_spread_over;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct sched_avg avg;
|
|
+ struct {
|
|
+ raw_spinlock_t lock;
|
|
+ int nr;
|
|
+ long unsigned int load_avg;
|
|
+ long unsigned int util_avg;
|
|
+ long unsigned int runnable_sum;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ } removed;
|
|
+ long unsigned int tg_load_avg_contrib;
|
|
+ long int propagate;
|
|
+ long int prop_runnable_sum;
|
|
+ long unsigned int h_load;
|
|
+ u64 last_h_load_update;
|
|
+ struct sched_entity *h_load_next;
|
|
+ struct rq *rq;
|
|
+ int on_list;
|
|
+ struct list_head leaf_cfs_rq_list;
|
|
+ struct task_group *tg;
|
|
+ int runtime_enabled;
|
|
+ s64 runtime_remaining;
|
|
+ u64 throttled_clock;
|
|
+ u64 throttled_clock_task;
|
|
+ u64 throttled_clock_task_time;
|
|
+ int throttled;
|
|
+ int throttle_count;
|
|
+ struct list_head throttled_list;
|
|
+ union {
|
|
+ unsigned int idle_h_nr_running;
|
|
+ long unsigned int idle_h_nr_running_padding;
|
|
+ };
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct rt_prio_array {
|
|
+ long unsigned int bitmap[2];
|
|
+ struct list_head queue[100];
|
|
+};
|
|
+
|
|
+struct rt_rq {
|
|
+ struct rt_prio_array active;
|
|
+ unsigned int rt_nr_running;
|
|
+ unsigned int rr_nr_running;
|
|
+ struct {
|
|
+ int curr;
|
|
+ int next;
|
|
+ } highest_prio;
|
|
+ long unsigned int rt_nr_migratory;
|
|
+ long unsigned int rt_nr_total;
|
|
+ int overloaded;
|
|
+ struct plist_head pushable_tasks;
|
|
+ int rt_queued;
|
|
+ int rt_throttled;
|
|
+ u64 rt_time;
|
|
+ u64 rt_runtime;
|
|
+ raw_spinlock_t rt_runtime_lock;
|
|
+ long unsigned int rt_nr_boosted;
|
|
+ struct rq *rq;
|
|
+ struct task_group *tg;
|
|
+};
|
|
+
|
|
+struct rt_bandwidth {
|
|
+ raw_spinlock_t rt_runtime_lock;
|
|
+ ktime_t rt_period;
|
|
+ u64 rt_runtime;
|
|
+ struct hrtimer rt_period_timer;
|
|
+ unsigned int rt_period_active;
|
|
+};
|
|
+
|
|
+struct cfs_bandwidth {
|
|
+ raw_spinlock_t lock;
|
|
+ ktime_t period;
|
|
+ u64 quota;
|
|
+ u64 runtime;
|
|
+ s64 hierarchical_quota;
|
|
+ short int idle;
|
|
+ short int period_active;
|
|
+ struct hrtimer period_timer;
|
|
+ struct hrtimer slack_timer;
|
|
+ struct list_head throttled_cfs_rq;
|
|
+ int nr_periods;
|
|
+ int nr_throttled;
|
|
+ u64 throttled_time;
|
|
+ bool distribute_running;
|
|
+};
|
|
+
|
|
+struct task_group {
|
|
+ struct cgroup_subsys_state css;
|
|
+ struct sched_entity **se;
|
|
+ struct cfs_rq **cfs_rq;
|
|
+ long unsigned int shares;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ atomic_long_t load_avg;
|
|
+ struct sched_rt_entity **rt_se;
|
|
+ struct rt_rq **rt_rq;
|
|
+ struct rt_bandwidth rt_bandwidth;
|
|
+ struct callback_head rcu;
|
|
+ struct list_head list;
|
|
+ struct task_group *parent;
|
|
+ struct list_head siblings;
|
|
+ struct list_head children;
|
|
+ struct autogroup *autogroup;
|
|
+ struct cfs_bandwidth cfs_bandwidth;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct update_util_data {
|
|
+ void (*func)(struct update_util_data *, u64, unsigned int);
|
|
+};
|
|
+
|
|
+struct autogroup {
|
|
+ struct kref kref;
|
|
+ struct task_group *tg;
|
|
+ struct rw_semaphore lock;
|
|
+ long unsigned int id;
|
|
+ int nice;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = 1,
|
|
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED = 2,
|
|
+ MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = 4,
|
|
+ MEMBARRIER_STATE_GLOBAL_EXPEDITED = 8,
|
|
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = 16,
|
|
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = 32,
|
|
+};
|
|
+
|
|
+struct sched_group {
|
|
+ struct sched_group *next;
|
|
+ atomic_t ref;
|
|
+ unsigned int group_weight;
|
|
+ struct sched_group_capacity *sgc;
|
|
+ int asym_prefer_cpu;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int cpumask[0];
|
|
+};
|
|
+
|
|
+struct sched_group_capacity {
|
|
+ atomic_t ref;
|
|
+ long unsigned int capacity;
|
|
+ long unsigned int min_capacity;
|
|
+ long unsigned int next_update;
|
|
+ int imbalance;
|
|
+ int id;
|
|
+ long unsigned int cpumask[0];
|
|
+};
|
|
+
|
|
+struct wake_q_head {
|
|
+ struct wake_q_node *first;
|
|
+ struct wake_q_node **lastp;
|
|
+};
|
|
+
|
|
+struct sched_attr {
|
|
+ __u32 size;
|
|
+ __u32 sched_policy;
|
|
+ __u64 sched_flags;
|
|
+ __s32 sched_nice;
|
|
+ __u32 sched_priority;
|
|
+ __u64 sched_runtime;
|
|
+ __u64 sched_deadline;
|
|
+ __u64 sched_period;
|
|
+};
|
|
+
|
|
+struct cpuidle_driver___2;
|
|
+
|
|
+struct cpuidle_state {
|
|
+ char name[16];
|
|
+ char desc[32];
|
|
+ unsigned int flags;
|
|
+ unsigned int exit_latency;
|
|
+ int power_usage;
|
|
+ unsigned int target_residency;
|
|
+ bool disabled;
|
|
+ int (*enter)(struct cpuidle_device *, struct cpuidle_driver___2 *, int);
|
|
+ int (*enter_dead)(struct cpuidle_device *, int);
|
|
+ void (*enter_s2idle)(struct cpuidle_device *, struct cpuidle_driver___2 *, int);
|
|
+};
|
|
+
|
|
+struct cpuidle_driver___2 {
|
|
+ const char *name;
|
|
+ struct module *owner;
|
|
+ int refcnt;
|
|
+ unsigned int bctimer: 1;
|
|
+ struct cpuidle_state states[10];
|
|
+ int state_count;
|
|
+ int safe_state_index;
|
|
+ struct cpumask *cpumask;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CFTYPE_ONLY_ON_ROOT = 1,
|
|
+ CFTYPE_NOT_ON_ROOT = 2,
|
|
+ CFTYPE_NS_DELEGATABLE = 4,
|
|
+ CFTYPE_NO_PREFIX = 8,
|
|
+ CFTYPE_WORLD_WRITABLE = 16,
|
|
+ __CFTYPE_ONLY_ON_DFL = 65536,
|
|
+ __CFTYPE_NOT_ON_DFL = 131072,
|
|
+};
|
|
+
|
|
+struct rcu_synchronize {
|
|
+ struct callback_head head;
|
|
+ struct completion completion;
|
|
+};
|
|
+
|
|
+typedef int (*cpu_stop_fn_t)(void *);
|
|
+
|
|
+struct cpu_stop_done;
|
|
+
|
|
+struct cpu_stop_work {
|
|
+ struct list_head list;
|
|
+ cpu_stop_fn_t fn;
|
|
+ void *arg;
|
|
+ struct cpu_stop_done *done;
|
|
+};
|
|
+
|
|
+struct cpupri_vec {
|
|
+ atomic_t count;
|
|
+ cpumask_var_t mask;
|
|
+};
|
|
+
|
|
+struct cpupri {
|
|
+ struct cpupri_vec pri_to_cpu[102];
|
|
+ int *cpu_to_pri;
|
|
+};
|
|
+
|
|
+struct cpudl_item {
|
|
+ u64 dl;
|
|
+ int cpu;
|
|
+ int idx;
|
|
+};
|
|
+
|
|
+struct cpudl {
|
|
+ raw_spinlock_t lock;
|
|
+ int size;
|
|
+ cpumask_var_t free_cpus;
|
|
+ struct cpudl_item *elements;
|
|
+};
|
|
+
|
|
+struct dl_bandwidth {
|
|
+ raw_spinlock_t dl_runtime_lock;
|
|
+ u64 dl_runtime;
|
|
+ u64 dl_period;
|
|
+};
|
|
+
|
|
+struct dl_bw {
|
|
+ raw_spinlock_t lock;
|
|
+ u64 bw;
|
|
+ u64 total_bw;
|
|
+};
|
|
+
|
|
+typedef int (*tg_visitor)(struct task_group *, void *);
|
|
+
|
|
+struct dl_rq {
|
|
+ struct rb_root_cached root;
|
|
+ long unsigned int dl_nr_running;
|
|
+ struct {
|
|
+ u64 curr;
|
|
+ u64 next;
|
|
+ } earliest_dl;
|
|
+ long unsigned int dl_nr_migratory;
|
|
+ int overloaded;
|
|
+ struct rb_root_cached pushable_dl_tasks_root;
|
|
+ u64 running_bw;
|
|
+ u64 this_bw;
|
|
+ u64 extra_bw;
|
|
+ u64 bw_ratio;
|
|
+};
|
|
+
|
|
+struct root_domain;
|
|
+
|
|
+struct rq {
|
|
+ raw_spinlock_t lock;
|
|
+ unsigned int nr_running;
|
|
+ unsigned int nr_numa_running;
|
|
+ unsigned int nr_preferred_running;
|
|
+ unsigned int numa_migrate_on;
|
|
+ long unsigned int cpu_load[5];
|
|
+ long unsigned int last_load_update_tick;
|
|
+ long unsigned int last_blocked_load_update_tick;
|
|
+ unsigned int has_blocked_load;
|
|
+ unsigned int nohz_tick_stopped;
|
|
+ atomic_t nohz_flags;
|
|
+ struct load_weight load;
|
|
+ long unsigned int nr_load_updates;
|
|
+ u64 nr_switches;
|
|
+ struct cfs_rq cfs;
|
|
+ struct rt_rq rt;
|
|
+ struct dl_rq dl;
|
|
+ struct sparsemask *cfs_overload_cpus;
|
|
+ struct list_head leaf_cfs_rq_list;
|
|
+ struct list_head *tmp_alone_branch;
|
|
+ long unsigned int nr_uninterruptible;
|
|
+ struct task_struct *curr;
|
|
+ struct task_struct *idle;
|
|
+ struct task_struct *stop;
|
|
+ long unsigned int next_balance;
|
|
+ struct mm_struct *prev_mm;
|
|
+ unsigned int clock_update_flags;
|
|
+ u64 clock;
|
|
+ u64 clock_task;
|
|
+ atomic_t nr_iowait;
|
|
+ int membarrier_state;
|
|
+ struct root_domain *rd;
|
|
+ struct sched_domain *sd;
|
|
+ long unsigned int cpu_capacity;
|
|
+ long unsigned int cpu_capacity_orig;
|
|
+ struct callback_head *balance_callback;
|
|
+ unsigned char idle_balance;
|
|
+ int active_balance;
|
|
+ int push_cpu;
|
|
+ struct cpu_stop_work active_balance_work;
|
|
+ int cpu;
|
|
+ int online;
|
|
+ struct list_head cfs_tasks;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct sched_avg avg_rt;
|
|
+ struct sched_avg avg_dl;
|
|
+ struct sched_avg avg_irq;
|
|
+ u64 idle_stamp;
|
|
+ u64 avg_idle;
|
|
+ u64 max_idle_balance_cost;
|
|
+ u64 prev_irq_time;
|
|
+ u64 prev_steal_time;
|
|
+ u64 prev_steal_time_rq;
|
|
+ long unsigned int calc_load_update;
|
|
+ long int calc_load_active;
|
|
+ int hrtick_csd_pending;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ call_single_data_t hrtick_csd;
|
|
+ struct hrtimer hrtick_timer;
|
|
+ struct sched_info rq_sched_info;
|
|
+ long long unsigned int rq_cpu_time;
|
|
+ unsigned int yld_count;
|
|
+ unsigned int sched_count;
|
|
+ unsigned int sched_goidle;
|
|
+ unsigned int ttwu_count;
|
|
+ unsigned int ttwu_local;
|
|
+ unsigned int found_idle_core;
|
|
+ unsigned int found_idle_cpu;
|
|
+ unsigned int found_idle_cpu_easy;
|
|
+ unsigned int nofound_idle_cpu;
|
|
+ long unsigned int find_time;
|
|
+ unsigned int steal;
|
|
+ unsigned int steal_fail;
|
|
+ struct llist_head wake_list;
|
|
+ struct cpuidle_state *idle_state;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct root_domain {
|
|
+ atomic_t refcount;
|
|
+ atomic_t rto_count;
|
|
+ struct callback_head rcu;
|
|
+ cpumask_var_t span;
|
|
+ cpumask_var_t online;
|
|
+ bool overload;
|
|
+ cpumask_var_t dlo_mask;
|
|
+ atomic_t dlo_count;
|
|
+ struct dl_bw dl_bw;
|
|
+ struct cpudl cpudl;
|
|
+ struct irq_work rto_push_work;
|
|
+ raw_spinlock_t rto_lock;
|
|
+ int rto_loop;
|
|
+ int rto_cpu;
|
|
+ atomic_t rto_loop_next;
|
|
+ atomic_t rto_loop_start;
|
|
+ cpumask_var_t rto_mask;
|
|
+ struct cpupri cpupri;
|
|
+ long unsigned int max_cpu_capacity;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct cputime {
|
|
+ u64 utime;
|
|
+ u64 stime;
|
|
+};
|
|
+
|
|
+struct rq_cputime {
|
|
+ raw_spinlock_t lock;
|
|
+ long long unsigned int sum_idle_time;
|
|
+ long long unsigned int last_entry_idle;
|
|
+ struct cputime cpu_prev_time;
|
|
+ struct cputime cpu_last_time;
|
|
+};
|
|
+
|
|
+struct rq_flags {
|
|
+ long unsigned int flags;
|
|
+ struct pin_cookie cookie;
|
|
+ unsigned int clock_update_flags;
|
|
+};
|
|
+
|
|
+enum numa_topology_type {
|
|
+ NUMA_DIRECT = 0,
|
|
+ NUMA_GLUELESS_MESH = 1,
|
|
+ NUMA_BACKPLANE = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ __SCHED_FEAT_GENTLE_FAIR_SLEEPERS = 0,
|
|
+ __SCHED_FEAT_START_DEBIT = 1,
|
|
+ __SCHED_FEAT_NEXT_BUDDY = 2,
|
|
+ __SCHED_FEAT_LAST_BUDDY = 3,
|
|
+ __SCHED_FEAT_CACHE_HOT_BUDDY = 4,
|
|
+ __SCHED_FEAT_WAKEUP_PREEMPTION = 5,
|
|
+ __SCHED_FEAT_HRTICK = 6,
|
|
+ __SCHED_FEAT_DOUBLE_TICK = 7,
|
|
+ __SCHED_FEAT_LB_BIAS = 8,
|
|
+ __SCHED_FEAT_NONTASK_CAPACITY = 9,
|
|
+ __SCHED_FEAT_TTWU_QUEUE = 10,
|
|
+ __SCHED_FEAT_SIS_AVG_CPU = 11,
|
|
+ __SCHED_FEAT_SIS_PROP = 12,
|
|
+ __SCHED_FEAT_STEAL = 13,
|
|
+ __SCHED_FEAT_WARN_DOUBLE_CLOCK = 14,
|
|
+ __SCHED_FEAT_RT_PUSH_IPI = 15,
|
|
+ __SCHED_FEAT_RT_RUNTIME_SHARE = 16,
|
|
+ __SCHED_FEAT_LB_MIN = 17,
|
|
+ __SCHED_FEAT_ATTACH_AGE_LOAD = 18,
|
|
+ __SCHED_FEAT_WA_IDLE = 19,
|
|
+ __SCHED_FEAT_WA_WEIGHT = 20,
|
|
+ __SCHED_FEAT_WA_BIAS = 21,
|
|
+ __SCHED_FEAT_UTIL_EST = 22,
|
|
+ __SCHED_FEAT_NR = 23,
|
|
+};
|
|
+
|
|
+struct irqtime {
|
|
+ u64 total;
|
|
+ u64 tick_delta;
|
|
+ u64 irq_start_time;
|
|
+ struct u64_stats_sync sync;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sched_kthread_stop {
|
|
+ struct trace_entry ent;
|
|
+ char comm[16];
|
|
+ pid_t pid;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sched_kthread_stop_ret {
|
|
+ struct trace_entry ent;
|
|
+ int ret;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sched_wakeup_template {
|
|
+ struct trace_entry ent;
|
|
+ char comm[16];
|
|
+ pid_t pid;
|
|
+ int prio;
|
|
+ int success;
|
|
+ int target_cpu;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sched_switch {
|
|
+ struct trace_entry ent;
|
|
+ char prev_comm[16];
|
|
+ pid_t prev_pid;
|
|
+ int prev_prio;
|
|
+ long int prev_state;
|
|
+ char next_comm[16];
|
|
+ pid_t next_pid;
|
|
+ int next_prio;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sched_migrate_task {
|
|
+ struct trace_entry ent;
|
|
+ char comm[16];
|
|
+ pid_t pid;
|
|
+ int prio;
|
|
+ int orig_cpu;
|
|
+ int dest_cpu;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sched_process_template {
|
|
+ struct trace_entry ent;
|
|
+ char comm[16];
|
|
+ pid_t pid;
|
|
+ int prio;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sched_process_wait {
|
|
+ struct trace_entry ent;
|
|
+ char comm[16];
|
|
+ pid_t pid;
|
|
+ int prio;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sched_process_fork {
|
|
+ struct trace_entry ent;
|
|
+ char parent_comm[16];
|
|
+ pid_t parent_pid;
|
|
+ char child_comm[16];
|
|
+ pid_t child_pid;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sched_process_exec {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_filename;
|
|
+ pid_t pid;
|
|
+ pid_t old_pid;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sched_stat_template {
|
|
+ struct trace_entry ent;
|
|
+ char comm[16];
|
|
+ pid_t pid;
|
|
+ u64 delay;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sched_stat_runtime {
|
|
+ struct trace_entry ent;
|
|
+ char comm[16];
|
|
+ pid_t pid;
|
|
+ u64 runtime;
|
|
+ u64 vruntime;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sched_pi_setprio {
|
|
+ struct trace_entry ent;
|
|
+ char comm[16];
|
|
+ pid_t pid;
|
|
+ int oldprio;
|
|
+ int newprio;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sched_process_hang {
|
|
+ struct trace_entry ent;
|
|
+ char comm[16];
|
|
+ pid_t pid;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sched_move_task_template {
|
|
+ struct trace_entry ent;
|
|
+ pid_t pid;
|
|
+ pid_t tgid;
|
|
+ pid_t ngid;
|
|
+ int src_cpu;
|
|
+ int src_nid;
|
|
+ int dst_cpu;
|
|
+ int dst_nid;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sched_swap_numa {
|
|
+ struct trace_entry ent;
|
|
+ pid_t src_pid;
|
|
+ pid_t src_tgid;
|
|
+ pid_t src_ngid;
|
|
+ int src_cpu;
|
|
+ int src_nid;
|
|
+ pid_t dst_pid;
|
|
+ pid_t dst_tgid;
|
|
+ pid_t dst_ngid;
|
|
+ int dst_cpu;
|
|
+ int dst_nid;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sched_wake_idle_without_ipi {
|
|
+ struct trace_entry ent;
|
|
+ int cpu;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_sched_kthread_stop {};
|
|
+
|
|
+struct trace_event_data_offsets_sched_kthread_stop_ret {};
|
|
+
|
|
+struct trace_event_data_offsets_sched_wakeup_template {};
|
|
+
|
|
+struct trace_event_data_offsets_sched_switch {};
|
|
+
|
|
+struct trace_event_data_offsets_sched_migrate_task {};
|
|
+
|
|
+struct trace_event_data_offsets_sched_process_template {};
|
|
+
|
|
+struct trace_event_data_offsets_sched_process_wait {};
|
|
+
|
|
+struct trace_event_data_offsets_sched_process_fork {};
|
|
+
|
|
+struct trace_event_data_offsets_sched_process_exec {
|
|
+ u32 filename;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_sched_stat_template {};
|
|
+
|
|
+struct trace_event_data_offsets_sched_stat_runtime {};
|
|
+
|
|
+struct trace_event_data_offsets_sched_pi_setprio {};
|
|
+
|
|
+struct trace_event_data_offsets_sched_process_hang {};
|
|
+
|
|
+struct trace_event_data_offsets_sched_move_task_template {};
|
|
+
|
|
+struct trace_event_data_offsets_sched_swap_numa {};
|
|
+
|
|
+struct trace_event_data_offsets_sched_wake_idle_without_ipi {};
|
|
+
|
|
+struct migration_arg {
|
|
+ struct task_struct *task;
|
|
+ int dest_cpu;
|
|
+};
|
|
+
|
|
+struct migration_swap_arg {
|
|
+ struct task_struct *src_task;
|
|
+ struct task_struct *dst_task;
|
|
+ int src_cpu;
|
|
+ int dst_cpu;
|
|
+};
|
|
+
|
|
+struct tick_work {
|
|
+ int cpu;
|
|
+ atomic_t state;
|
|
+ struct delayed_work work;
|
|
+};
|
|
+
|
|
+struct cfs_schedulable_data {
|
|
+ struct task_group *tg;
|
|
+ u64 period;
|
|
+ u64 quota;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ cpuset = 0,
|
|
+ possible = 1,
|
|
+ fail = 2,
|
|
+};
|
|
+
|
|
+enum tick_dep_bits {
|
|
+ TICK_DEP_BIT_POSIX_TIMER = 0,
|
|
+ TICK_DEP_BIT_PERF_EVENTS = 1,
|
|
+ TICK_DEP_BIT_SCHED = 2,
|
|
+ TICK_DEP_BIT_CLOCK_UNSTABLE = 3,
|
|
+};
|
|
+
|
|
+struct sched_clock_data {
|
|
+ u64 tick_raw;
|
|
+ u64 tick_gtod;
|
|
+ u64 clock;
|
|
+};
|
|
+
|
|
+typedef u64 pao_T_____5;
|
|
+
|
|
+struct idle_timer {
|
|
+ struct hrtimer timer;
|
|
+ int done;
|
|
+};
|
|
+
|
|
+struct numa_group {
|
|
+ atomic_t refcount;
|
|
+ spinlock_t lock;
|
|
+ int nr_tasks;
|
|
+ pid_t gid;
|
|
+ int active_nodes;
|
|
+ struct callback_head rcu;
|
|
+ long unsigned int total_faults;
|
|
+ long unsigned int max_faults_cpu;
|
|
+ long unsigned int *faults_cpu;
|
|
+ long unsigned int faults[0];
|
|
+};
|
|
+
|
|
+struct sparsemask_chunk {
|
|
+ long unsigned int word;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct sparsemask {
|
|
+ short int nelems;
|
|
+ short int density;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct sparsemask_chunk chunks[0];
|
|
+};
|
|
+
|
|
+enum numa_faults_stats {
|
|
+ NUMA_MEM = 0,
|
|
+ NUMA_CPU = 1,
|
|
+ NUMA_MEMBUF = 2,
|
|
+ NUMA_CPUBUF = 3,
|
|
+};
|
|
+
|
|
+struct numa_stats {
|
|
+ long unsigned int load;
|
|
+ long unsigned int compute_capacity;
|
|
+ unsigned int nr_running;
|
|
+};
|
|
+
|
|
+struct task_numa_env {
|
|
+ struct task_struct *p;
|
|
+ int src_cpu;
|
|
+ int src_nid;
|
|
+ int dst_cpu;
|
|
+ int dst_nid;
|
|
+ struct numa_stats src_stats;
|
|
+ struct numa_stats dst_stats;
|
|
+ int imbalance_pct;
|
|
+ int dist;
|
|
+ struct task_struct *best_task;
|
|
+ long int best_imp;
|
|
+ int best_cpu;
|
|
+};
|
|
+
|
|
+enum fbq_type {
|
|
+ regular = 0,
|
|
+ remote = 1,
|
|
+ all = 2,
|
|
+};
|
|
+
|
|
+struct lb_env {
|
|
+ struct sched_domain *sd;
|
|
+ struct rq *src_rq;
|
|
+ int src_cpu;
|
|
+ int dst_cpu;
|
|
+ struct rq *dst_rq;
|
|
+ struct cpumask *dst_grpmask;
|
|
+ int new_dst_cpu;
|
|
+ enum cpu_idle_type idle;
|
|
+ long int imbalance;
|
|
+ struct cpumask *cpus;
|
|
+ unsigned int flags;
|
|
+ unsigned int loop;
|
|
+ unsigned int loop_break;
|
|
+ unsigned int loop_max;
|
|
+ enum fbq_type fbq_type;
|
|
+ struct list_head tasks;
|
|
+};
|
|
+
|
|
+enum group_type {
|
|
+ group_other = 0,
|
|
+ group_imbalanced = 1,
|
|
+ group_overloaded = 2,
|
|
+};
|
|
+
|
|
+struct sg_lb_stats {
|
|
+ long unsigned int avg_load;
|
|
+ long unsigned int group_load;
|
|
+ long unsigned int sum_weighted_load;
|
|
+ long unsigned int load_per_task;
|
|
+ long unsigned int group_capacity;
|
|
+ long unsigned int group_util;
|
|
+ unsigned int sum_nr_running;
|
|
+ unsigned int idle_cpus;
|
|
+ unsigned int group_weight;
|
|
+ enum group_type group_type;
|
|
+ int group_no_capacity;
|
|
+ unsigned int nr_numa_running;
|
|
+ unsigned int nr_preferred_running;
|
|
+};
|
|
+
|
|
+struct sd_lb_stats {
|
|
+ struct sched_group *busiest;
|
|
+ struct sched_group *local;
|
|
+ long unsigned int total_running;
|
|
+ long unsigned int total_load;
|
|
+ long unsigned int total_capacity;
|
|
+ long unsigned int avg_load;
|
|
+ struct sg_lb_stats busiest_stat;
|
|
+ struct sg_lb_stats local_stat;
|
|
+};
|
|
+
|
|
+typedef struct task_group *rt_rq_iter_t;
|
|
+
|
|
+struct rt_schedulable_data {
|
|
+ struct task_group *tg;
|
|
+ u64 rt_period;
|
|
+ u64 rt_runtime;
|
|
+};
|
|
+
|
|
+struct wait_bit_key {
|
|
+ void *flags;
|
|
+ int bit_nr;
|
|
+ long unsigned int timeout;
|
|
+};
|
|
+
|
|
+struct wait_bit_queue_entry {
|
|
+ struct wait_bit_key key;
|
|
+ struct wait_queue_entry wq_entry;
|
|
+};
|
|
+
|
|
+typedef int wait_bit_action_f(struct wait_bit_key *, int);
|
|
+
|
|
+struct sched_domain_attr {
|
|
+ int relax_domain_level;
|
|
+};
|
|
+
|
|
+struct s_data {
|
|
+ struct sched_domain **sd;
|
|
+ struct root_domain *rd;
|
|
+};
|
|
+
|
|
+enum s_alloc {
|
|
+ sa_rootdomain = 0,
|
|
+ sa_sd = 1,
|
|
+ sa_sd_storage = 2,
|
|
+ sa_none = 3,
|
|
+};
|
|
+
|
|
+enum cpuacct_stat_index {
|
|
+ CPUACCT_STAT_USER = 0,
|
|
+ CPUACCT_STAT_SYSTEM = 1,
|
|
+ CPUACCT_STAT_NSTATS = 2,
|
|
+};
|
|
+
|
|
+struct cpuacct_usage {
|
|
+ u64 usages[2];
|
|
+};
|
|
+
|
|
+struct cpuacct {
|
|
+ struct cgroup_subsys_state css;
|
|
+ struct cpuacct_usage *cpuusage;
|
|
+ struct kernel_cpustat *cpustat;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ MEMBARRIER_FLAG_SYNC_CORE = 1,
|
|
+};
|
|
+
|
|
+enum membarrier_cmd {
|
|
+ MEMBARRIER_CMD_QUERY = 0,
|
|
+ MEMBARRIER_CMD_GLOBAL = 1,
|
|
+ MEMBARRIER_CMD_GLOBAL_EXPEDITED = 2,
|
|
+ MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = 4,
|
|
+ MEMBARRIER_CMD_PRIVATE_EXPEDITED = 8,
|
|
+ MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = 16,
|
|
+ MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = 32,
|
|
+ MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = 64,
|
|
+ MEMBARRIER_CMD_SHARED = 1,
|
|
+};
|
|
+
|
|
+struct ww_acquire_ctx;
|
|
+
|
|
+struct mutex_waiter {
|
|
+ struct list_head list;
|
|
+ struct task_struct *task;
|
|
+ struct ww_acquire_ctx *ww_ctx;
|
|
+};
|
|
+
|
|
+struct ww_acquire_ctx {
|
|
+ struct task_struct *task;
|
|
+ long unsigned int stamp;
|
|
+ unsigned int acquired;
|
|
+ short unsigned int wounded;
|
|
+ short unsigned int is_wait_die;
|
|
+};
|
|
+
|
|
+struct ww_mutex {
|
|
+ struct mutex base;
|
|
+ struct ww_acquire_ctx *ctx;
|
|
+};
|
|
+
|
|
+struct semaphore_waiter {
|
|
+ struct list_head list;
|
|
+ struct task_struct *task;
|
|
+ bool up;
|
|
+};
|
|
+
|
|
+struct optimistic_spin_node {
|
|
+ struct optimistic_spin_node *next;
|
|
+ struct optimistic_spin_node *prev;
|
|
+ int locked;
|
|
+ int cpu;
|
|
+};
|
|
+
|
|
+enum qlock_stats {
|
|
+ qstat_pv_hash_hops = 0,
|
|
+ qstat_pv_kick_unlock = 1,
|
|
+ qstat_pv_kick_wake = 2,
|
|
+ qstat_pv_latency_kick = 3,
|
|
+ qstat_pv_latency_wake = 4,
|
|
+ qstat_pv_lock_stealing = 5,
|
|
+ qstat_pv_spurious_wakeup = 6,
|
|
+ qstat_pv_wait_again = 7,
|
|
+ qstat_pv_wait_early = 8,
|
|
+ qstat_pv_wait_head = 9,
|
|
+ qstat_pv_wait_node = 10,
|
|
+ qstat_lock_pending = 11,
|
|
+ qstat_lock_slowpath = 12,
|
|
+ qstat_lock_use_node2 = 13,
|
|
+ qstat_lock_use_node3 = 14,
|
|
+ qstat_lock_use_node4 = 15,
|
|
+ qstat_lock_no_node = 16,
|
|
+ qstat_num = 17,
|
|
+ qstat_reset_cnts = 17,
|
|
+};
|
|
+
|
|
+struct mcs_spinlock {
|
|
+ struct mcs_spinlock *next;
|
|
+ unsigned int locked;
|
|
+ int count;
|
|
+};
|
|
+
|
|
+struct qnode {
|
|
+ struct mcs_spinlock mcs;
|
|
+ long int reserved[2];
|
|
+};
|
|
+
|
|
+enum vcpu_state {
|
|
+ vcpu_running = 0,
|
|
+ vcpu_halted = 1,
|
|
+ vcpu_hashed = 2,
|
|
+};
|
|
+
|
|
+struct pv_node {
|
|
+ struct mcs_spinlock mcs;
|
|
+ int cpu;
|
|
+ u8 state;
|
|
+};
|
|
+
|
|
+struct pv_hash_entry {
|
|
+ struct qspinlock *lock;
|
|
+ struct pv_node *node;
|
|
+};
|
|
+
|
|
+struct hrtimer_sleeper {
|
|
+ struct hrtimer timer;
|
|
+ struct task_struct *task;
|
|
+};
|
|
+
|
|
+struct rt_mutex;
|
|
+
|
|
+struct rt_mutex_waiter {
|
|
+ struct rb_node tree_entry;
|
|
+ struct rb_node pi_tree_entry;
|
|
+ struct task_struct *task;
|
|
+ struct rt_mutex *lock;
|
|
+ int prio;
|
|
+ u64 deadline;
|
|
+};
|
|
+
|
|
+struct rt_mutex {
|
|
+ raw_spinlock_t wait_lock;
|
|
+ struct rb_root_cached waiters;
|
|
+ struct task_struct *owner;
|
|
+};
|
|
+
|
|
+enum rtmutex_chainwalk {
|
|
+ RT_MUTEX_MIN_CHAINWALK = 0,
|
|
+ RT_MUTEX_FULL_CHAINWALK = 1,
|
|
+};
|
|
+
|
|
+enum rwsem_waiter_type {
|
|
+ RWSEM_WAITING_FOR_WRITE = 0,
|
|
+ RWSEM_WAITING_FOR_READ = 1,
|
|
+};
|
|
+
|
|
+struct rwsem_waiter {
|
|
+ struct list_head list;
|
|
+ struct task_struct *task;
|
|
+ enum rwsem_waiter_type type;
|
|
+};
|
|
+
|
|
+enum rwsem_wake_type {
|
|
+ RWSEM_WAKE_ANY = 0,
|
|
+ RWSEM_WAKE_READERS = 1,
|
|
+ RWSEM_WAKE_READ_OWNED = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ PM_QOS_RESERVED = 0,
|
|
+ PM_QOS_CPU_DMA_LATENCY = 1,
|
|
+ PM_QOS_NETWORK_LATENCY = 2,
|
|
+ PM_QOS_NETWORK_THROUGHPUT = 3,
|
|
+ PM_QOS_MEMORY_BANDWIDTH = 4,
|
|
+ PM_QOS_NUM_CLASSES = 5,
|
|
+};
|
|
+
|
|
+struct pm_qos_request {
|
|
+ struct plist_node node;
|
|
+ int pm_qos_class;
|
|
+ struct delayed_work work;
|
|
+};
|
|
+
|
|
+enum pm_qos_req_action {
|
|
+ PM_QOS_ADD_REQ = 0,
|
|
+ PM_QOS_UPDATE_REQ = 1,
|
|
+ PM_QOS_REMOVE_REQ = 2,
|
|
+};
|
|
+
|
|
+struct pm_qos_object {
|
|
+ struct pm_qos_constraints *constraints;
|
|
+ struct miscdevice pm_qos_power_miscdev;
|
|
+ char *name;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TEST_NONE = 0,
|
|
+ TEST_CORE = 1,
|
|
+ TEST_CPUS = 2,
|
|
+ TEST_PLATFORM = 3,
|
|
+ TEST_DEVICES = 4,
|
|
+ TEST_FREEZER = 5,
|
|
+ __TEST_AFTER_LAST = 6,
|
|
+};
|
|
+
|
|
+struct pm_vt_switch {
|
|
+ struct list_head head;
|
|
+ struct device *dev;
|
|
+ bool required;
|
|
+};
|
|
+
|
|
+struct platform_suspend_ops {
|
|
+ int (*valid)(suspend_state_t);
|
|
+ int (*begin)(suspend_state_t);
|
|
+ int (*prepare)();
|
|
+ int (*prepare_late)();
|
|
+ int (*enter)(suspend_state_t);
|
|
+ void (*wake)();
|
|
+ void (*finish)();
|
|
+ bool (*suspend_again)();
|
|
+ void (*end)();
|
|
+ void (*recover)();
|
|
+};
|
|
+
|
|
+struct platform_s2idle_ops {
|
|
+ int (*begin)();
|
|
+ int (*prepare)();
|
|
+ void (*wake)();
|
|
+ void (*sync)();
|
|
+ void (*restore)();
|
|
+ void (*end)();
|
|
+};
|
|
+
|
|
+struct platform_hibernation_ops {
|
|
+ int (*begin)();
|
|
+ void (*end)();
|
|
+ int (*pre_snapshot)();
|
|
+ void (*finish)();
|
|
+ int (*prepare)();
|
|
+ int (*enter)();
|
|
+ void (*leave)();
|
|
+ int (*pre_restore)();
|
|
+ void (*restore_cleanup)();
|
|
+ void (*recover)();
|
|
+};
|
|
+
|
|
+enum {
|
|
+ HIBERNATION_INVALID = 0,
|
|
+ HIBERNATION_PLATFORM = 1,
|
|
+ HIBERNATION_SHUTDOWN = 2,
|
|
+ HIBERNATION_REBOOT = 3,
|
|
+ HIBERNATION_SUSPEND = 4,
|
|
+ HIBERNATION_TEST_RESUME = 5,
|
|
+ __HIBERNATION_AFTER_LAST = 6,
|
|
+};
|
|
+
|
|
+struct swsusp_info {
|
|
+ struct new_utsname uts;
|
|
+ u32 version_code;
|
|
+ long unsigned int num_physpages;
|
|
+ int cpus;
|
|
+ long unsigned int image_pages;
|
|
+ long unsigned int pages;
|
|
+ long unsigned int size;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct snapshot_handle {
|
|
+ unsigned int cur;
|
|
+ void *buffer;
|
|
+ int sync_read;
|
|
+};
|
|
+
|
|
+struct linked_page {
|
|
+ struct linked_page *next;
|
|
+ char data[4088];
|
|
+};
|
|
+
|
|
+struct chain_allocator {
|
|
+ struct linked_page *chain;
|
|
+ unsigned int used_space;
|
|
+ gfp_t gfp_mask;
|
|
+ int safe_needed;
|
|
+};
|
|
+
|
|
+struct rtree_node {
|
|
+ struct list_head list;
|
|
+ long unsigned int *data;
|
|
+};
|
|
+
|
|
+struct mem_zone_bm_rtree {
|
|
+ struct list_head list;
|
|
+ struct list_head nodes;
|
|
+ struct list_head leaves;
|
|
+ long unsigned int start_pfn;
|
|
+ long unsigned int end_pfn;
|
|
+ struct rtree_node *rtree;
|
|
+ int levels;
|
|
+ unsigned int blocks;
|
|
+};
|
|
+
|
|
+struct bm_position {
|
|
+ struct mem_zone_bm_rtree *zone;
|
|
+ struct rtree_node *node;
|
|
+ long unsigned int node_pfn;
|
|
+ int node_bit;
|
|
+};
|
|
+
|
|
+struct memory_bitmap {
|
|
+ struct list_head zones;
|
|
+ struct linked_page *p_list;
|
|
+ struct bm_position cur;
|
|
+};
|
|
+
|
|
+struct mem_extent {
|
|
+ struct list_head hook;
|
|
+ long unsigned int start;
|
|
+ long unsigned int end;
|
|
+};
|
|
+
|
|
+struct nosave_region {
|
|
+ struct list_head list;
|
|
+ long unsigned int start_pfn;
|
|
+ long unsigned int end_pfn;
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ long unsigned int val;
|
|
+} swp_entry_t;
|
|
+
|
|
+enum req_opf {
|
|
+ REQ_OP_READ = 0,
|
|
+ REQ_OP_WRITE = 1,
|
|
+ REQ_OP_FLUSH = 2,
|
|
+ REQ_OP_DISCARD = 3,
|
|
+ REQ_OP_ZONE_REPORT = 4,
|
|
+ REQ_OP_SECURE_ERASE = 5,
|
|
+ REQ_OP_ZONE_RESET = 6,
|
|
+ REQ_OP_WRITE_SAME = 7,
|
|
+ REQ_OP_WRITE_ZEROES = 9,
|
|
+ REQ_OP_SCSI_IN = 32,
|
|
+ REQ_OP_SCSI_OUT = 33,
|
|
+ REQ_OP_DRV_IN = 34,
|
|
+ REQ_OP_DRV_OUT = 35,
|
|
+ REQ_OP_LAST = 36,
|
|
+};
|
|
+
|
|
+enum req_flag_bits {
|
|
+ __REQ_FAILFAST_DEV = 8,
|
|
+ __REQ_FAILFAST_TRANSPORT = 9,
|
|
+ __REQ_FAILFAST_DRIVER = 10,
|
|
+ __REQ_SYNC = 11,
|
|
+ __REQ_META = 12,
|
|
+ __REQ_PRIO = 13,
|
|
+ __REQ_NOMERGE = 14,
|
|
+ __REQ_IDLE = 15,
|
|
+ __REQ_INTEGRITY = 16,
|
|
+ __REQ_FUA = 17,
|
|
+ __REQ_PREFLUSH = 18,
|
|
+ __REQ_RAHEAD = 19,
|
|
+ __REQ_BACKGROUND = 20,
|
|
+ __REQ_NOWAIT = 21,
|
|
+ __REQ_NOUNMAP = 22,
|
|
+ __REQ_DRV = 23,
|
|
+ __REQ_SWAP = 24,
|
|
+ __REQ_NR_BITS = 25,
|
|
+};
|
|
+
|
|
+struct swap_map_page {
|
|
+ sector_t entries[511];
|
|
+ sector_t next_swap;
|
|
+};
|
|
+
|
|
+struct swap_map_page_list {
|
|
+ struct swap_map_page *map;
|
|
+ struct swap_map_page_list *next;
|
|
+};
|
|
+
|
|
+struct swap_map_handle {
|
|
+ struct swap_map_page *cur;
|
|
+ struct swap_map_page_list *maps;
|
|
+ sector_t cur_swap;
|
|
+ sector_t first_sector;
|
|
+ unsigned int k;
|
|
+ long unsigned int reqd_free_pages;
|
|
+ u32 crc32;
|
|
+};
|
|
+
|
|
+struct swsusp_header {
|
|
+ char reserved[4060];
|
|
+ u32 crc32;
|
|
+ sector_t image;
|
|
+ unsigned int flags;
|
|
+ char orig_sig[10];
|
|
+ char sig[10];
|
|
+};
|
|
+
|
|
+struct swsusp_extent {
|
|
+ struct rb_node node;
|
|
+ long unsigned int start;
|
|
+ long unsigned int end;
|
|
+};
|
|
+
|
|
+struct hib_bio_batch {
|
|
+ atomic_t count;
|
|
+ wait_queue_head_t wait;
|
|
+ blk_status_t error;
|
|
+};
|
|
+
|
|
+struct crc_data {
|
|
+ struct task_struct *thr;
|
|
+ atomic_t ready;
|
|
+ atomic_t stop;
|
|
+ unsigned int run_threads;
|
|
+ wait_queue_head_t go;
|
|
+ wait_queue_head_t done;
|
|
+ u32 *crc32;
|
|
+ size_t *unc_len[3];
|
|
+ unsigned char *unc[3];
|
|
+};
|
|
+
|
|
+struct cmp_data {
|
|
+ struct task_struct *thr;
|
|
+ atomic_t ready;
|
|
+ atomic_t stop;
|
|
+ int ret;
|
|
+ wait_queue_head_t go;
|
|
+ wait_queue_head_t done;
|
|
+ size_t unc_len;
|
|
+ size_t cmp_len;
|
|
+ unsigned char unc[131072];
|
|
+ unsigned char cmp[143360];
|
|
+ unsigned char wrk[16384];
|
|
+};
|
|
+
|
|
+struct dec_data {
|
|
+ struct task_struct *thr;
|
|
+ atomic_t ready;
|
|
+ atomic_t stop;
|
|
+ int ret;
|
|
+ wait_queue_head_t go;
|
|
+ wait_queue_head_t done;
|
|
+ size_t unc_len;
|
|
+ size_t cmp_len;
|
|
+ unsigned char unc[131072];
|
|
+ unsigned char cmp[143360];
|
|
+};
|
|
+
|
|
+typedef s64 compat_loff_t;
|
|
+
|
|
+struct resume_swap_area {
|
|
+ __kernel_loff_t offset;
|
|
+ __u32 dev;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct snapshot_data {
|
|
+ struct snapshot_handle handle;
|
|
+ int swap;
|
|
+ int mode;
|
|
+ bool frozen;
|
|
+ bool ready;
|
|
+ bool platform_support;
|
|
+ bool free_bitmaps;
|
|
+};
|
|
+
|
|
+struct compat_resume_swap_area {
|
|
+ compat_loff_t offset;
|
|
+ u32 dev;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct sysrq_key_op {
|
|
+ void (*handler)(int);
|
|
+ char *help_msg;
|
|
+ char *action_msg;
|
|
+ int enable_mask;
|
|
+};
|
|
+
|
|
+enum kdb_msgsrc {
|
|
+ KDB_MSGSRC_INTERNAL = 0,
|
|
+ KDB_MSGSRC_PRINTK = 1,
|
|
+};
|
|
+
|
|
+struct kmsg_dumper {
|
|
+ struct list_head list;
|
|
+ void (*dump)(struct kmsg_dumper *, enum kmsg_dump_reason);
|
|
+ enum kmsg_dump_reason max_reason;
|
|
+ bool active;
|
|
+ bool registered;
|
|
+ u32 cur_idx;
|
|
+ u32 next_idx;
|
|
+ u64 cur_seq;
|
|
+ u64 next_seq;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_console {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_msg;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_console {
|
|
+ u32 msg;
|
|
+};
|
|
+
|
|
+struct console_cmdline {
|
|
+ char name[16];
|
|
+ int index;
|
|
+ char *options;
|
|
+};
|
|
+
|
|
+enum devkmsg_log_bits {
|
|
+ __DEVKMSG_LOG_BIT_ON = 0,
|
|
+ __DEVKMSG_LOG_BIT_OFF = 1,
|
|
+ __DEVKMSG_LOG_BIT_LOCK = 2,
|
|
+};
|
|
+
|
|
+enum devkmsg_log_masks {
|
|
+ DEVKMSG_LOG_MASK_ON = 1,
|
|
+ DEVKMSG_LOG_MASK_OFF = 2,
|
|
+ DEVKMSG_LOG_MASK_LOCK = 4,
|
|
+};
|
|
+
|
|
+enum con_msg_format_flags {
|
|
+ MSG_FORMAT_DEFAULT = 0,
|
|
+ MSG_FORMAT_SYSLOG = 1,
|
|
+};
|
|
+
|
|
+enum log_flags {
|
|
+ LOG_NEWLINE = 2,
|
|
+ LOG_PREFIX = 4,
|
|
+ LOG_CONT = 8,
|
|
+};
|
|
+
|
|
+struct printk_log {
|
|
+ u64 ts_nsec;
|
|
+ u16 len;
|
|
+ u16 text_len;
|
|
+ u16 dict_len;
|
|
+ u8 facility;
|
|
+ u8 flags: 5;
|
|
+ u8 level: 3;
|
|
+};
|
|
+
|
|
+struct devkmsg_user {
|
|
+ u64 seq;
|
|
+ u32 idx;
|
|
+ struct ratelimit_state rs;
|
|
+ struct mutex lock;
|
|
+ char buf[8192];
|
|
+};
|
|
+
|
|
+struct cont {
|
|
+ char buf[992];
|
|
+ size_t len;
|
|
+ struct task_struct *owner;
|
|
+ u64 ts_nsec;
|
|
+ u8 level;
|
|
+ u8 facility;
|
|
+ enum log_flags flags;
|
|
+};
|
|
+
|
|
+struct printk_safe_seq_buf {
|
|
+ atomic_t len;
|
|
+ atomic_t message_lost;
|
|
+ struct irq_work work;
|
|
+ unsigned char buffer[8160];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ _IRQ_DEFAULT_INIT_FLAGS = 0,
|
|
+ _IRQ_PER_CPU = 512,
|
|
+ _IRQ_LEVEL = 256,
|
|
+ _IRQ_NOPROBE = 1024,
|
|
+ _IRQ_NOREQUEST = 2048,
|
|
+ _IRQ_NOTHREAD = 65536,
|
|
+ _IRQ_NOAUTOEN = 4096,
|
|
+ _IRQ_MOVE_PCNTXT = 16384,
|
|
+ _IRQ_NO_BALANCING = 8192,
|
|
+ _IRQ_NESTED_THREAD = 32768,
|
|
+ _IRQ_PER_CPU_DEVID = 131072,
|
|
+ _IRQ_IS_POLLED = 262144,
|
|
+ _IRQ_DISABLE_UNLAZY = 524288,
|
|
+ _IRQF_MODIFY_MASK = 1048335,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IRQTF_RUNTHREAD = 0,
|
|
+ IRQTF_WARNED = 1,
|
|
+ IRQTF_AFFINITY = 2,
|
|
+ IRQTF_FORCED_THREAD = 3,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IRQS_AUTODETECT = 1,
|
|
+ IRQS_SPURIOUS_DISABLED = 2,
|
|
+ IRQS_POLL_INPROGRESS = 8,
|
|
+ IRQS_ONESHOT = 32,
|
|
+ IRQS_REPLAY = 64,
|
|
+ IRQS_WAITING = 128,
|
|
+ IRQS_PENDING = 512,
|
|
+ IRQS_SUSPENDED = 2048,
|
|
+ IRQS_TIMINGS = 4096,
|
|
+ IRQS_NMI = 8192,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IRQC_IS_HARDIRQ = 0,
|
|
+ IRQC_IS_NESTED = 1,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IRQ_STARTUP_NORMAL = 0,
|
|
+ IRQ_STARTUP_MANAGED = 1,
|
|
+ IRQ_STARTUP_ABORT = 2,
|
|
+};
|
|
+
|
|
+struct irq_devres {
|
|
+ unsigned int irq;
|
|
+ void *dev_id;
|
|
+};
|
|
+
|
|
+struct irq_desc_devres {
|
|
+ unsigned int from;
|
|
+ unsigned int cnt;
|
|
+};
|
|
+
|
|
+typedef u64 acpi_size;
|
|
+
|
|
+typedef u64 acpi_io_address;
|
|
+
|
|
+typedef u32 acpi_object_type;
|
|
+
|
|
+union acpi_object {
|
|
+ acpi_object_type type;
|
|
+ struct {
|
|
+ acpi_object_type type;
|
|
+ u64 value;
|
|
+ } integer;
|
|
+ struct {
|
|
+ acpi_object_type type;
|
|
+ u32 length;
|
|
+ char *pointer;
|
|
+ } string;
|
|
+ struct {
|
|
+ acpi_object_type type;
|
|
+ u32 length;
|
|
+ u8 *pointer;
|
|
+ } buffer;
|
|
+ struct {
|
|
+ acpi_object_type type;
|
|
+ u32 count;
|
|
+ union acpi_object *elements;
|
|
+ } package;
|
|
+ struct {
|
|
+ acpi_object_type type;
|
|
+ acpi_object_type actual_type;
|
|
+ acpi_handle handle;
|
|
+ } reference;
|
|
+ struct {
|
|
+ acpi_object_type type;
|
|
+ u32 proc_id;
|
|
+ acpi_io_address pblk_address;
|
|
+ u32 pblk_length;
|
|
+ } processor;
|
|
+ struct {
|
|
+ acpi_object_type type;
|
|
+ u32 system_level;
|
|
+ u32 resource_order;
|
|
+ } power_resource;
|
|
+};
|
|
+
|
|
+struct acpi_buffer {
|
|
+ acpi_size length;
|
|
+ void *pointer;
|
|
+};
|
|
+
|
|
+struct acpi_hotplug_profile {
|
|
+ struct kobject kobj;
|
|
+ int (*scan_dependent)(struct acpi_device *);
|
|
+ void (*notify_online)(struct acpi_device *);
|
|
+ bool enabled: 1;
|
|
+ bool demand_offline: 1;
|
|
+};
|
|
+
|
|
+struct acpi_device_status {
|
|
+ u32 present: 1;
|
|
+ u32 enabled: 1;
|
|
+ u32 show_in_ui: 1;
|
|
+ u32 functional: 1;
|
|
+ u32 battery_present: 1;
|
|
+ u32 reserved: 27;
|
|
+};
|
|
+
|
|
+struct acpi_device_flags {
|
|
+ u32 dynamic_status: 1;
|
|
+ u32 removable: 1;
|
|
+ u32 ejectable: 1;
|
|
+ u32 power_manageable: 1;
|
|
+ u32 match_driver: 1;
|
|
+ u32 initialized: 1;
|
|
+ u32 visited: 1;
|
|
+ u32 hotplug_notify: 1;
|
|
+ u32 is_dock_station: 1;
|
|
+ u32 of_compatible_ok: 1;
|
|
+ u32 coherent_dma: 1;
|
|
+ u32 cca_seen: 1;
|
|
+ u32 enumeration_by_parent: 1;
|
|
+ u32 reserved: 19;
|
|
+};
|
|
+
|
|
+typedef char acpi_bus_id[8];
|
|
+
|
|
+struct acpi_pnp_type {
|
|
+ u32 hardware_id: 1;
|
|
+ u32 bus_address: 1;
|
|
+ u32 platform_id: 1;
|
|
+ u32 reserved: 29;
|
|
+};
|
|
+
|
|
+typedef long unsigned int acpi_bus_address;
|
|
+
|
|
+typedef char acpi_device_name[40];
|
|
+
|
|
+typedef char acpi_device_class[20];
|
|
+
|
|
+struct acpi_device_pnp {
|
|
+ acpi_bus_id bus_id;
|
|
+ struct acpi_pnp_type type;
|
|
+ acpi_bus_address bus_address;
|
|
+ char *unique_id;
|
|
+ struct list_head ids;
|
|
+ acpi_device_name device_name;
|
|
+ acpi_device_class device_class;
|
|
+ union acpi_object *str_obj;
|
|
+};
|
|
+
|
|
+struct acpi_device_power_flags {
|
|
+ u32 explicit_get: 1;
|
|
+ u32 power_resources: 1;
|
|
+ u32 inrush_current: 1;
|
|
+ u32 power_removed: 1;
|
|
+ u32 ignore_parent: 1;
|
|
+ u32 dsw_present: 1;
|
|
+ u32 reserved: 26;
|
|
+};
|
|
+
|
|
+struct acpi_device_power_state {
|
|
+ struct {
|
|
+ u8 valid: 1;
|
|
+ u8 explicit_set: 1;
|
|
+ u8 reserved: 6;
|
|
+ } flags;
|
|
+ int power;
|
|
+ int latency;
|
|
+ struct list_head resources;
|
|
+};
|
|
+
|
|
+struct acpi_device_power {
|
|
+ int state;
|
|
+ struct acpi_device_power_flags flags;
|
|
+ struct acpi_device_power_state states[5];
|
|
+};
|
|
+
|
|
+struct acpi_device_wakeup_flags {
|
|
+ u8 valid: 1;
|
|
+ u8 notifier_present: 1;
|
|
+};
|
|
+
|
|
+struct acpi_device_wakeup_context {
|
|
+ void (*func)(struct acpi_device_wakeup_context *);
|
|
+ struct device *dev;
|
|
+};
|
|
+
|
|
+struct acpi_device_wakeup {
|
|
+ acpi_handle gpe_device;
|
|
+ u64 gpe_number;
|
|
+ u64 sleep_state;
|
|
+ struct list_head resources;
|
|
+ struct acpi_device_wakeup_flags flags;
|
|
+ struct acpi_device_wakeup_context context;
|
|
+ struct wakeup_source *ws;
|
|
+ int prepare_count;
|
|
+ int enable_count;
|
|
+};
|
|
+
|
|
+struct acpi_device_perf_flags {
|
|
+ u8 reserved: 8;
|
|
+};
|
|
+
|
|
+struct acpi_device_perf_state;
|
|
+
|
|
+struct acpi_device_perf {
|
|
+ int state;
|
|
+ struct acpi_device_perf_flags flags;
|
|
+ int state_count;
|
|
+ struct acpi_device_perf_state *states;
|
|
+};
|
|
+
|
|
+struct acpi_device_dir {
|
|
+ struct proc_dir_entry *entry;
|
|
+};
|
|
+
|
|
+struct acpi_device_data {
|
|
+ const union acpi_object *pointer;
|
|
+ const union acpi_object *properties;
|
|
+ const union acpi_object *of_compatible;
|
|
+ struct list_head subnodes;
|
|
+};
|
|
+
|
|
+struct acpi_scan_handler;
|
|
+
|
|
+struct acpi_hotplug_context;
|
|
+
|
|
+struct acpi_driver;
|
|
+
|
|
+struct acpi_gpio_mapping;
|
|
+
|
|
+struct acpi_device {
|
|
+ int device_type;
|
|
+ acpi_handle handle;
|
|
+ struct fwnode_handle fwnode;
|
|
+ struct acpi_device *parent;
|
|
+ struct list_head children;
|
|
+ struct list_head node;
|
|
+ struct list_head wakeup_list;
|
|
+ struct list_head del_list;
|
|
+ struct acpi_device_status status;
|
|
+ struct acpi_device_flags flags;
|
|
+ struct acpi_device_pnp pnp;
|
|
+ struct acpi_device_power power;
|
|
+ struct acpi_device_wakeup wakeup;
|
|
+ struct acpi_device_perf performance;
|
|
+ struct acpi_device_dir dir;
|
|
+ struct acpi_device_data data;
|
|
+ struct acpi_scan_handler *handler;
|
|
+ struct acpi_hotplug_context *hp;
|
|
+ struct acpi_driver *driver;
|
|
+ const struct acpi_gpio_mapping *driver_gpios;
|
|
+ void *driver_data;
|
|
+ struct device dev;
|
|
+ unsigned int physical_node_count;
|
|
+ unsigned int dep_unmet;
|
|
+ struct list_head physical_node_list;
|
|
+ struct mutex physical_node_lock;
|
|
+ void (*remove)(struct acpi_device *);
|
|
+};
|
|
+
|
|
+struct acpi_scan_handler {
|
|
+ const struct acpi_device_id *ids;
|
|
+ struct list_head list_node;
|
|
+ bool (*match)(const char *, const struct acpi_device_id **);
|
|
+ int (*attach)(struct acpi_device *, const struct acpi_device_id *);
|
|
+ void (*detach)(struct acpi_device *);
|
|
+ void (*bind)(struct device *);
|
|
+ void (*unbind)(struct device *);
|
|
+ struct acpi_hotplug_profile hotplug;
|
|
+};
|
|
+
|
|
+struct acpi_hotplug_context {
|
|
+ struct acpi_device *self;
|
|
+ int (*notify)(struct acpi_device *, u32);
|
|
+ void (*uevent)(struct acpi_device *, u32);
|
|
+ void (*fixup)(struct acpi_device *);
|
|
+};
|
|
+
|
|
+typedef int (*acpi_op_add)(struct acpi_device *);
|
|
+
|
|
+typedef int (*acpi_op_remove)(struct acpi_device *);
|
|
+
|
|
+typedef void (*acpi_op_notify)(struct acpi_device *, u32);
|
|
+
|
|
+struct acpi_device_ops {
|
|
+ acpi_op_add add;
|
|
+ acpi_op_remove remove;
|
|
+ acpi_op_notify notify;
|
|
+};
|
|
+
|
|
+struct acpi_driver {
|
|
+ char name[80];
|
|
+ char class[80];
|
|
+ const struct acpi_device_id *ids;
|
|
+ unsigned int flags;
|
|
+ struct acpi_device_ops ops;
|
|
+ struct device_driver drv;
|
|
+ struct module *owner;
|
|
+};
|
|
+
|
|
+struct acpi_device_perf_state {
|
|
+ struct {
|
|
+ u8 valid: 1;
|
|
+ u8 reserved: 7;
|
|
+ } flags;
|
|
+ u8 power;
|
|
+ u8 performance;
|
|
+ int latency;
|
|
+};
|
|
+
|
|
+struct acpi_gpio_params;
|
|
+
|
|
+struct acpi_gpio_mapping {
|
|
+ const char *name;
|
|
+ const struct acpi_gpio_params *data;
|
|
+ unsigned int size;
|
|
+ unsigned int quirks;
|
|
+};
|
|
+
|
|
+struct acpi_gpio_params {
|
|
+ unsigned int crs_entry_index;
|
|
+ unsigned int line_index;
|
|
+ bool active_low;
|
|
+};
|
|
+
|
|
+struct of_phandle_args {
|
|
+ struct device_node *np;
|
|
+ int args_count;
|
|
+ uint32_t args[16];
|
|
+};
|
|
+
|
|
+struct irqchip_fwid {
|
|
+ struct fwnode_handle fwnode;
|
|
+ unsigned int type;
|
|
+ char *name;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ AFFINITY = 0,
|
|
+ AFFINITY_LIST = 1,
|
|
+ EFFECTIVE = 2,
|
|
+ EFFECTIVE_LIST = 3,
|
|
+};
|
|
+
|
|
+struct irq_affinity {
|
|
+ int pre_vectors;
|
|
+ int post_vectors;
|
|
+};
|
|
+
|
|
+struct cpumap {
|
|
+ unsigned int available;
|
|
+ unsigned int allocated;
|
|
+ unsigned int managed;
|
|
+ unsigned int managed_allocated;
|
|
+ bool initialized;
|
|
+ bool online;
|
|
+ long unsigned int alloc_map[4];
|
|
+ long unsigned int managed_map[4];
|
|
+};
|
|
+
|
|
+struct irq_matrix___2 {
|
|
+ unsigned int matrix_bits;
|
|
+ unsigned int alloc_start;
|
|
+ unsigned int alloc_end;
|
|
+ unsigned int alloc_size;
|
|
+ unsigned int global_available;
|
|
+ unsigned int global_reserved;
|
|
+ unsigned int systembits_inalloc;
|
|
+ unsigned int total_allocated;
|
|
+ unsigned int online_maps;
|
|
+ struct cpumap *maps;
|
|
+ long unsigned int scratch_map[4];
|
|
+ long unsigned int system_map[4];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_irq_matrix_global {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int online_maps;
|
|
+ unsigned int global_available;
|
|
+ unsigned int global_reserved;
|
|
+ unsigned int total_allocated;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_irq_matrix_global_update {
|
|
+ struct trace_entry ent;
|
|
+ int bit;
|
|
+ unsigned int online_maps;
|
|
+ unsigned int global_available;
|
|
+ unsigned int global_reserved;
|
|
+ unsigned int total_allocated;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_irq_matrix_cpu {
|
|
+ struct trace_entry ent;
|
|
+ int bit;
|
|
+ unsigned int cpu;
|
|
+ bool online;
|
|
+ unsigned int available;
|
|
+ unsigned int allocated;
|
|
+ unsigned int managed;
|
|
+ unsigned int online_maps;
|
|
+ unsigned int global_available;
|
|
+ unsigned int global_reserved;
|
|
+ unsigned int total_allocated;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_irq_matrix_global {};
|
|
+
|
|
+struct trace_event_data_offsets_irq_matrix_global_update {};
|
|
+
|
|
+struct trace_event_data_offsets_irq_matrix_cpu {};
|
|
+
|
|
+struct trace_event_raw_rcu_utilization {
|
|
+ struct trace_entry ent;
|
|
+ const char *s;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_rcu_utilization {};
|
|
+
|
|
+enum {
|
|
+ GP_IDLE = 0,
|
|
+ GP_PENDING = 1,
|
|
+ GP_PASSED = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CB_IDLE = 0,
|
|
+ CB_PENDING = 1,
|
|
+ CB_REPLAY = 2,
|
|
+};
|
|
+
|
|
+struct rcu_cblist {
|
|
+ struct callback_head *head;
|
|
+ struct callback_head **tail;
|
|
+ long int len;
|
|
+ long int len_lazy;
|
|
+};
|
|
+
|
|
+enum rcutorture_type {
|
|
+ RCU_FLAVOR = 0,
|
|
+ RCU_BH_FLAVOR = 1,
|
|
+ RCU_SCHED_FLAVOR = 2,
|
|
+ RCU_TASKS_FLAVOR = 3,
|
|
+ SRCU_FLAVOR = 4,
|
|
+ INVALID_RCU_FLAVOR = 5,
|
|
+};
|
|
+
|
|
+enum tick_device_mode {
|
|
+ TICKDEV_MODE_PERIODIC = 0,
|
|
+ TICKDEV_MODE_ONESHOT = 1,
|
|
+};
|
|
+
|
|
+struct tick_device___2 {
|
|
+ struct clock_event_device *evtdev;
|
|
+ enum tick_device_mode mode;
|
|
+};
|
|
+
|
|
+struct rcu_dynticks {
|
|
+ long int dynticks_nesting;
|
|
+ long int dynticks_nmi_nesting;
|
|
+ atomic_t dynticks;
|
|
+ bool rcu_need_heavy_qs;
|
|
+ long unsigned int rcu_qs_ctr;
|
|
+ bool rcu_urgent_qs;
|
|
+};
|
|
+
|
|
+struct rcu_state;
|
|
+
|
|
+struct rcu_exp_work {
|
|
+ smp_call_func_t rew_func;
|
|
+ struct rcu_state *rew_rsp;
|
|
+ long unsigned int rew_s;
|
|
+ struct work_struct rew_work;
|
|
+};
|
|
+
|
|
+struct rcu_node {
|
|
+ raw_spinlock_t lock;
|
|
+ long unsigned int gp_seq;
|
|
+ long unsigned int gp_seq_needed;
|
|
+ long unsigned int completedqs;
|
|
+ long unsigned int qsmask;
|
|
+ long unsigned int rcu_gp_init_mask;
|
|
+ long unsigned int qsmaskinit;
|
|
+ long unsigned int qsmaskinitnext;
|
|
+ long unsigned int expmask;
|
|
+ long unsigned int expmaskinit;
|
|
+ long unsigned int expmaskinitnext;
|
|
+ long unsigned int ffmask;
|
|
+ long unsigned int grpmask;
|
|
+ int grplo;
|
|
+ int grphi;
|
|
+ u8 grpnum;
|
|
+ u8 level;
|
|
+ bool wait_blkd_tasks;
|
|
+ struct rcu_node *parent;
|
|
+ struct list_head blkd_tasks;
|
|
+ struct list_head *gp_tasks;
|
|
+ struct list_head *exp_tasks;
|
|
+ struct list_head *boost_tasks;
|
|
+ struct rt_mutex boost_mtx;
|
|
+ long unsigned int boost_time;
|
|
+ struct task_struct *boost_kthread_task;
|
|
+ unsigned int boost_kthread_status;
|
|
+ struct swait_queue_head nocb_gp_wq[2];
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ raw_spinlock_t fqslock;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ spinlock_t exp_lock;
|
|
+ long unsigned int exp_seq_rq;
|
|
+ wait_queue_head_t exp_wq[4];
|
|
+ struct rcu_exp_work rew;
|
|
+ bool exp_need_flush;
|
|
+ long: 56;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct rcu_data;
|
|
+
|
|
+struct rcu_state {
|
|
+ struct rcu_node node[521];
|
|
+ struct rcu_node *level[4];
|
|
+ struct rcu_data *rda;
|
|
+ call_rcu_func_t call;
|
|
+ int ncpus;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ u8 boost;
|
|
+ long unsigned int gp_seq;
|
|
+ struct task_struct *gp_kthread;
|
|
+ struct swait_queue_head gp_wq;
|
|
+ short int gp_flags;
|
|
+ short int gp_state;
|
|
+ struct mutex barrier_mutex;
|
|
+ atomic_t barrier_cpu_count;
|
|
+ struct completion barrier_completion;
|
|
+ long unsigned int barrier_sequence;
|
|
+ struct mutex exp_mutex;
|
|
+ struct mutex exp_wake_mutex;
|
|
+ long unsigned int expedited_sequence;
|
|
+ atomic_t expedited_need_qs;
|
|
+ struct swait_queue_head expedited_wq;
|
|
+ int ncpus_snap;
|
|
+ long unsigned int jiffies_force_qs;
|
|
+ long unsigned int jiffies_kick_kthreads;
|
|
+ long unsigned int n_force_qs;
|
|
+ long unsigned int gp_start;
|
|
+ long unsigned int gp_activity;
|
|
+ long unsigned int gp_req_activity;
|
|
+ long unsigned int jiffies_stall;
|
|
+ long unsigned int jiffies_resched;
|
|
+ long unsigned int n_force_qs_gpstart;
|
|
+ long unsigned int gp_max;
|
|
+ const char *name;
|
|
+ char abbr;
|
|
+ struct list_head flavors;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ spinlock_t ofl_lock;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+union rcu_noqs {
|
|
+ struct {
|
|
+ u8 norm;
|
|
+ u8 exp;
|
|
+ } b;
|
|
+ u16 s;
|
|
+};
|
|
+
|
|
+struct rcu_data {
|
|
+ long unsigned int gp_seq;
|
|
+ long unsigned int gp_seq_needed;
|
|
+ long unsigned int rcu_qs_ctr_snap;
|
|
+ union rcu_noqs cpu_no_qs;
|
|
+ bool core_needs_qs;
|
|
+ bool beenonline;
|
|
+ bool gpwrap;
|
|
+ struct rcu_node *mynode;
|
|
+ long unsigned int grpmask;
|
|
+ long unsigned int ticks_this_gp;
|
|
+ struct rcu_segcblist cblist;
|
|
+ long int qlen_last_fqs_check;
|
|
+ long unsigned int n_force_qs_snap;
|
|
+ long int blimit;
|
|
+ struct rcu_dynticks *dynticks;
|
|
+ int dynticks_snap;
|
|
+ long unsigned int dynticks_fqs;
|
|
+ long unsigned int cond_resched_completed;
|
|
+ struct callback_head barrier_head;
|
|
+ int exp_dynticks_snap;
|
|
+ struct callback_head *nocb_head;
|
|
+ struct callback_head **nocb_tail;
|
|
+ atomic_long_t nocb_q_count;
|
|
+ atomic_long_t nocb_q_count_lazy;
|
|
+ struct callback_head *nocb_follower_head;
|
|
+ struct callback_head **nocb_follower_tail;
|
|
+ struct swait_queue_head nocb_wq;
|
|
+ struct task_struct *nocb_kthread;
|
|
+ raw_spinlock_t nocb_lock;
|
|
+ int nocb_defer_wakeup;
|
|
+ struct timer_list nocb_timer;
|
|
+ struct callback_head *nocb_gp_head;
|
|
+ struct callback_head **nocb_gp_tail;
|
|
+ bool nocb_leader_sleep;
|
|
+ struct rcu_data *nocb_next_follower;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct rcu_data *nocb_leader;
|
|
+ unsigned int softirq_snap;
|
|
+ struct irq_work rcu_iw;
|
|
+ bool rcu_iw_pending;
|
|
+ long unsigned int rcu_iw_gp_seq;
|
|
+ long unsigned int rcu_ofl_gp_seq;
|
|
+ short int rcu_ofl_gp_flags;
|
|
+ long unsigned int rcu_onl_gp_seq;
|
|
+ short int rcu_onl_gp_flags;
|
|
+ int cpu;
|
|
+ struct rcu_state *rsp;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct patch_data {
|
|
+ struct klp_patch *patch;
|
|
+ atomic_t cpu_count;
|
|
+};
|
|
+
|
|
+struct klp_find_arg {
|
|
+ const char *objname;
|
|
+ const char *name;
|
|
+ long unsigned int addr;
|
|
+ long unsigned int count;
|
|
+ long unsigned int pos;
|
|
+};
|
|
+
|
|
+struct dma_devres {
|
|
+ size_t size;
|
|
+ void *vaddr;
|
|
+ dma_addr_t dma_handle;
|
|
+ long unsigned int attrs;
|
|
+};
|
|
+
|
|
+enum dma_sync_target {
|
|
+ SYNC_FOR_CPU = 0,
|
|
+ SYNC_FOR_DEVICE = 1,
|
|
+};
|
|
+
|
|
+struct trace_event_raw_swiotlb_bounced {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_dev_name;
|
|
+ u64 dma_mask;
|
|
+ dma_addr_t dev_addr;
|
|
+ size_t size;
|
|
+ enum swiotlb_force swiotlb_force;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_swiotlb_bounced {
|
|
+ u32 dev_name;
|
|
+};
|
|
+
|
|
+enum kcmp_type {
|
|
+ KCMP_FILE = 0,
|
|
+ KCMP_VM = 1,
|
|
+ KCMP_FILES = 2,
|
|
+ KCMP_FS = 3,
|
|
+ KCMP_SIGHAND = 4,
|
|
+ KCMP_IO = 5,
|
|
+ KCMP_SYSVSEM = 6,
|
|
+ KCMP_EPOLL_TFD = 7,
|
|
+ KCMP_TYPES = 8,
|
|
+};
|
|
+
|
|
+struct kcmp_epoll_slot {
|
|
+ __u32 efd;
|
|
+ __u32 tfd;
|
|
+ __u32 toff;
|
|
+};
|
|
+
|
|
+enum profile_type {
|
|
+ PROFILE_TASK_EXIT = 0,
|
|
+ PROFILE_MUNMAP = 1,
|
|
+};
|
|
+
|
|
+struct profile_hit {
|
|
+ u32 pc;
|
|
+ u32 hits;
|
|
+};
|
|
+
|
|
+typedef __kernel_suseconds_t suseconds_t;
|
|
+
|
|
+typedef __kernel_time_t time_t;
|
|
+
|
|
+typedef __u64 timeu64_t;
|
|
+
|
|
+struct itimerspec {
|
|
+ struct timespec it_interval;
|
|
+ struct timespec it_value;
|
|
+};
|
|
+
|
|
+struct __kernel_old_timeval {
|
|
+ __kernel_long_t tv_sec;
|
|
+ __kernel_long_t tv_usec;
|
|
+};
|
|
+
|
|
+struct itimerspec64 {
|
|
+ struct timespec64 it_interval;
|
|
+ struct timespec64 it_value;
|
|
+};
|
|
+
|
|
+struct timex {
|
|
+ unsigned int modes;
|
|
+ __kernel_long_t offset;
|
|
+ __kernel_long_t freq;
|
|
+ __kernel_long_t maxerror;
|
|
+ __kernel_long_t esterror;
|
|
+ int status;
|
|
+ __kernel_long_t constant;
|
|
+ __kernel_long_t precision;
|
|
+ __kernel_long_t tolerance;
|
|
+ struct timeval time;
|
|
+ __kernel_long_t tick;
|
|
+ __kernel_long_t ppsfreq;
|
|
+ __kernel_long_t jitter;
|
|
+ int shift;
|
|
+ __kernel_long_t stabil;
|
|
+ __kernel_long_t jitcnt;
|
|
+ __kernel_long_t calcnt;
|
|
+ __kernel_long_t errcnt;
|
|
+ __kernel_long_t stbcnt;
|
|
+ int tai;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct compat_itimerspec {
|
|
+ struct compat_timespec it_interval;
|
|
+ struct compat_timespec it_value;
|
|
+};
|
|
+
|
|
+struct compat_timex {
|
|
+ compat_uint_t modes;
|
|
+ compat_long_t offset;
|
|
+ compat_long_t freq;
|
|
+ compat_long_t maxerror;
|
|
+ compat_long_t esterror;
|
|
+ compat_int_t status;
|
|
+ compat_long_t constant;
|
|
+ compat_long_t precision;
|
|
+ compat_long_t tolerance;
|
|
+ struct compat_timeval time;
|
|
+ compat_long_t tick;
|
|
+ compat_long_t ppsfreq;
|
|
+ compat_long_t jitter;
|
|
+ compat_int_t shift;
|
|
+ compat_long_t stabil;
|
|
+ compat_long_t jitcnt;
|
|
+ compat_long_t calcnt;
|
|
+ compat_long_t errcnt;
|
|
+ compat_long_t stbcnt;
|
|
+ compat_int_t tai;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct itimerval {
|
|
+ struct timeval it_interval;
|
|
+ struct timeval it_value;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_timer_class {
|
|
+ struct trace_entry ent;
|
|
+ void *timer;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_timer_start {
|
|
+ struct trace_entry ent;
|
|
+ void *timer;
|
|
+ void *function;
|
|
+ long unsigned int expires;
|
|
+ long unsigned int now;
|
|
+ unsigned int flags;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_timer_expire_entry {
|
|
+ struct trace_entry ent;
|
|
+ void *timer;
|
|
+ long unsigned int now;
|
|
+ void *function;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_hrtimer_init {
|
|
+ struct trace_entry ent;
|
|
+ void *hrtimer;
|
|
+ clockid_t clockid;
|
|
+ enum hrtimer_mode mode;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_hrtimer_start {
|
|
+ struct trace_entry ent;
|
|
+ void *hrtimer;
|
|
+ void *function;
|
|
+ s64 expires;
|
|
+ s64 softexpires;
|
|
+ enum hrtimer_mode mode;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_hrtimer_expire_entry {
|
|
+ struct trace_entry ent;
|
|
+ void *hrtimer;
|
|
+ s64 now;
|
|
+ void *function;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_hrtimer_class {
|
|
+ struct trace_entry ent;
|
|
+ void *hrtimer;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_itimer_state {
|
|
+ struct trace_entry ent;
|
|
+ int which;
|
|
+ long long unsigned int expires;
|
|
+ long int value_sec;
|
|
+ long int value_usec;
|
|
+ long int interval_sec;
|
|
+ long int interval_usec;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_itimer_expire {
|
|
+ struct trace_entry ent;
|
|
+ int which;
|
|
+ pid_t pid;
|
|
+ long long unsigned int now;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_tick_stop {
|
|
+ struct trace_entry ent;
|
|
+ int success;
|
|
+ int dependency;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_timer_class {};
|
|
+
|
|
+struct trace_event_data_offsets_timer_start {};
|
|
+
|
|
+struct trace_event_data_offsets_timer_expire_entry {};
|
|
+
|
|
+struct trace_event_data_offsets_hrtimer_init {};
|
|
+
|
|
+struct trace_event_data_offsets_hrtimer_start {};
|
|
+
|
|
+struct trace_event_data_offsets_hrtimer_expire_entry {};
|
|
+
|
|
+struct trace_event_data_offsets_hrtimer_class {};
|
|
+
|
|
+struct trace_event_data_offsets_itimer_state {};
|
|
+
|
|
+struct trace_event_data_offsets_itimer_expire {};
|
|
+
|
|
+struct trace_event_data_offsets_tick_stop {};
|
|
+
|
|
+struct timer_base {
|
|
+ raw_spinlock_t lock;
|
|
+ struct timer_list *running_timer;
|
|
+ long unsigned int clk;
|
|
+ long unsigned int next_expiry;
|
|
+ unsigned int cpu;
|
|
+ bool is_idle;
|
|
+ bool must_forward_clk;
|
|
+ long unsigned int pending_map[9];
|
|
+ struct hlist_head vectors[576];
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct process_timer {
|
|
+ struct timer_list timer;
|
|
+ struct task_struct *task;
|
|
+};
|
|
+
|
|
+struct system_time_snapshot {
|
|
+ u64 cycles;
|
|
+ ktime_t real;
|
|
+ ktime_t raw;
|
|
+ unsigned int clock_was_set_seq;
|
|
+ u8 cs_was_changed_seq;
|
|
+};
|
|
+
|
|
+struct system_device_crosststamp {
|
|
+ ktime_t device;
|
|
+ ktime_t sys_realtime;
|
|
+ ktime_t sys_monoraw;
|
|
+};
|
|
+
|
|
+enum timekeeping_adv_mode {
|
|
+ TK_ADV_TICK = 0,
|
|
+ TK_ADV_FREQ = 1,
|
|
+};
|
|
+
|
|
+struct tk_fast {
|
|
+ seqcount_t seq;
|
|
+ struct tk_read_base base[2];
|
|
+};
|
|
+
|
|
+typedef s64 int64_t;
|
|
+
|
|
+enum tick_nohz_mode {
|
|
+ NOHZ_MODE_INACTIVE = 0,
|
|
+ NOHZ_MODE_LOWRES = 1,
|
|
+ NOHZ_MODE_HIGHRES = 2,
|
|
+};
|
|
+
|
|
+struct tick_sched {
|
|
+ struct hrtimer sched_timer;
|
|
+ long unsigned int check_clocks;
|
|
+ enum tick_nohz_mode nohz_mode;
|
|
+ unsigned int inidle: 1;
|
|
+ unsigned int tick_stopped: 1;
|
|
+ unsigned int idle_active: 1;
|
|
+ unsigned int do_timer_last: 1;
|
|
+ unsigned int got_idle_tick: 1;
|
|
+ ktime_t last_tick;
|
|
+ ktime_t next_tick;
|
|
+ long unsigned int idle_jiffies;
|
|
+ long unsigned int idle_calls;
|
|
+ long unsigned int idle_sleeps;
|
|
+ ktime_t idle_entrytime;
|
|
+ ktime_t idle_waketime;
|
|
+ ktime_t idle_exittime;
|
|
+ ktime_t idle_sleeptime;
|
|
+ ktime_t iowait_sleeptime;
|
|
+ long unsigned int last_jiffies;
|
|
+ u64 timer_expires;
|
|
+ u64 timer_expires_base;
|
|
+ u64 next_timer;
|
|
+ ktime_t idle_expires;
|
|
+ atomic_t tick_dep_mask;
|
|
+};
|
|
+
|
|
+struct timer_list_iter {
|
|
+ int cpu;
|
|
+ bool second_pass;
|
|
+ u64 now;
|
|
+};
|
|
+
|
|
+struct tm {
|
|
+ int tm_sec;
|
|
+ int tm_min;
|
|
+ int tm_hour;
|
|
+ int tm_mday;
|
|
+ int tm_mon;
|
|
+ long int tm_year;
|
|
+ int tm_wday;
|
|
+ int tm_yday;
|
|
+};
|
|
+
|
|
+struct cyclecounter {
|
|
+ u64 (*read)(const struct cyclecounter *);
|
|
+ u64 mask;
|
|
+ u32 mult;
|
|
+ u32 shift;
|
|
+};
|
|
+
|
|
+struct timecounter {
|
|
+ const struct cyclecounter *cc;
|
|
+ u64 cycle_last;
|
|
+ u64 nsec;
|
|
+ u64 mask;
|
|
+ u64 frac;
|
|
+};
|
|
+
|
|
+typedef __kernel_timer_t timer_t;
|
|
+
|
|
+struct rtc_wkalrm {
|
|
+ unsigned char enabled;
|
|
+ unsigned char pending;
|
|
+ struct rtc_time time;
|
|
+};
|
|
+
|
|
+struct class_interface {
|
|
+ struct list_head node;
|
|
+ struct class *class;
|
|
+ int (*add_dev)(struct device *, struct class_interface *);
|
|
+ void (*remove_dev)(struct device *, struct class_interface *);
|
|
+};
|
|
+
|
|
+struct rtc_class_ops {
|
|
+ int (*ioctl)(struct device *, unsigned int, long unsigned int);
|
|
+ int (*read_time)(struct device *, struct rtc_time *);
|
|
+ int (*set_time)(struct device *, struct rtc_time *);
|
|
+ int (*read_alarm)(struct device *, struct rtc_wkalrm *);
|
|
+ int (*set_alarm)(struct device *, struct rtc_wkalrm *);
|
|
+ int (*proc)(struct device *, struct seq_file *);
|
|
+ int (*set_mmss64)(struct device *, time64_t);
|
|
+ int (*set_mmss)(struct device *, long unsigned int);
|
|
+ int (*read_callback)(struct device *, int);
|
|
+ int (*alarm_irq_enable)(struct device *, unsigned int);
|
|
+ int (*read_offset)(struct device *, long int *);
|
|
+ int (*set_offset)(struct device *, long int);
|
|
+};
|
|
+
|
|
+struct rtc_timer {
|
|
+ struct timerqueue_node node;
|
|
+ ktime_t period;
|
|
+ void (*func)(void *);
|
|
+ void *private_data;
|
|
+ int enabled;
|
|
+};
|
|
+
|
|
+struct nvmem_device;
|
|
+
|
|
+struct rtc_device {
|
|
+ struct device dev;
|
|
+ struct module *owner;
|
|
+ int id;
|
|
+ const struct rtc_class_ops *ops;
|
|
+ struct mutex ops_lock;
|
|
+ struct cdev char_dev;
|
|
+ long unsigned int flags;
|
|
+ long unsigned int irq_data;
|
|
+ spinlock_t irq_lock;
|
|
+ wait_queue_head_t irq_queue;
|
|
+ struct fasync_struct *async_queue;
|
|
+ int irq_freq;
|
|
+ int max_user_freq;
|
|
+ struct timerqueue_head timerqueue;
|
|
+ struct rtc_timer aie_timer;
|
|
+ struct rtc_timer uie_rtctimer;
|
|
+ struct hrtimer pie_timer;
|
|
+ int pie_enabled;
|
|
+ struct work_struct irqwork;
|
|
+ int uie_unsupported;
|
|
+ long int set_offset_nsec;
|
|
+ bool registered;
|
|
+ struct nvmem_device *nvmem;
|
|
+ bool nvram_old_abi;
|
|
+ struct bin_attribute *nvram;
|
|
+ time64_t range_min;
|
|
+ timeu64_t range_max;
|
|
+ time64_t start_secs;
|
|
+ time64_t offset_secs;
|
|
+ bool set_start_time;
|
|
+};
|
|
+
|
|
+enum alarmtimer_type {
|
|
+ ALARM_REALTIME = 0,
|
|
+ ALARM_BOOTTIME = 1,
|
|
+ ALARM_NUMTYPE = 2,
|
|
+ ALARM_REALTIME_FREEZER = 3,
|
|
+ ALARM_BOOTTIME_FREEZER = 4,
|
|
+};
|
|
+
|
|
+enum alarmtimer_restart {
|
|
+ ALARMTIMER_NORESTART = 0,
|
|
+ ALARMTIMER_RESTART = 1,
|
|
+};
|
|
+
|
|
+struct alarm {
|
|
+ struct timerqueue_node node;
|
|
+ struct hrtimer timer;
|
|
+ enum alarmtimer_restart (*function)(struct alarm *, ktime_t);
|
|
+ enum alarmtimer_type type;
|
|
+ int state;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+struct platform_driver {
|
|
+ int (*probe)(struct platform_device *);
|
|
+ int (*remove)(struct platform_device *);
|
|
+ void (*shutdown)(struct platform_device *);
|
|
+ int (*suspend)(struct platform_device *, pm_message_t);
|
|
+ int (*resume)(struct platform_device *);
|
|
+ struct device_driver driver;
|
|
+ const struct platform_device_id *id_table;
|
|
+ bool prevent_deferred_probe;
|
|
+};
|
|
+
|
|
+struct cpu_timer_list {
|
|
+ struct list_head entry;
|
|
+ u64 expires;
|
|
+ u64 incr;
|
|
+ struct task_struct *task;
|
|
+ int firing;
|
|
+};
|
|
+
|
|
+struct k_clock;
|
|
+
|
|
+struct k_itimer {
|
|
+ struct list_head list;
|
|
+ struct hlist_node t_hash;
|
|
+ spinlock_t it_lock;
|
|
+ const struct k_clock *kclock;
|
|
+ clockid_t it_clock;
|
|
+ timer_t it_id;
|
|
+ int it_active;
|
|
+ s64 it_overrun;
|
|
+ s64 it_overrun_last;
|
|
+ int it_requeue_pending;
|
|
+ int it_sigev_notify;
|
|
+ ktime_t it_interval;
|
|
+ struct signal_struct *it_signal;
|
|
+ union {
|
|
+ struct pid *it_pid;
|
|
+ struct task_struct *it_process;
|
|
+ };
|
|
+ struct sigqueue *sigq;
|
|
+ union {
|
|
+ struct {
|
|
+ struct hrtimer timer;
|
|
+ } real;
|
|
+ struct cpu_timer_list cpu;
|
|
+ struct {
|
|
+ struct alarm alarmtimer;
|
|
+ } alarm;
|
|
+ struct callback_head rcu;
|
|
+ } it;
|
|
+};
|
|
+
|
|
+struct k_clock {
|
|
+ int (*clock_getres)(const clockid_t, struct timespec64 *);
|
|
+ int (*clock_set)(const clockid_t, const struct timespec64 *);
|
|
+ int (*clock_get)(const clockid_t, struct timespec64 *);
|
|
+ int (*clock_adj)(const clockid_t, struct timex *);
|
|
+ int (*timer_create)(struct k_itimer *);
|
|
+ int (*nsleep)(const clockid_t, int, const struct timespec64 *);
|
|
+ int (*timer_set)(struct k_itimer *, int, struct itimerspec64 *, struct itimerspec64 *);
|
|
+ int (*timer_del)(struct k_itimer *);
|
|
+ void (*timer_get)(struct k_itimer *, struct itimerspec64 *);
|
|
+ void (*timer_rearm)(struct k_itimer *);
|
|
+ s64 (*timer_forward)(struct k_itimer *, ktime_t);
|
|
+ ktime_t (*timer_remaining)(struct k_itimer *, ktime_t);
|
|
+ int (*timer_try_to_cancel)(struct k_itimer *);
|
|
+ void (*timer_arm)(struct k_itimer *, ktime_t, bool, bool);
|
|
+};
|
|
+
|
|
+struct trace_event_raw_alarmtimer_suspend {
|
|
+ struct trace_entry ent;
|
|
+ s64 expires;
|
|
+ unsigned char alarm_type;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_alarm_class {
|
|
+ struct trace_entry ent;
|
|
+ void *alarm;
|
|
+ unsigned char alarm_type;
|
|
+ s64 expires;
|
|
+ s64 now;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_alarmtimer_suspend {};
|
|
+
|
|
+struct trace_event_data_offsets_alarm_class {};
|
|
+
|
|
+struct alarm_base {
|
|
+ spinlock_t lock;
|
|
+ struct timerqueue_head timerqueue;
|
|
+ ktime_t (*gettime)();
|
|
+ clockid_t base_clockid;
|
|
+};
|
|
+
|
|
+struct sigevent {
|
|
+ sigval_t sigev_value;
|
|
+ int sigev_signo;
|
|
+ int sigev_notify;
|
|
+ union {
|
|
+ int _pad[12];
|
|
+ int _tid;
|
|
+ struct {
|
|
+ void (*_function)(sigval_t);
|
|
+ void *_attribute;
|
|
+ } _sigev_thread;
|
|
+ } _sigev_un;
|
|
+};
|
|
+
|
|
+typedef struct sigevent sigevent_t;
|
|
+
|
|
+struct compat_sigevent {
|
|
+ compat_sigval_t sigev_value;
|
|
+ compat_int_t sigev_signo;
|
|
+ compat_int_t sigev_notify;
|
|
+ union {
|
|
+ compat_int_t _pad[13];
|
|
+ compat_int_t _tid;
|
|
+ struct {
|
|
+ compat_uptr_t _function;
|
|
+ compat_uptr_t _attribute;
|
|
+ } _sigev_thread;
|
|
+ } _sigev_un;
|
|
+};
|
|
+
|
|
+typedef unsigned int uint;
|
|
+
|
|
+struct posix_clock;
|
|
+
|
|
+struct posix_clock_operations {
|
|
+ struct module *owner;
|
|
+ int (*clock_adjtime)(struct posix_clock *, struct timex *);
|
|
+ int (*clock_gettime)(struct posix_clock *, struct timespec64 *);
|
|
+ int (*clock_getres)(struct posix_clock *, struct timespec64 *);
|
|
+ int (*clock_settime)(struct posix_clock *, const struct timespec64 *);
|
|
+ long int (*ioctl)(struct posix_clock *, unsigned int, long unsigned int);
|
|
+ int (*open)(struct posix_clock *, fmode_t);
|
|
+ __poll_t (*poll)(struct posix_clock *, struct file *, poll_table *);
|
|
+ int (*release)(struct posix_clock *);
|
|
+ ssize_t (*read)(struct posix_clock *, uint, char *, size_t);
|
|
+};
|
|
+
|
|
+struct posix_clock {
|
|
+ struct posix_clock_operations ops;
|
|
+ struct cdev cdev;
|
|
+ struct device *dev;
|
|
+ struct rw_semaphore rwsem;
|
|
+ bool zombie;
|
|
+};
|
|
+
|
|
+struct posix_clock_desc {
|
|
+ struct file *fp;
|
|
+ struct posix_clock *clk;
|
|
+};
|
|
+
|
|
+struct compat_itimerval {
|
|
+ struct compat_timeval it_interval;
|
|
+ struct compat_timeval it_value;
|
|
+};
|
|
+
|
|
+struct ce_unbind {
|
|
+ struct clock_event_device *ce;
|
|
+ int res;
|
|
+};
|
|
+
|
|
+typedef ktime_t pto_T_____23;
|
|
+
|
|
+union futex_key {
|
|
+ struct {
|
|
+ u64 i_seq;
|
|
+ long unsigned int pgoff;
|
|
+ unsigned int offset;
|
|
+ } shared;
|
|
+ struct {
|
|
+ union {
|
|
+ struct mm_struct *mm;
|
|
+ u64 __tmp;
|
|
+ };
|
|
+ long unsigned int address;
|
|
+ unsigned int offset;
|
|
+ } private;
|
|
+ struct {
|
|
+ u64 ptr;
|
|
+ long unsigned int word;
|
|
+ unsigned int offset;
|
|
+ } both;
|
|
+};
|
|
+
|
|
+struct futex_pi_state {
|
|
+ struct list_head list;
|
|
+ struct rt_mutex pi_mutex;
|
|
+ struct task_struct *owner;
|
|
+ atomic_t refcount;
|
|
+ union futex_key key;
|
|
+};
|
|
+
|
|
+struct futex_q {
|
|
+ struct plist_node list;
|
|
+ struct task_struct *task;
|
|
+ spinlock_t *lock_ptr;
|
|
+ union futex_key key;
|
|
+ struct futex_pi_state *pi_state;
|
|
+ struct rt_mutex_waiter *rt_waiter;
|
|
+ union futex_key *requeue_pi_key;
|
|
+ u32 bitset;
|
|
+};
|
|
+
|
|
+struct futex_hash_bucket {
|
|
+ atomic_t waiters;
|
|
+ spinlock_t lock;
|
|
+ struct plist_head chain;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+enum futex_access {
|
|
+ FUTEX_READ = 0,
|
|
+ FUTEX_WRITE = 1,
|
|
+};
|
|
+
|
|
+struct dma_chan {
|
|
+ int lock;
|
|
+ const char *device_id;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CSD_FLAG_LOCK = 1,
|
|
+ CSD_FLAG_SYNCHRONOUS = 2,
|
|
+};
|
|
+
|
|
+struct call_function_data {
|
|
+ call_single_data_t *csd;
|
|
+ cpumask_var_t cpumask;
|
|
+ cpumask_var_t cpumask_ipi;
|
|
+};
|
|
+
|
|
+struct smp_call_on_cpu_struct {
|
|
+ struct work_struct work;
|
|
+ struct completion done;
|
|
+ int (*func)(void *);
|
|
+ void *data;
|
|
+ int ret;
|
|
+ int cpu;
|
|
+};
|
|
+
|
|
+struct latch_tree_root {
|
|
+ seqcount_t seq;
|
|
+ struct rb_root tree[2];
|
|
+};
|
|
+
|
|
+struct latch_tree_ops {
|
|
+ bool (*less)(struct latch_tree_node *, struct latch_tree_node *);
|
|
+ int (*comp)(void *, struct latch_tree_node *);
|
|
+};
|
|
+
|
|
+struct modversion_info {
|
|
+ long unsigned int crc;
|
|
+ char name[56];
|
|
+};
|
|
+
|
|
+struct module_use {
|
|
+ struct list_head source_list;
|
|
+ struct list_head target_list;
|
|
+ struct module *source;
|
|
+ struct module *target;
|
|
+};
|
|
+
|
|
+struct module_sect_attr {
|
|
+ struct module_attribute mattr;
|
|
+ char *name;
|
|
+ long unsigned int address;
|
|
+};
|
|
+
|
|
+struct module_sect_attrs {
|
|
+ struct attribute_group grp;
|
|
+ unsigned int nsections;
|
|
+ struct module_sect_attr attrs[0];
|
|
+};
|
|
+
|
|
+struct module_notes_attrs {
|
|
+ struct kobject *dir;
|
|
+ unsigned int notes;
|
|
+ struct bin_attribute attrs[0];
|
|
+};
|
|
+
|
|
+struct symsearch {
|
|
+ const struct kernel_symbol *start;
|
|
+ const struct kernel_symbol *stop;
|
|
+ const s32 *crcs;
|
|
+ enum {
|
|
+ NOT_GPL_ONLY = 0,
|
|
+ GPL_ONLY = 1,
|
|
+ WILL_BE_GPL_ONLY = 2,
|
|
+ } licence;
|
|
+ bool unused;
|
|
+};
|
|
+
|
|
+enum kernel_read_file_id {
|
|
+ READING_UNKNOWN = 0,
|
|
+ READING_FIRMWARE = 1,
|
|
+ READING_FIRMWARE_PREALLOC_BUFFER = 2,
|
|
+ READING_MODULE = 3,
|
|
+ READING_KEXEC_IMAGE = 4,
|
|
+ READING_KEXEC_INITRAMFS = 5,
|
|
+ READING_POLICY = 6,
|
|
+ READING_X509_CERTIFICATE = 7,
|
|
+ READING_MAX_ID = 8,
|
|
+};
|
|
+
|
|
+enum kernel_load_data_id {
|
|
+ LOADING_UNKNOWN = 0,
|
|
+ LOADING_FIRMWARE = 1,
|
|
+ LOADING_FIRMWARE_PREALLOC_BUFFER = 2,
|
|
+ LOADING_MODULE = 3,
|
|
+ LOADING_KEXEC_IMAGE = 4,
|
|
+ LOADING_KEXEC_INITRAMFS = 5,
|
|
+ LOADING_POLICY = 6,
|
|
+ LOADING_X509_CERTIFICATE = 7,
|
|
+ LOADING_MAX_ID = 8,
|
|
+};
|
|
+
|
|
+struct load_info {
|
|
+ const char *name;
|
|
+ struct module *mod;
|
|
+ Elf64_Ehdr *hdr;
|
|
+ long unsigned int len;
|
|
+ Elf64_Shdr *sechdrs;
|
|
+ char *secstrings;
|
|
+ char *strtab;
|
|
+ long unsigned int symoffs;
|
|
+ long unsigned int stroffs;
|
|
+ struct _ddebug *debug;
|
|
+ unsigned int num_debug;
|
|
+ bool sig_ok;
|
|
+ long unsigned int mod_kallsyms_init_off;
|
|
+ struct {
|
|
+ unsigned int sym;
|
|
+ unsigned int str;
|
|
+ unsigned int mod;
|
|
+ unsigned int vers;
|
|
+ unsigned int info;
|
|
+ unsigned int pcpu;
|
|
+ } index;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_module_load {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int taints;
|
|
+ u32 __data_loc_name;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_module_free {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_module_refcnt {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int ip;
|
|
+ int refcnt;
|
|
+ u32 __data_loc_name;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_module_request {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int ip;
|
|
+ bool wait;
|
|
+ u32 __data_loc_name;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_module_load {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_module_free {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_module_refcnt {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_module_request {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct mod_tree_root {
|
|
+ struct latch_tree_root root;
|
|
+ long unsigned int addr_min;
|
|
+ long unsigned int addr_max;
|
|
+};
|
|
+
|
|
+struct find_symbol_arg {
|
|
+ const char *name;
|
|
+ bool gplok;
|
|
+ bool warn;
|
|
+ struct module *owner;
|
|
+ const s32 *crc;
|
|
+ const struct kernel_symbol *sym;
|
|
+};
|
|
+
|
|
+struct mod_initfree {
|
|
+ struct callback_head rcu;
|
|
+ void *module_init;
|
|
+};
|
|
+
|
|
+struct asymmetric_key_subtype;
|
|
+
|
|
+enum pkey_id_type {
|
|
+ PKEY_ID_PGP = 0,
|
|
+ PKEY_ID_X509 = 1,
|
|
+ PKEY_ID_PKCS7 = 2,
|
|
+};
|
|
+
|
|
+struct module_signature {
|
|
+ u8 algo;
|
|
+ u8 hash;
|
|
+ u8 id_type;
|
|
+ u8 signer_len;
|
|
+ u8 key_id_len;
|
|
+ u8 __pad[3];
|
|
+ __be32 sig_len;
|
|
+};
|
|
+
|
|
+struct kallsym_iter {
|
|
+ loff_t pos;
|
|
+ loff_t pos_arch_end;
|
|
+ loff_t pos_mod_end;
|
|
+ loff_t pos_ftrace_mod_end;
|
|
+ long unsigned int value;
|
|
+ unsigned int nameoff;
|
|
+ char type;
|
|
+ char name[128];
|
|
+ char module_name[56];
|
|
+ int exported;
|
|
+ int show_value;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SB_UNFROZEN = 0,
|
|
+ SB_FREEZE_WRITE = 1,
|
|
+ SB_FREEZE_PAGEFAULT = 2,
|
|
+ SB_FREEZE_FS = 3,
|
|
+ SB_FREEZE_COMPLETE = 4,
|
|
+};
|
|
+
|
|
+typedef __u16 comp_t;
|
|
+
|
|
+struct acct_v3 {
|
|
+ char ac_flag;
|
|
+ char ac_version;
|
|
+ __u16 ac_tty;
|
|
+ __u32 ac_exitcode;
|
|
+ __u32 ac_uid;
|
|
+ __u32 ac_gid;
|
|
+ __u32 ac_pid;
|
|
+ __u32 ac_ppid;
|
|
+ __u32 ac_btime;
|
|
+ __u32 ac_etime;
|
|
+ comp_t ac_utime;
|
|
+ comp_t ac_stime;
|
|
+ comp_t ac_mem;
|
|
+ comp_t ac_io;
|
|
+ comp_t ac_rw;
|
|
+ comp_t ac_minflt;
|
|
+ comp_t ac_majflt;
|
|
+ comp_t ac_swaps;
|
|
+ char ac_comm[16];
|
|
+};
|
|
+
|
|
+typedef struct acct_v3 acct_t;
|
|
+
|
|
+struct fs_pin {
|
|
+ wait_queue_head_t wait;
|
|
+ int done;
|
|
+ struct hlist_node s_list;
|
|
+ struct hlist_node m_list;
|
|
+ void (*kill)(struct fs_pin *);
|
|
+};
|
|
+
|
|
+struct bsd_acct_struct {
|
|
+ struct fs_pin pin;
|
|
+ atomic_long_t count;
|
|
+ struct callback_head rcu;
|
|
+ struct mutex lock;
|
|
+ int active;
|
|
+ long unsigned int needcheck;
|
|
+ struct file *file;
|
|
+ struct pid_namespace *ns;
|
|
+ struct work_struct work;
|
|
+ struct completion done;
|
|
+};
|
|
+
|
|
+enum compound_dtor_id {
|
|
+ NULL_COMPOUND_DTOR = 0,
|
|
+ COMPOUND_PAGE_DTOR = 1,
|
|
+ HUGETLB_PAGE_DTOR = 2,
|
|
+ TRANSHUGE_PAGE_DTOR = 3,
|
|
+ NR_COMPOUND_DTORS = 4,
|
|
+};
|
|
+
|
|
+struct elf64_note {
|
|
+ Elf64_Word n_namesz;
|
|
+ Elf64_Word n_descsz;
|
|
+ Elf64_Word n_type;
|
|
+};
|
|
+
|
|
+typedef long unsigned int elf_greg_t;
|
|
+
|
|
+typedef elf_greg_t elf_gregset_t[27];
|
|
+
|
|
+struct elf_siginfo {
|
|
+ int si_signo;
|
|
+ int si_code;
|
|
+ int si_errno;
|
|
+};
|
|
+
|
|
+struct elf_prstatus {
|
|
+ struct elf_siginfo pr_info;
|
|
+ short int pr_cursig;
|
|
+ long unsigned int pr_sigpend;
|
|
+ long unsigned int pr_sighold;
|
|
+ pid_t pr_pid;
|
|
+ pid_t pr_ppid;
|
|
+ pid_t pr_pgrp;
|
|
+ pid_t pr_sid;
|
|
+ struct timeval pr_utime;
|
|
+ struct timeval pr_stime;
|
|
+ struct timeval pr_cutime;
|
|
+ struct timeval pr_cstime;
|
|
+ elf_gregset_t pr_reg;
|
|
+ int pr_fpvalid;
|
|
+};
|
|
+
|
|
+struct kexec_sha_region {
|
|
+ long unsigned int start;
|
|
+ long unsigned int len;
|
|
+};
|
|
+
|
|
+struct compat_kexec_segment {
|
|
+ compat_uptr_t buf;
|
|
+ compat_size_t bufsz;
|
|
+ compat_ulong_t mem;
|
|
+ compat_size_t memsz;
|
|
+};
|
|
+
|
|
+enum migrate_reason {
|
|
+ MR_COMPACTION = 0,
|
|
+ MR_MEMORY_FAILURE = 1,
|
|
+ MR_MEMORY_HOTPLUG = 2,
|
|
+ MR_SYSCALL = 3,
|
|
+ MR_MEMPOLICY_MBIND = 4,
|
|
+ MR_NUMA_MISPLACED = 5,
|
|
+ MR_CONTIG_RANGE = 6,
|
|
+ MR_TYPES = 7,
|
|
+};
|
|
+
|
|
+enum kernfs_node_type {
|
|
+ KERNFS_DIR = 1,
|
|
+ KERNFS_FILE = 2,
|
|
+ KERNFS_LINK = 4,
|
|
+};
|
|
+
|
|
+enum kernfs_root_flag {
|
|
+ KERNFS_ROOT_CREATE_DEACTIVATED = 1,
|
|
+ KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 2,
|
|
+ KERNFS_ROOT_SUPPORT_EXPORTOP = 4,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CGRP_NOTIFY_ON_RELEASE = 0,
|
|
+ CGRP_CPUSET_CLONE_CHILDREN = 1,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CGRP_ROOT_NOPREFIX = 2,
|
|
+ CGRP_ROOT_XATTR = 4,
|
|
+ CGRP_ROOT_NS_DELEGATE = 8,
|
|
+ CGRP_ROOT_CPUSET_V2_MODE = 16,
|
|
+};
|
|
+
|
|
+struct cgroup_taskset {
|
|
+ struct list_head src_csets;
|
|
+ struct list_head dst_csets;
|
|
+ int nr_tasks;
|
|
+ int ssid;
|
|
+ struct list_head *csets;
|
|
+ struct css_set *cur_cset;
|
|
+ struct task_struct *cur_task;
|
|
+};
|
|
+
|
|
+struct css_task_iter {
|
|
+ struct cgroup_subsys *ss;
|
|
+ unsigned int flags;
|
|
+ struct list_head *cset_pos;
|
|
+ struct list_head *cset_head;
|
|
+ struct list_head *tcset_pos;
|
|
+ struct list_head *tcset_head;
|
|
+ struct list_head *task_pos;
|
|
+ struct list_head *tasks_head;
|
|
+ struct list_head *mg_tasks_head;
|
|
+ struct list_head *dying_tasks_head;
|
|
+ struct list_head *cur_tasks_head;
|
|
+ struct css_set *cur_cset;
|
|
+ struct css_set *cur_dcset;
|
|
+ struct task_struct *cur_task;
|
|
+ struct list_head iters_node;
|
|
+};
|
|
+
|
|
+struct cgrp_cset_link {
|
|
+ struct cgroup *cgrp;
|
|
+ struct css_set *cset;
|
|
+ struct list_head cset_link;
|
|
+ struct list_head cgrp_link;
|
|
+};
|
|
+
|
|
+struct cgroup_mgctx {
|
|
+ struct list_head preloaded_src_csets;
|
|
+ struct list_head preloaded_dst_csets;
|
|
+ struct cgroup_taskset tset;
|
|
+ u16 ss_mask;
|
|
+};
|
|
+
|
|
+struct cgroup_sb_opts {
|
|
+ u16 subsys_mask;
|
|
+ unsigned int flags;
|
|
+ char *release_agent;
|
|
+ bool cpuset_clone_children;
|
|
+ char *name;
|
|
+ bool none;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_cgroup_root {
|
|
+ struct trace_entry ent;
|
|
+ int root;
|
|
+ u16 ss_mask;
|
|
+ u32 __data_loc_name;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_cgroup {
|
|
+ struct trace_entry ent;
|
|
+ int root;
|
|
+ int id;
|
|
+ int level;
|
|
+ u32 __data_loc_path;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_cgroup_migrate {
|
|
+ struct trace_entry ent;
|
|
+ int dst_root;
|
|
+ int dst_id;
|
|
+ int dst_level;
|
|
+ int pid;
|
|
+ u32 __data_loc_dst_path;
|
|
+ u32 __data_loc_comm;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_cgroup_root {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_cgroup {
|
|
+ u32 path;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_cgroup_migrate {
|
|
+ u32 dst_path;
|
|
+ u32 comm;
|
|
+};
|
|
+
|
|
+struct cgroupstats {
|
|
+ __u64 nr_sleeping;
|
|
+ __u64 nr_running;
|
|
+ __u64 nr_stopped;
|
|
+ __u64 nr_uninterruptible;
|
|
+ __u64 nr_io_wait;
|
|
+};
|
|
+
|
|
+enum cgroup_filetype {
|
|
+ CGROUP_FILE_PROCS = 0,
|
|
+ CGROUP_FILE_TASKS = 1,
|
|
+};
|
|
+
|
|
+struct cgroup_pidlist {
|
|
+ struct {
|
|
+ enum cgroup_filetype type;
|
|
+ struct pid_namespace *ns;
|
|
+ } key;
|
|
+ pid_t *list;
|
|
+ int length;
|
|
+ struct list_head links;
|
|
+ struct cgroup *owner;
|
|
+ struct delayed_work destroy_dwork;
|
|
+};
|
|
+
|
|
+enum freezer_state_flags {
|
|
+ CGROUP_FREEZER_ONLINE = 1,
|
|
+ CGROUP_FREEZING_SELF = 2,
|
|
+ CGROUP_FREEZING_PARENT = 4,
|
|
+ CGROUP_FROZEN = 8,
|
|
+ CGROUP_FREEZING = 6,
|
|
+};
|
|
+
|
|
+struct freezer {
|
|
+ struct cgroup_subsys_state css;
|
|
+ unsigned int state;
|
|
+};
|
|
+
|
|
+struct pids_cgroup {
|
|
+ struct cgroup_subsys_state css;
|
|
+ atomic64_t counter;
|
|
+ atomic64_t limit;
|
|
+ struct cgroup_file events_file;
|
|
+ atomic64_t events_limit;
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ char *from;
|
|
+ char *to;
|
|
+} substring_t;
|
|
+
|
|
+enum rdmacg_resource_type {
|
|
+ RDMACG_RESOURCE_HCA_HANDLE = 0,
|
|
+ RDMACG_RESOURCE_HCA_OBJECT = 1,
|
|
+ RDMACG_RESOURCE_MAX = 2,
|
|
+};
|
|
+
|
|
+struct rdma_cgroup {
|
|
+ struct cgroup_subsys_state css;
|
|
+ struct list_head rpools;
|
|
+};
|
|
+
|
|
+struct rdmacg_device {
|
|
+ struct list_head dev_node;
|
|
+ struct list_head rpools;
|
|
+ char *name;
|
|
+};
|
|
+
|
|
+enum rdmacg_file_type {
|
|
+ RDMACG_RESOURCE_TYPE_MAX = 0,
|
|
+ RDMACG_RESOURCE_TYPE_STAT = 1,
|
|
+};
|
|
+
|
|
+struct rdmacg_resource {
|
|
+ int max;
|
|
+ int usage;
|
|
+};
|
|
+
|
|
+struct rdmacg_resource_pool {
|
|
+ struct rdmacg_device *device;
|
|
+ struct rdmacg_resource resources[2];
|
|
+ struct list_head cg_node;
|
|
+ struct list_head dev_node;
|
|
+ u64 usage_sum;
|
|
+ int num_max_cnt;
|
|
+};
|
|
+
|
|
+struct fmeter {
|
|
+ int cnt;
|
|
+ int val;
|
|
+ time64_t time;
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct cpuset {
|
|
+ struct cgroup_subsys_state css;
|
|
+ long unsigned int flags;
|
|
+ cpumask_var_t cpus_allowed;
|
|
+ nodemask_t mems_allowed;
|
|
+ cpumask_var_t effective_cpus;
|
|
+ nodemask_t effective_mems;
|
|
+ nodemask_t old_mems_allowed;
|
|
+ struct fmeter fmeter;
|
|
+ int attach_in_progress;
|
|
+ int pn;
|
|
+ int relax_domain_level;
|
|
+};
|
|
+
|
|
+typedef enum {
|
|
+ CS_ONLINE = 0,
|
|
+ CS_CPU_EXCLUSIVE = 1,
|
|
+ CS_MEM_EXCLUSIVE = 2,
|
|
+ CS_MEM_HARDWALL = 3,
|
|
+ CS_MEMORY_MIGRATE = 4,
|
|
+ CS_SCHED_LOAD_BALANCE = 5,
|
|
+ CS_SPREAD_PAGE = 6,
|
|
+ CS_SPREAD_SLAB = 7,
|
|
+} cpuset_flagbits_t;
|
|
+
|
|
+struct cpuset_migrate_mm_work {
|
|
+ struct work_struct work;
|
|
+ struct mm_struct *mm;
|
|
+ nodemask_t from;
|
|
+ nodemask_t to;
|
|
+};
|
|
+
|
|
+typedef enum {
|
|
+ FILE_MEMORY_MIGRATE = 0,
|
|
+ FILE_CPULIST = 1,
|
|
+ FILE_MEMLIST = 2,
|
|
+ FILE_EFFECTIVE_CPULIST = 3,
|
|
+ FILE_EFFECTIVE_MEMLIST = 4,
|
|
+ FILE_CPU_EXCLUSIVE = 5,
|
|
+ FILE_MEM_EXCLUSIVE = 6,
|
|
+ FILE_MEM_HARDWALL = 7,
|
|
+ FILE_SCHED_LOAD_BALANCE = 8,
|
|
+ FILE_SCHED_RELAX_DOMAIN_LEVEL = 9,
|
|
+ FILE_MEMORY_PRESSURE_ENABLED = 10,
|
|
+ FILE_MEMORY_PRESSURE = 11,
|
|
+ FILE_SPREAD_PAGE = 12,
|
|
+ FILE_SPREAD_SLAB = 13,
|
|
+} cpuset_filetype_t;
|
|
+
|
|
+struct key_preparsed_payload {
|
|
+ char *description;
|
|
+ union key_payload payload;
|
|
+ const void *data;
|
|
+ size_t datalen;
|
|
+ size_t quotalen;
|
|
+ time64_t expiry;
|
|
+};
|
|
+
|
|
+struct key_match_data {
|
|
+ bool (*cmp)(const struct key *, const struct key_match_data *);
|
|
+ const void *raw_data;
|
|
+ void *preparsed;
|
|
+ unsigned int lookup_type;
|
|
+};
|
|
+
|
|
+struct idmap_key {
|
|
+ bool map_up;
|
|
+ u32 id;
|
|
+ u32 count;
|
|
+};
|
|
+
|
|
+struct cpu_stop_done {
|
|
+ atomic_t nr_todo;
|
|
+ int ret;
|
|
+ struct completion completion;
|
|
+};
|
|
+
|
|
+struct cpu_stopper {
|
|
+ struct task_struct *thread;
|
|
+ raw_spinlock_t lock;
|
|
+ bool enabled;
|
|
+ struct list_head works;
|
|
+ struct cpu_stop_work stop_work;
|
|
+};
|
|
+
|
|
+enum multi_stop_state {
|
|
+ MULTI_STOP_NONE = 0,
|
|
+ MULTI_STOP_PREPARE = 1,
|
|
+ MULTI_STOP_DISABLE_IRQ = 2,
|
|
+ MULTI_STOP_RUN = 3,
|
|
+ MULTI_STOP_EXIT = 4,
|
|
+};
|
|
+
|
|
+struct multi_stop_data {
|
|
+ cpu_stop_fn_t fn;
|
|
+ void *data;
|
|
+ unsigned int num_threads;
|
|
+ const struct cpumask *active_cpus;
|
|
+ enum multi_stop_state state;
|
|
+ atomic_t thread_ack;
|
|
+};
|
|
+
|
|
+struct cpu_vfs_cap_data {
|
|
+ __u32 magic_etc;
|
|
+ kernel_cap_t permitted;
|
|
+ kernel_cap_t inheritable;
|
|
+};
|
|
+
|
|
+enum audit_nlgrps {
|
|
+ AUDIT_NLGRP_NONE = 0,
|
|
+ AUDIT_NLGRP_READLOG = 1,
|
|
+ __AUDIT_NLGRP_MAX = 2,
|
|
+};
|
|
+
|
|
+struct audit_status {
|
|
+ __u32 mask;
|
|
+ __u32 enabled;
|
|
+ __u32 failure;
|
|
+ __u32 pid;
|
|
+ __u32 rate_limit;
|
|
+ __u32 backlog_limit;
|
|
+ __u32 lost;
|
|
+ __u32 backlog;
|
|
+ union {
|
|
+ __u32 version;
|
|
+ __u32 feature_bitmap;
|
|
+ };
|
|
+ __u32 backlog_wait_time;
|
|
+};
|
|
+
|
|
+struct audit_features {
|
|
+ __u32 vers;
|
|
+ __u32 mask;
|
|
+ __u32 features;
|
|
+ __u32 lock;
|
|
+};
|
|
+
|
|
+struct audit_tty_status {
|
|
+ __u32 enabled;
|
|
+ __u32 log_passwd;
|
|
+};
|
|
+
|
|
+struct audit_sig_info {
|
|
+ uid_t uid;
|
|
+ pid_t pid;
|
|
+ char ctx[0];
|
|
+};
|
|
+
|
|
+struct net_generic {
|
|
+ union {
|
|
+ struct {
|
|
+ unsigned int len;
|
|
+ struct callback_head rcu;
|
|
+ } s;
|
|
+ void *ptr[0];
|
|
+ };
|
|
+};
|
|
+
|
|
+struct scm_creds {
|
|
+ u32 pid;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+};
|
|
+
|
|
+struct netlink_skb_parms {
|
|
+ struct scm_creds creds;
|
|
+ __u32 portid;
|
|
+ __u32 dst_group;
|
|
+ __u32 flags;
|
|
+ struct sock *sk;
|
|
+ bool nsid_is_set;
|
|
+ int nsid;
|
|
+};
|
|
+
|
|
+struct netlink_kernel_cfg {
|
|
+ unsigned int groups;
|
|
+ unsigned int flags;
|
|
+ void (*input)(struct sk_buff *);
|
|
+ struct mutex *cb_mutex;
|
|
+ int (*bind)(struct net *, int);
|
|
+ void (*unbind)(struct net *, int);
|
|
+ bool (*compare)(struct net *, struct sock *);
|
|
+};
|
|
+
|
|
+struct audit_netlink_list {
|
|
+ __u32 portid;
|
|
+ struct net *net;
|
|
+ struct sk_buff_head q;
|
|
+};
|
|
+
|
|
+struct audit_net {
|
|
+ struct sock *sk;
|
|
+};
|
|
+
|
|
+struct auditd_connection {
|
|
+ struct pid *pid;
|
|
+ u32 portid;
|
|
+ struct net *net;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct audit_ctl_mutex {
|
|
+ struct mutex lock;
|
|
+ void *owner;
|
|
+};
|
|
+
|
|
+struct audit_buffer {
|
|
+ struct sk_buff *skb;
|
|
+ struct audit_context *ctx;
|
|
+ gfp_t gfp_mask;
|
|
+};
|
|
+
|
|
+struct audit_reply {
|
|
+ __u32 portid;
|
|
+ struct net *net;
|
|
+ struct sk_buff *skb;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ Audit_equal = 0,
|
|
+ Audit_not_equal = 1,
|
|
+ Audit_bitmask = 2,
|
|
+ Audit_bittest = 3,
|
|
+ Audit_lt = 4,
|
|
+ Audit_gt = 5,
|
|
+ Audit_le = 6,
|
|
+ Audit_ge = 7,
|
|
+ Audit_bad = 8,
|
|
+};
|
|
+
|
|
+struct audit_rule_data {
|
|
+ __u32 flags;
|
|
+ __u32 action;
|
|
+ __u32 field_count;
|
|
+ __u32 mask[64];
|
|
+ __u32 fields[64];
|
|
+ __u32 values[64];
|
|
+ __u32 fieldflags[64];
|
|
+ __u32 buflen;
|
|
+ char buf[0];
|
|
+};
|
|
+
|
|
+struct audit_field;
|
|
+
|
|
+struct audit_watch;
|
|
+
|
|
+struct audit_tree;
|
|
+
|
|
+struct audit_fsnotify_mark;
|
|
+
|
|
+struct audit_krule {
|
|
+ u32 pflags;
|
|
+ u32 flags;
|
|
+ u32 listnr;
|
|
+ u32 action;
|
|
+ u32 mask[64];
|
|
+ u32 buflen;
|
|
+ u32 field_count;
|
|
+ char *filterkey;
|
|
+ struct audit_field *fields;
|
|
+ struct audit_field *arch_f;
|
|
+ struct audit_field *inode_f;
|
|
+ struct audit_watch *watch;
|
|
+ struct audit_tree *tree;
|
|
+ struct audit_fsnotify_mark *exe;
|
|
+ struct list_head rlist;
|
|
+ struct list_head list;
|
|
+ u64 prio;
|
|
+};
|
|
+
|
|
+struct audit_field {
|
|
+ u32 type;
|
|
+ union {
|
|
+ u32 val;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ struct {
|
|
+ char *lsm_str;
|
|
+ void *lsm_rule;
|
|
+ };
|
|
+ };
|
|
+ u32 op;
|
|
+};
|
|
+
|
|
+struct audit_entry {
|
|
+ struct list_head list;
|
|
+ struct callback_head rcu;
|
|
+ struct audit_krule rule;
|
|
+};
|
|
+
|
|
+struct audit_buffer___2;
|
|
+
|
|
+typedef int __kernel_key_t;
|
|
+
|
|
+typedef __kernel_key_t key_t;
|
|
+
|
|
+struct kern_ipc_perm {
|
|
+ spinlock_t lock;
|
|
+ bool deleted;
|
|
+ int id;
|
|
+ key_t key;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ kuid_t cuid;
|
|
+ kgid_t cgid;
|
|
+ umode_t mode;
|
|
+ long unsigned int seq;
|
|
+ void *security;
|
|
+ struct rhash_head khtnode;
|
|
+ struct callback_head rcu;
|
|
+ refcount_t refcount;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+typedef struct fsnotify_mark_connector *fsnotify_connp_t;
|
|
+
|
|
+struct fsnotify_mark_connector {
|
|
+ spinlock_t lock;
|
|
+ unsigned int type;
|
|
+ union {
|
|
+ fsnotify_connp_t *obj;
|
|
+ struct fsnotify_mark_connector *destroy_next;
|
|
+ };
|
|
+ struct hlist_head list;
|
|
+};
|
|
+
|
|
+enum fsnotify_obj_type {
|
|
+ FSNOTIFY_OBJ_TYPE_INODE = 0,
|
|
+ FSNOTIFY_OBJ_TYPE_VFSMOUNT = 1,
|
|
+ FSNOTIFY_OBJ_TYPE_COUNT = 2,
|
|
+ FSNOTIFY_OBJ_TYPE_DETACHED = 2,
|
|
+};
|
|
+
|
|
+struct audit_aux_data {
|
|
+ struct audit_aux_data *next;
|
|
+ int type;
|
|
+};
|
|
+
|
|
+struct audit_chunk;
|
|
+
|
|
+struct audit_tree_refs {
|
|
+ struct audit_tree_refs *next;
|
|
+ struct audit_chunk *c[31];
|
|
+};
|
|
+
|
|
+struct audit_aux_data_pids {
|
|
+ struct audit_aux_data d;
|
|
+ pid_t target_pid[16];
|
|
+ kuid_t target_auid[16];
|
|
+ kuid_t target_uid[16];
|
|
+ unsigned int target_sessionid[16];
|
|
+ u32 target_sid[16];
|
|
+ char target_comm[256];
|
|
+ int pid_count;
|
|
+};
|
|
+
|
|
+struct audit_aux_data_bprm_fcaps {
|
|
+ struct audit_aux_data d;
|
|
+ struct audit_cap_data fcap;
|
|
+ unsigned int fcap_ver;
|
|
+ struct audit_cap_data old_pcap;
|
|
+ struct audit_cap_data new_pcap;
|
|
+};
|
|
+
|
|
+struct audit_parent;
|
|
+
|
|
+struct audit_watch {
|
|
+ refcount_t count;
|
|
+ dev_t dev;
|
|
+ char *path;
|
|
+ long unsigned int ino;
|
|
+ struct audit_parent *parent;
|
|
+ struct list_head wlist;
|
|
+ struct list_head rules;
|
|
+};
|
|
+
|
|
+struct fsnotify_group;
|
|
+
|
|
+struct fsnotify_iter_info;
|
|
+
|
|
+struct fsnotify_mark;
|
|
+
|
|
+struct fsnotify_event;
|
|
+
|
|
+struct fsnotify_ops {
|
|
+ int (*handle_event)(struct fsnotify_group *, struct inode *, u32, const void *, int, const unsigned char *, u32, struct fsnotify_iter_info *);
|
|
+ void (*free_group_priv)(struct fsnotify_group *);
|
|
+ void (*freeing_mark)(struct fsnotify_mark *, struct fsnotify_group *);
|
|
+ void (*free_event)(struct fsnotify_event *);
|
|
+ void (*free_mark)(struct fsnotify_mark *);
|
|
+};
|
|
+
|
|
+struct inotify_group_private_data {
|
|
+ spinlock_t idr_lock;
|
|
+ struct idr idr;
|
|
+ struct ucounts *ucounts;
|
|
+};
|
|
+
|
|
+struct fanotify_group_private_data {
|
|
+ struct list_head access_list;
|
|
+ wait_queue_head_t access_waitq;
|
|
+ int f_flags;
|
|
+ unsigned int max_marks;
|
|
+ struct user_struct *user;
|
|
+ bool audit;
|
|
+};
|
|
+
|
|
+struct fsnotify_group {
|
|
+ const struct fsnotify_ops *ops;
|
|
+ refcount_t refcnt;
|
|
+ spinlock_t notification_lock;
|
|
+ struct list_head notification_list;
|
|
+ wait_queue_head_t notification_waitq;
|
|
+ unsigned int q_len;
|
|
+ unsigned int max_events;
|
|
+ unsigned int priority;
|
|
+ bool shutdown;
|
|
+ struct mutex mark_mutex;
|
|
+ atomic_t num_marks;
|
|
+ atomic_t user_waits;
|
|
+ struct list_head marks_list;
|
|
+ struct fasync_struct *fsn_fa;
|
|
+ struct fsnotify_event *overflow_event;
|
|
+ struct mem_cgroup *memcg;
|
|
+ union {
|
|
+ void *private;
|
|
+ struct inotify_group_private_data inotify_data;
|
|
+ struct fanotify_group_private_data fanotify_data;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct fsnotify_iter_info {
|
|
+ struct fsnotify_mark *marks[2];
|
|
+ unsigned int report_mask;
|
|
+ int srcu_idx;
|
|
+};
|
|
+
|
|
+struct fsnotify_mark {
|
|
+ __u32 mask;
|
|
+ refcount_t refcnt;
|
|
+ struct fsnotify_group *group;
|
|
+ struct list_head g_list;
|
|
+ spinlock_t lock;
|
|
+ struct hlist_node obj_list;
|
|
+ struct fsnotify_mark_connector *connector;
|
|
+ __u32 ignored_mask;
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+struct fsnotify_event {
|
|
+ struct list_head list;
|
|
+ struct inode *inode;
|
|
+ u32 mask;
|
|
+};
|
|
+
|
|
+struct audit_parent {
|
|
+ struct list_head watches;
|
|
+ struct fsnotify_mark mark;
|
|
+};
|
|
+
|
|
+struct audit_fsnotify_mark {
|
|
+ dev_t dev;
|
|
+ long unsigned int ino;
|
|
+ char *path;
|
|
+ struct fsnotify_mark mark;
|
|
+ struct audit_krule *rule;
|
|
+};
|
|
+
|
|
+struct audit_chunk___2;
|
|
+
|
|
+struct audit_tree {
|
|
+ refcount_t count;
|
|
+ int goner;
|
|
+ struct audit_chunk___2 *root;
|
|
+ struct list_head chunks;
|
|
+ struct list_head rules;
|
|
+ struct list_head list;
|
|
+ struct list_head same_root;
|
|
+ struct callback_head head;
|
|
+ char pathname[0];
|
|
+};
|
|
+
|
|
+struct node___2 {
|
|
+ struct list_head list;
|
|
+ struct audit_tree *owner;
|
|
+ unsigned int index;
|
|
+};
|
|
+
|
|
+struct audit_chunk___2 {
|
|
+ struct list_head hash;
|
|
+ long unsigned int key;
|
|
+ struct fsnotify_mark mark;
|
|
+ struct list_head trees;
|
|
+ int dead;
|
|
+ int count;
|
|
+ atomic_long_t refs;
|
|
+ struct callback_head head;
|
|
+ struct node___2 owners[0];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ HASH_SIZE = 128,
|
|
+};
|
|
+
|
|
+struct kprobe_blacklist_entry {
|
|
+ struct list_head list;
|
|
+ long unsigned int start_addr;
|
|
+ long unsigned int end_addr;
|
|
+};
|
|
+
|
|
+struct kprobe_insn_page {
|
|
+ struct list_head list;
|
|
+ kprobe_opcode_t *insns;
|
|
+ struct kprobe_insn_cache *cache;
|
|
+ int nused;
|
|
+ int ngarbage;
|
|
+ char slot_used[0];
|
|
+};
|
|
+
|
|
+enum kprobe_slot_state {
|
|
+ SLOT_CLEAN = 0,
|
|
+ SLOT_DIRTY = 1,
|
|
+ SLOT_USED = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ KDB_NOT_INITIALIZED = 0,
|
|
+ KDB_INIT_EARLY = 1,
|
|
+ KDB_INIT_FULL = 2,
|
|
+};
|
|
+
|
|
+struct kgdb_state {
|
|
+ int ex_vector;
|
|
+ int signo;
|
|
+ int err_code;
|
|
+ int cpu;
|
|
+ int pass_exception;
|
|
+ long unsigned int thr_query;
|
|
+ long unsigned int threadid;
|
|
+ long int kgdb_usethreadid;
|
|
+ struct pt_regs *linux_regs;
|
|
+ atomic_t *send_ready;
|
|
+};
|
|
+
|
|
+struct debuggerinfo_struct {
|
|
+ void *debuggerinfo;
|
|
+ struct task_struct *task;
|
|
+ int exception_state;
|
|
+ int ret_state;
|
|
+ int irq_depth;
|
|
+ int enter_kgdb;
|
|
+};
|
|
+
|
|
+struct _kdb_bp {
|
|
+ long unsigned int bp_addr;
|
|
+ unsigned int bp_free: 1;
|
|
+ unsigned int bp_enabled: 1;
|
|
+ unsigned int bp_type: 4;
|
|
+ unsigned int bp_installed: 1;
|
|
+ unsigned int bp_delay: 1;
|
|
+ unsigned int bp_delayed: 1;
|
|
+ unsigned int bph_length;
|
|
+};
|
|
+
|
|
+typedef struct _kdb_bp kdb_bp_t;
|
|
+
|
|
+typedef enum {
|
|
+ KDB_ENABLE_ALL = 1,
|
|
+ KDB_ENABLE_MEM_READ = 2,
|
|
+ KDB_ENABLE_MEM_WRITE = 4,
|
|
+ KDB_ENABLE_REG_READ = 8,
|
|
+ KDB_ENABLE_REG_WRITE = 16,
|
|
+ KDB_ENABLE_INSPECT = 32,
|
|
+ KDB_ENABLE_FLOW_CTRL = 64,
|
|
+ KDB_ENABLE_SIGNAL = 128,
|
|
+ KDB_ENABLE_REBOOT = 256,
|
|
+ KDB_ENABLE_ALWAYS_SAFE = 512,
|
|
+ KDB_ENABLE_MASK = 1023,
|
|
+ KDB_ENABLE_ALL_NO_ARGS = 1024,
|
|
+ KDB_ENABLE_MEM_READ_NO_ARGS = 2048,
|
|
+ KDB_ENABLE_MEM_WRITE_NO_ARGS = 4096,
|
|
+ KDB_ENABLE_REG_READ_NO_ARGS = 8192,
|
|
+ KDB_ENABLE_REG_WRITE_NO_ARGS = 16384,
|
|
+ KDB_ENABLE_INSPECT_NO_ARGS = 32768,
|
|
+ KDB_ENABLE_FLOW_CTRL_NO_ARGS = 65536,
|
|
+ KDB_ENABLE_SIGNAL_NO_ARGS = 131072,
|
|
+ KDB_ENABLE_REBOOT_NO_ARGS = 262144,
|
|
+ KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = 524288,
|
|
+ KDB_ENABLE_MASK_NO_ARGS = 1047552,
|
|
+ KDB_REPEAT_NO_ARGS = 1073741824,
|
|
+ KDB_REPEAT_WITH_ARGS = -2147483648,
|
|
+} kdb_cmdflags_t;
|
|
+
|
|
+typedef int (*kdb_func_t)(int, const char **);
|
|
+
|
|
+typedef enum {
|
|
+ KDB_REASON_ENTER = 1,
|
|
+ KDB_REASON_ENTER_SLAVE = 2,
|
|
+ KDB_REASON_BREAK = 3,
|
|
+ KDB_REASON_DEBUG = 4,
|
|
+ KDB_REASON_OOPS = 5,
|
|
+ KDB_REASON_SWITCH = 6,
|
|
+ KDB_REASON_KEYBOARD = 7,
|
|
+ KDB_REASON_NMI = 8,
|
|
+ KDB_REASON_RECURSE = 9,
|
|
+ KDB_REASON_SSTEP = 10,
|
|
+ KDB_REASON_SYSTEM_NMI = 11,
|
|
+} kdb_reason_t;
|
|
+
|
|
+struct __ksymtab {
|
|
+ long unsigned int value;
|
|
+ const char *mod_name;
|
|
+ long unsigned int mod_start;
|
|
+ long unsigned int mod_end;
|
|
+ const char *sec_name;
|
|
+ long unsigned int sec_start;
|
|
+ long unsigned int sec_end;
|
|
+ const char *sym_name;
|
|
+ long unsigned int sym_start;
|
|
+ long unsigned int sym_end;
|
|
+};
|
|
+
|
|
+typedef struct __ksymtab kdb_symtab_t;
|
|
+
|
|
+struct _kdbtab {
|
|
+ char *cmd_name;
|
|
+ kdb_func_t cmd_func;
|
|
+ char *cmd_usage;
|
|
+ char *cmd_help;
|
|
+ short int cmd_minlen;
|
|
+ kdb_cmdflags_t cmd_flags;
|
|
+};
|
|
+
|
|
+typedef struct _kdbtab kdbtab_t;
|
|
+
|
|
+typedef enum {
|
|
+ KDB_DB_BPT = 0,
|
|
+ KDB_DB_SS = 1,
|
|
+ KDB_DB_SSBPT = 2,
|
|
+ KDB_DB_NOBPT = 3,
|
|
+} kdb_dbtrap_t;
|
|
+
|
|
+struct _kdbmsg {
|
|
+ int km_diag;
|
|
+ char *km_msg;
|
|
+};
|
|
+
|
|
+typedef struct _kdbmsg kdbmsg_t;
|
|
+
|
|
+struct defcmd_set {
|
|
+ int count;
|
|
+ int usable;
|
|
+ char *name;
|
|
+ char *usage;
|
|
+ char *help;
|
|
+ char **command;
|
|
+};
|
|
+
|
|
+struct debug_alloc_header {
|
|
+ u32 next;
|
|
+ u32 size;
|
|
+ void *caller;
|
|
+};
|
|
+
|
|
+typedef short unsigned int u_short;
|
|
+
|
|
+typedef struct perf_event *pto_T_____24;
|
|
+
|
|
+struct seccomp_filter {
|
|
+ refcount_t usage;
|
|
+ bool log;
|
|
+ struct seccomp_filter *prev;
|
|
+ struct bpf_prog *prog;
|
|
+};
|
|
+
|
|
+struct seccomp_metadata {
|
|
+ __u64 filter_off;
|
|
+ __u64 flags;
|
|
+};
|
|
+
|
|
+struct sock_fprog {
|
|
+ short unsigned int len;
|
|
+ struct sock_filter *filter;
|
|
+};
|
|
+
|
|
+struct compat_sock_fprog {
|
|
+ u16 len;
|
|
+ compat_uptr_t filter;
|
|
+};
|
|
+
|
|
+struct seccomp_log_name {
|
|
+ u32 log;
|
|
+ const char *name;
|
|
+};
|
|
+
|
|
+struct rchan;
|
|
+
|
|
+struct rchan_buf {
|
|
+ void *start;
|
|
+ void *data;
|
|
+ size_t offset;
|
|
+ size_t subbufs_produced;
|
|
+ size_t subbufs_consumed;
|
|
+ struct rchan *chan;
|
|
+ wait_queue_head_t read_wait;
|
|
+ struct irq_work wakeup_work;
|
|
+ struct dentry *dentry;
|
|
+ struct kref kref;
|
|
+ struct page **page_array;
|
|
+ unsigned int page_count;
|
|
+ unsigned int finalized;
|
|
+ size_t *padding;
|
|
+ size_t prev_padding;
|
|
+ size_t bytes_consumed;
|
|
+ size_t early_bytes;
|
|
+ unsigned int cpu;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct rchan_callbacks;
|
|
+
|
|
+struct rchan {
|
|
+ u32 version;
|
|
+ size_t subbuf_size;
|
|
+ size_t n_subbufs;
|
|
+ size_t alloc_size;
|
|
+ struct rchan_callbacks *cb;
|
|
+ struct kref kref;
|
|
+ void *private_data;
|
|
+ size_t last_toobig;
|
|
+ struct rchan_buf **buf;
|
|
+ int is_global;
|
|
+ struct list_head list;
|
|
+ struct dentry *parent;
|
|
+ int has_base_filename;
|
|
+ char base_filename[255];
|
|
+};
|
|
+
|
|
+struct rchan_callbacks {
|
|
+ int (*subbuf_start)(struct rchan_buf *, void *, void *, size_t);
|
|
+ void (*buf_mapped)(struct rchan_buf *, struct file *);
|
|
+ void (*buf_unmapped)(struct rchan_buf *, struct file *);
|
|
+ struct dentry * (*create_buf_file)(const char *, struct dentry *, umode_t, struct rchan_buf *, int *);
|
|
+ int (*remove_buf_file)(struct dentry *);
|
|
+};
|
|
+
|
|
+struct partial_page {
|
|
+ unsigned int offset;
|
|
+ unsigned int len;
|
|
+ long unsigned int private;
|
|
+};
|
|
+
|
|
+struct splice_pipe_desc {
|
|
+ struct page **pages;
|
|
+ struct partial_page *partial;
|
|
+ int nr_pages;
|
|
+ unsigned int nr_pages_max;
|
|
+ const struct pipe_buf_operations *ops;
|
|
+ void (*spd_release)(struct splice_pipe_desc *, unsigned int);
|
|
+};
|
|
+
|
|
+struct rchan_percpu_buf_dispatcher {
|
|
+ struct rchan_buf *buf;
|
|
+ struct dentry *dentry;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TASKSTATS_TYPE_UNSPEC = 0,
|
|
+ TASKSTATS_TYPE_PID = 1,
|
|
+ TASKSTATS_TYPE_TGID = 2,
|
|
+ TASKSTATS_TYPE_STATS = 3,
|
|
+ TASKSTATS_TYPE_AGGR_PID = 4,
|
|
+ TASKSTATS_TYPE_AGGR_TGID = 5,
|
|
+ TASKSTATS_TYPE_NULL = 6,
|
|
+ __TASKSTATS_TYPE_MAX = 7,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TASKSTATS_CMD_ATTR_UNSPEC = 0,
|
|
+ TASKSTATS_CMD_ATTR_PID = 1,
|
|
+ TASKSTATS_CMD_ATTR_TGID = 2,
|
|
+ TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 3,
|
|
+ TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 4,
|
|
+ __TASKSTATS_CMD_ATTR_MAX = 5,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CGROUPSTATS_CMD_UNSPEC = 3,
|
|
+ CGROUPSTATS_CMD_GET = 4,
|
|
+ CGROUPSTATS_CMD_NEW = 5,
|
|
+ __CGROUPSTATS_CMD_MAX = 6,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CGROUPSTATS_TYPE_UNSPEC = 0,
|
|
+ CGROUPSTATS_TYPE_CGROUP_STATS = 1,
|
|
+ __CGROUPSTATS_TYPE_MAX = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CGROUPSTATS_CMD_ATTR_UNSPEC = 0,
|
|
+ CGROUPSTATS_CMD_ATTR_FD = 1,
|
|
+ __CGROUPSTATS_CMD_ATTR_MAX = 2,
|
|
+};
|
|
+
|
|
+struct genlmsghdr {
|
|
+ __u8 cmd;
|
|
+ __u8 version;
|
|
+ __u16 reserved;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NLA_UNSPEC = 0,
|
|
+ NLA_U8 = 1,
|
|
+ NLA_U16 = 2,
|
|
+ NLA_U32 = 3,
|
|
+ NLA_U64 = 4,
|
|
+ NLA_STRING = 5,
|
|
+ NLA_FLAG = 6,
|
|
+ NLA_MSECS = 7,
|
|
+ NLA_NESTED = 8,
|
|
+ NLA_NESTED_COMPAT = 9,
|
|
+ NLA_NUL_STRING = 10,
|
|
+ NLA_BINARY = 11,
|
|
+ NLA_S8 = 12,
|
|
+ NLA_S16 = 13,
|
|
+ NLA_S32 = 14,
|
|
+ NLA_S64 = 15,
|
|
+ NLA_BITFIELD32 = 16,
|
|
+ __NLA_TYPE_MAX = 17,
|
|
+};
|
|
+
|
|
+struct genl_multicast_group {
|
|
+ char name[16];
|
|
+};
|
|
+
|
|
+struct genl_ops;
|
|
+
|
|
+struct genl_info;
|
|
+
|
|
+struct genl_family {
|
|
+ int id;
|
|
+ unsigned int hdrsize;
|
|
+ char name[16];
|
|
+ unsigned int version;
|
|
+ unsigned int maxattr;
|
|
+ bool netnsok;
|
|
+ bool parallel_ops;
|
|
+ int (*pre_doit)(const struct genl_ops *, struct sk_buff *, struct genl_info *);
|
|
+ void (*post_doit)(const struct genl_ops *, struct sk_buff *, struct genl_info *);
|
|
+ int (*mcast_bind)(struct net *, int);
|
|
+ void (*mcast_unbind)(struct net *, int);
|
|
+ struct nlattr **attrbuf;
|
|
+ const struct genl_ops *ops;
|
|
+ const struct genl_multicast_group *mcgrps;
|
|
+ unsigned int n_ops;
|
|
+ unsigned int n_mcgrps;
|
|
+ unsigned int mcgrp_offset;
|
|
+ struct module *module;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+};
|
|
+
|
|
+struct genl_ops {
|
|
+ const struct nla_policy *policy;
|
|
+ int (*doit)(struct sk_buff *, struct genl_info *);
|
|
+ int (*start)(struct netlink_callback *);
|
|
+ int (*dumpit)(struct sk_buff *, struct netlink_callback *);
|
|
+ int (*done)(struct netlink_callback *);
|
|
+ u8 cmd;
|
|
+ u8 internal_flags;
|
|
+ u8 flags;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+};
|
|
+
|
|
+struct genl_info {
|
|
+ u32 snd_seq;
|
|
+ u32 snd_portid;
|
|
+ struct nlmsghdr *nlhdr;
|
|
+ struct genlmsghdr *genlhdr;
|
|
+ void *userhdr;
|
|
+ struct nlattr **attrs;
|
|
+ possible_net_t _net;
|
|
+ void *user_ptr[2];
|
|
+ struct netlink_ext_ack *extack;
|
|
+};
|
|
+
|
|
+struct listener {
|
|
+ struct list_head list;
|
|
+ pid_t pid;
|
|
+ char valid;
|
|
+};
|
|
+
|
|
+struct listener_list {
|
|
+ struct rw_semaphore sem;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+enum actions {
|
|
+ REGISTER = 0,
|
|
+ DEREGISTER = 1,
|
|
+ CPU_DONT_CARE = 2,
|
|
+};
|
|
+
|
|
+struct tp_module {
|
|
+ struct list_head list;
|
|
+ struct module *mod;
|
|
+};
|
|
+
|
|
+struct tp_probes {
|
|
+ struct callback_head rcu;
|
|
+ struct tracepoint_func probes[0];
|
|
+};
|
|
+
|
|
+struct ftrace_hash {
|
|
+ long unsigned int size_bits;
|
|
+ struct hlist_head *buckets;
|
|
+ long unsigned int count;
|
|
+ long unsigned int flags;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ FTRACE_UPDATE_CALLS = 1,
|
|
+ FTRACE_DISABLE_CALLS = 2,
|
|
+ FTRACE_UPDATE_TRACE_FUNC = 4,
|
|
+ FTRACE_START_FUNC_RET = 8,
|
|
+ FTRACE_STOP_FUNC_RET = 16,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ FTRACE_ITER_FILTER = 1,
|
|
+ FTRACE_ITER_NOTRACE = 2,
|
|
+ FTRACE_ITER_PRINTALL = 4,
|
|
+ FTRACE_ITER_DO_PROBES = 8,
|
|
+ FTRACE_ITER_PROBE = 16,
|
|
+ FTRACE_ITER_MOD = 32,
|
|
+ FTRACE_ITER_ENABLED = 64,
|
|
+};
|
|
+
|
|
+struct prog_entry;
|
|
+
|
|
+struct event_filter {
|
|
+ struct prog_entry *prog;
|
|
+ char *filter_string;
|
|
+};
|
|
+
|
|
+struct trace_array_cpu;
|
|
+
|
|
+struct trace_buffer {
|
|
+ struct trace_array *tr;
|
|
+ struct ring_buffer *buffer;
|
|
+ struct trace_array_cpu *data;
|
|
+ u64 time_start;
|
|
+ int cpu;
|
|
+};
|
|
+
|
|
+struct trace_pid_list;
|
|
+
|
|
+struct trace_options;
|
|
+
|
|
+struct trace_array {
|
|
+ struct list_head list;
|
|
+ char *name;
|
|
+ struct trace_buffer trace_buffer;
|
|
+ struct trace_buffer max_buffer;
|
|
+ bool allocated_snapshot;
|
|
+ long unsigned int max_latency;
|
|
+ struct trace_pid_list *filtered_pids;
|
|
+ arch_spinlock_t max_lock;
|
|
+ int buffer_disabled;
|
|
+ int sys_refcount_enter;
|
|
+ int sys_refcount_exit;
|
|
+ struct trace_event_file *enter_syscall_files[335];
|
|
+ struct trace_event_file *exit_syscall_files[335];
|
|
+ int stop_count;
|
|
+ int clock_id;
|
|
+ int nr_topts;
|
|
+ bool clear_trace;
|
|
+ struct tracer *current_trace;
|
|
+ unsigned int trace_flags;
|
|
+ unsigned char trace_flags_index[32];
|
|
+ unsigned int flags;
|
|
+ raw_spinlock_t start_lock;
|
|
+ struct dentry *dir;
|
|
+ struct dentry *options;
|
|
+ struct dentry *percpu_dir;
|
|
+ struct dentry *event_dir;
|
|
+ struct trace_options *topts;
|
|
+ struct list_head systems;
|
|
+ struct list_head events;
|
|
+ struct trace_event_file *trace_marker_file;
|
|
+ cpumask_var_t tracing_cpumask;
|
|
+ int ref;
|
|
+ struct ftrace_ops *ops;
|
|
+ struct trace_pid_list *function_pids;
|
|
+ struct list_head func_probes;
|
|
+ struct list_head mod_trace;
|
|
+ struct list_head mod_notrace;
|
|
+ int function_enabled;
|
|
+ int time_stamp_abs_ref;
|
|
+ struct list_head hist_vars;
|
|
+};
|
|
+
|
|
+struct tracer_flags;
|
|
+
|
|
+struct tracer {
|
|
+ const char *name;
|
|
+ int (*init)(struct trace_array *);
|
|
+ void (*reset)(struct trace_array *);
|
|
+ void (*start)(struct trace_array *);
|
|
+ void (*stop)(struct trace_array *);
|
|
+ int (*update_thresh)(struct trace_array *);
|
|
+ void (*open)(struct trace_iterator *);
|
|
+ void (*pipe_open)(struct trace_iterator *);
|
|
+ void (*close)(struct trace_iterator *);
|
|
+ void (*pipe_close)(struct trace_iterator *);
|
|
+ ssize_t (*read)(struct trace_iterator *, struct file *, char *, size_t, loff_t *);
|
|
+ ssize_t (*splice_read)(struct trace_iterator *, struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
|
|
+ void (*print_header)(struct seq_file *);
|
|
+ enum print_line_t (*print_line)(struct trace_iterator *);
|
|
+ int (*set_flag)(struct trace_array *, u32, u32, int);
|
|
+ int (*flag_changed)(struct trace_array *, u32, int);
|
|
+ struct tracer *next;
|
|
+ struct tracer_flags *flags;
|
|
+ int enabled;
|
|
+ int ref;
|
|
+ bool print_max;
|
|
+ bool allow_instances;
|
|
+ bool use_max_tr;
|
|
+ bool noboot;
|
|
+};
|
|
+
|
|
+struct event_subsystem;
|
|
+
|
|
+struct trace_subsystem_dir {
|
|
+ struct list_head list;
|
|
+ struct event_subsystem *subsystem;
|
|
+ struct trace_array *tr;
|
|
+ struct dentry *entry;
|
|
+ int ref_count;
|
|
+ int nr_events;
|
|
+};
|
|
+
|
|
+struct trace_array_cpu {
|
|
+ atomic_t disabled;
|
|
+ void *buffer_page;
|
|
+ long unsigned int entries;
|
|
+ long unsigned int saved_latency;
|
|
+ long unsigned int critical_start;
|
|
+ long unsigned int critical_end;
|
|
+ long unsigned int critical_sequence;
|
|
+ long unsigned int nice;
|
|
+ long unsigned int policy;
|
|
+ long unsigned int rt_priority;
|
|
+ long unsigned int skipped_entries;
|
|
+ u64 preempt_timestamp;
|
|
+ pid_t pid;
|
|
+ kuid_t uid;
|
|
+ char comm[16];
|
|
+ bool ignore_pid;
|
|
+ bool ftrace_ignore_pid;
|
|
+};
|
|
+
|
|
+struct trace_option_dentry;
|
|
+
|
|
+struct trace_options {
|
|
+ struct tracer *tracer;
|
|
+ struct trace_option_dentry *topts;
|
|
+};
|
|
+
|
|
+struct tracer_opt;
|
|
+
|
|
+struct trace_option_dentry {
|
|
+ struct tracer_opt *opt;
|
|
+ struct tracer_flags *flags;
|
|
+ struct trace_array *tr;
|
|
+ struct dentry *entry;
|
|
+};
|
|
+
|
|
+struct trace_pid_list {
|
|
+ int pid_max;
|
|
+ long unsigned int *pids;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TRACE_ARRAY_FL_GLOBAL = 1,
|
|
+};
|
|
+
|
|
+struct tracer_opt {
|
|
+ const char *name;
|
|
+ u32 bit;
|
|
+};
|
|
+
|
|
+struct tracer_flags {
|
|
+ u32 val;
|
|
+ struct tracer_opt *opts;
|
|
+ struct tracer *trace;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TRACE_BUFFER_BIT = 0,
|
|
+ TRACE_BUFFER_NMI_BIT = 1,
|
|
+ TRACE_BUFFER_IRQ_BIT = 2,
|
|
+ TRACE_BUFFER_SIRQ_BIT = 3,
|
|
+ TRACE_FTRACE_BIT = 4,
|
|
+ TRACE_FTRACE_NMI_BIT = 5,
|
|
+ TRACE_FTRACE_IRQ_BIT = 6,
|
|
+ TRACE_FTRACE_SIRQ_BIT = 7,
|
|
+ TRACE_INTERNAL_BIT = 8,
|
|
+ TRACE_INTERNAL_NMI_BIT = 9,
|
|
+ TRACE_INTERNAL_IRQ_BIT = 10,
|
|
+ TRACE_INTERNAL_SIRQ_BIT = 11,
|
|
+ TRACE_BRANCH_BIT = 12,
|
|
+ TRACE_IRQ_BIT = 13,
|
|
+ TRACE_GRAPH_BIT = 14,
|
|
+ TRACE_GRAPH_DEPTH_START_BIT = 15,
|
|
+ TRACE_GRAPH_DEPTH_END_BIT = 16,
|
|
+ TRACE_TRANSITION_BIT = 17,
|
|
+};
|
|
+
|
|
+struct ftrace_mod_load {
|
|
+ struct list_head list;
|
|
+ char *func;
|
|
+ char *module;
|
|
+ int enable;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ FTRACE_HASH_FL_MOD = 1,
|
|
+};
|
|
+
|
|
+struct ftrace_func_command {
|
|
+ struct list_head list;
|
|
+ char *name;
|
|
+ int (*func)(struct trace_array *, struct ftrace_hash *, char *, char *, char *, int);
|
|
+};
|
|
+
|
|
+struct ftrace_probe_ops {
|
|
+ void (*func)(long unsigned int, long unsigned int, struct trace_array *, struct ftrace_probe_ops *, void *);
|
|
+ int (*init)(struct ftrace_probe_ops *, struct trace_array *, long unsigned int, void *, void **);
|
|
+ void (*free)(struct ftrace_probe_ops *, struct trace_array *, long unsigned int, void *);
|
|
+ int (*print)(struct seq_file *, long unsigned int, struct ftrace_probe_ops *, void *);
|
|
+};
|
|
+
|
|
+typedef int (*ftrace_mapper_func)(void *);
|
|
+
|
|
+struct trace_parser {
|
|
+ bool cont;
|
|
+ char *buffer;
|
|
+ unsigned int idx;
|
|
+ unsigned int size;
|
|
+};
|
|
+
|
|
+enum trace_iterator_bits {
|
|
+ TRACE_ITER_PRINT_PARENT_BIT = 0,
|
|
+ TRACE_ITER_SYM_OFFSET_BIT = 1,
|
|
+ TRACE_ITER_SYM_ADDR_BIT = 2,
|
|
+ TRACE_ITER_VERBOSE_BIT = 3,
|
|
+ TRACE_ITER_RAW_BIT = 4,
|
|
+ TRACE_ITER_HEX_BIT = 5,
|
|
+ TRACE_ITER_BIN_BIT = 6,
|
|
+ TRACE_ITER_BLOCK_BIT = 7,
|
|
+ TRACE_ITER_PRINTK_BIT = 8,
|
|
+ TRACE_ITER_ANNOTATE_BIT = 9,
|
|
+ TRACE_ITER_USERSTACKTRACE_BIT = 10,
|
|
+ TRACE_ITER_SYM_USEROBJ_BIT = 11,
|
|
+ TRACE_ITER_PRINTK_MSGONLY_BIT = 12,
|
|
+ TRACE_ITER_CONTEXT_INFO_BIT = 13,
|
|
+ TRACE_ITER_LATENCY_FMT_BIT = 14,
|
|
+ TRACE_ITER_RECORD_CMD_BIT = 15,
|
|
+ TRACE_ITER_RECORD_TGID_BIT = 16,
|
|
+ TRACE_ITER_OVERWRITE_BIT = 17,
|
|
+ TRACE_ITER_STOP_ON_FREE_BIT = 18,
|
|
+ TRACE_ITER_IRQ_INFO_BIT = 19,
|
|
+ TRACE_ITER_MARKERS_BIT = 20,
|
|
+ TRACE_ITER_EVENT_FORK_BIT = 21,
|
|
+ TRACE_ITER_FUNCTION_BIT = 22,
|
|
+ TRACE_ITER_FUNC_FORK_BIT = 23,
|
|
+ TRACE_ITER_DISPLAY_GRAPH_BIT = 24,
|
|
+ TRACE_ITER_STACKTRACE_BIT = 25,
|
|
+ TRACE_ITER_LAST_BIT = 26,
|
|
+};
|
|
+
|
|
+struct event_subsystem {
|
|
+ struct list_head list;
|
|
+ const char *name;
|
|
+ struct event_filter *filter;
|
|
+ int ref_count;
|
|
+};
|
|
+
|
|
+enum regex_type {
|
|
+ MATCH_FULL = 0,
|
|
+ MATCH_FRONT_ONLY = 1,
|
|
+ MATCH_MIDDLE_ONLY = 2,
|
|
+ MATCH_END_ONLY = 3,
|
|
+ MATCH_GLOB = 4,
|
|
+};
|
|
+
|
|
+struct tracer_stat {
|
|
+ const char *name;
|
|
+ void * (*stat_start)(struct tracer_stat *);
|
|
+ void * (*stat_next)(void *, int);
|
|
+ int (*stat_cmp)(void *, void *);
|
|
+ int (*stat_show)(struct seq_file *, void *);
|
|
+ void (*stat_release)(void *);
|
|
+ int (*stat_headers)(struct seq_file *);
|
|
+};
|
|
+
|
|
+struct ftrace_profile {
|
|
+ struct hlist_node node;
|
|
+ long unsigned int ip;
|
|
+ long unsigned int counter;
|
|
+ long long unsigned int time;
|
|
+ long long unsigned int time_squared;
|
|
+};
|
|
+
|
|
+struct ftrace_profile_page {
|
|
+ struct ftrace_profile_page *next;
|
|
+ long unsigned int index;
|
|
+ struct ftrace_profile records[0];
|
|
+};
|
|
+
|
|
+struct ftrace_profile_stat {
|
|
+ atomic_t disabled;
|
|
+ struct hlist_head *hash;
|
|
+ struct ftrace_profile_page *pages;
|
|
+ struct ftrace_profile_page *start;
|
|
+ struct tracer_stat stat;
|
|
+};
|
|
+
|
|
+struct ftrace_func_entry {
|
|
+ struct hlist_node hlist;
|
|
+ long unsigned int ip;
|
|
+};
|
|
+
|
|
+struct ftrace_func_probe {
|
|
+ struct ftrace_probe_ops *probe_ops;
|
|
+ struct ftrace_ops ops;
|
|
+ struct trace_array *tr;
|
|
+ struct list_head list;
|
|
+ void *data;
|
|
+ int ref;
|
|
+};
|
|
+
|
|
+struct ftrace_page {
|
|
+ struct ftrace_page *next;
|
|
+ struct dyn_ftrace *records;
|
|
+ int index;
|
|
+ int size;
|
|
+};
|
|
+
|
|
+struct ftrace_rec_iter___2 {
|
|
+ struct ftrace_page *pg;
|
|
+ int index;
|
|
+};
|
|
+
|
|
+struct ftrace_iterator {
|
|
+ loff_t pos;
|
|
+ loff_t func_pos;
|
|
+ loff_t mod_pos;
|
|
+ struct ftrace_page *pg;
|
|
+ struct dyn_ftrace *func;
|
|
+ struct ftrace_func_probe *probe;
|
|
+ struct ftrace_func_entry *probe_entry;
|
|
+ struct trace_parser parser;
|
|
+ struct ftrace_hash *hash;
|
|
+ struct ftrace_ops *ops;
|
|
+ struct trace_array *tr;
|
|
+ struct list_head *mod_list;
|
|
+ int pidx;
|
|
+ int idx;
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+struct ftrace_glob {
|
|
+ char *search;
|
|
+ unsigned int len;
|
|
+ int type;
|
|
+};
|
|
+
|
|
+struct ftrace_func_map {
|
|
+ struct ftrace_func_entry entry;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+struct ftrace_func_mapper {
|
|
+ struct ftrace_hash hash;
|
|
+};
|
|
+
|
|
+enum graph_filter_type {
|
|
+ GRAPH_FILTER_NOTRACE = 0,
|
|
+ GRAPH_FILTER_FUNCTION = 1,
|
|
+};
|
|
+
|
|
+struct ftrace_graph_data {
|
|
+ struct ftrace_hash *hash;
|
|
+ struct ftrace_func_entry *entry;
|
|
+ int idx;
|
|
+ enum graph_filter_type type;
|
|
+ struct ftrace_hash *new_hash;
|
|
+ const struct seq_operations___2 *seq_ops;
|
|
+ struct trace_parser parser;
|
|
+};
|
|
+
|
|
+struct ftrace_mod_func {
|
|
+ struct list_head list;
|
|
+ char *name;
|
|
+ long unsigned int ip;
|
|
+ unsigned int size;
|
|
+};
|
|
+
|
|
+struct ftrace_mod_map {
|
|
+ struct callback_head rcu;
|
|
+ struct list_head list;
|
|
+ struct module *mod;
|
|
+ long unsigned int start_addr;
|
|
+ long unsigned int end_addr;
|
|
+ struct list_head funcs;
|
|
+ unsigned int num_funcs;
|
|
+};
|
|
+
|
|
+struct ftrace_init_func {
|
|
+ struct list_head list;
|
|
+ long unsigned int ip;
|
|
+};
|
|
+
|
|
+enum ring_buffer_type {
|
|
+ RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
|
|
+ RINGBUF_TYPE_PADDING = 29,
|
|
+ RINGBUF_TYPE_TIME_EXTEND = 30,
|
|
+ RINGBUF_TYPE_TIME_STAMP = 31,
|
|
+};
|
|
+
|
|
+enum ring_buffer_flags {
|
|
+ RB_FL_OVERWRITE = 1,
|
|
+};
|
|
+
|
|
+struct rb_irq_work {
|
|
+ struct irq_work work;
|
|
+ wait_queue_head_t waiters;
|
|
+ wait_queue_head_t full_waiters;
|
|
+ bool waiters_pending;
|
|
+ bool full_waiters_pending;
|
|
+ bool wakeup_full;
|
|
+};
|
|
+
|
|
+struct ring_buffer_per_cpu;
|
|
+
|
|
+struct ring_buffer {
|
|
+ unsigned int flags;
|
|
+ int cpus;
|
|
+ atomic_t record_disabled;
|
|
+ atomic_t resize_disabled;
|
|
+ cpumask_var_t cpumask;
|
|
+ struct lock_class_key *reader_lock_key;
|
|
+ struct mutex mutex;
|
|
+ struct ring_buffer_per_cpu **buffers;
|
|
+ struct hlist_node node;
|
|
+ u64 (*clock)();
|
|
+ struct rb_irq_work irq_work;
|
|
+ bool time_stamp_abs;
|
|
+};
|
|
+
|
|
+struct buffer_page;
|
|
+
|
|
+struct ring_buffer_iter {
|
|
+ struct ring_buffer_per_cpu *cpu_buffer;
|
|
+ long unsigned int head;
|
|
+ struct buffer_page *head_page;
|
|
+ struct buffer_page *cache_reader_page;
|
|
+ long unsigned int cache_read;
|
|
+ u64 read_stamp;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ RB_LEN_TIME_EXTEND = 8,
|
|
+ RB_LEN_TIME_STAMP = 8,
|
|
+};
|
|
+
|
|
+struct buffer_data_page {
|
|
+ u64 time_stamp;
|
|
+ local_t commit;
|
|
+ unsigned char data[0];
|
|
+};
|
|
+
|
|
+struct buffer_page {
|
|
+ struct list_head list;
|
|
+ local_t write;
|
|
+ unsigned int read;
|
|
+ local_t entries;
|
|
+ long unsigned int real_end;
|
|
+ struct buffer_data_page *page;
|
|
+};
|
|
+
|
|
+struct rb_event_info {
|
|
+ u64 ts;
|
|
+ u64 delta;
|
|
+ long unsigned int length;
|
|
+ struct buffer_page *tail_page;
|
|
+ int add_timestamp;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ RB_CTX_TRANSITION = 0,
|
|
+ RB_CTX_NMI = 1,
|
|
+ RB_CTX_IRQ = 2,
|
|
+ RB_CTX_SOFTIRQ = 3,
|
|
+ RB_CTX_NORMAL = 4,
|
|
+ RB_CTX_MAX = 5,
|
|
+};
|
|
+
|
|
+struct ring_buffer_per_cpu {
|
|
+ int cpu;
|
|
+ atomic_t record_disabled;
|
|
+ struct ring_buffer *buffer;
|
|
+ raw_spinlock_t reader_lock;
|
|
+ arch_spinlock_t lock;
|
|
+ struct lock_class_key lock_key;
|
|
+ struct buffer_data_page *free_page;
|
|
+ long unsigned int nr_pages;
|
|
+ unsigned int current_context;
|
|
+ struct list_head *pages;
|
|
+ struct buffer_page *head_page;
|
|
+ struct buffer_page *tail_page;
|
|
+ struct buffer_page *commit_page;
|
|
+ struct buffer_page *reader_page;
|
|
+ long unsigned int lost_events;
|
|
+ long unsigned int last_overrun;
|
|
+ long unsigned int nest;
|
|
+ local_t entries_bytes;
|
|
+ local_t entries;
|
|
+ local_t overrun;
|
|
+ local_t commit_overrun;
|
|
+ local_t dropped_events;
|
|
+ local_t committing;
|
|
+ local_t commits;
|
|
+ long unsigned int read;
|
|
+ long unsigned int read_bytes;
|
|
+ u64 write_stamp;
|
|
+ u64 read_stamp;
|
|
+ long int nr_pages_to_update;
|
|
+ struct list_head new_pages;
|
|
+ struct work_struct update_pages_work;
|
|
+ struct completion update_done;
|
|
+ struct rb_irq_work irq_work;
|
|
+};
|
|
+
|
|
+struct trace_export {
|
|
+ struct trace_export *next;
|
|
+ void (*write)(struct trace_export *, const void *, unsigned int);
|
|
+};
|
|
+
|
|
+enum trace_iter_flags {
|
|
+ TRACE_FILE_LAT_FMT = 1,
|
|
+ TRACE_FILE_ANNOTATE = 2,
|
|
+ TRACE_FILE_TIME_IN_NS = 4,
|
|
+};
|
|
+
|
|
+enum event_trigger_type {
|
|
+ ETT_NONE = 0,
|
|
+ ETT_TRACE_ONOFF = 1,
|
|
+ ETT_SNAPSHOT = 2,
|
|
+ ETT_STACKTRACE = 4,
|
|
+ ETT_EVENT_ENABLE = 8,
|
|
+ ETT_EVENT_HIST = 16,
|
|
+ ETT_HIST_ENABLE = 32,
|
|
+};
|
|
+
|
|
+enum trace_type {
|
|
+ __TRACE_FIRST_TYPE = 0,
|
|
+ TRACE_FN = 1,
|
|
+ TRACE_CTX = 2,
|
|
+ TRACE_WAKE = 3,
|
|
+ TRACE_STACK = 4,
|
|
+ TRACE_PRINT = 5,
|
|
+ TRACE_BPRINT = 6,
|
|
+ TRACE_MMIO_RW = 7,
|
|
+ TRACE_MMIO_MAP = 8,
|
|
+ TRACE_BRANCH = 9,
|
|
+ TRACE_GRAPH_RET = 10,
|
|
+ TRACE_GRAPH_ENT = 11,
|
|
+ TRACE_USER_STACK = 12,
|
|
+ TRACE_BLK = 13,
|
|
+ TRACE_BPUTS = 14,
|
|
+ TRACE_HWLAT = 15,
|
|
+ TRACE_RAW_DATA = 16,
|
|
+ __TRACE_LAST_TYPE = 17,
|
|
+};
|
|
+
|
|
+struct ftrace_entry {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int ip;
|
|
+ long unsigned int parent_ip;
|
|
+};
|
|
+
|
|
+struct stack_entry {
|
|
+ struct trace_entry ent;
|
|
+ int size;
|
|
+ long unsigned int caller[8];
|
|
+};
|
|
+
|
|
+struct userstack_entry {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int tgid;
|
|
+ long unsigned int caller[8];
|
|
+};
|
|
+
|
|
+struct bprint_entry {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int ip;
|
|
+ const char *fmt;
|
|
+ u32 buf[0];
|
|
+};
|
|
+
|
|
+struct print_entry {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int ip;
|
|
+ char buf[0];
|
|
+};
|
|
+
|
|
+struct raw_data_entry {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int id;
|
|
+ char buf[0];
|
|
+};
|
|
+
|
|
+struct bputs_entry {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int ip;
|
|
+ const char *str;
|
|
+};
|
|
+
|
|
+enum trace_flag_type {
|
|
+ TRACE_FLAG_IRQS_OFF = 1,
|
|
+ TRACE_FLAG_IRQS_NOSUPPORT = 2,
|
|
+ TRACE_FLAG_NEED_RESCHED = 4,
|
|
+ TRACE_FLAG_HARDIRQ = 8,
|
|
+ TRACE_FLAG_SOFTIRQ = 16,
|
|
+ TRACE_FLAG_PREEMPT_RESCHED = 32,
|
|
+ TRACE_FLAG_NMI = 64,
|
|
+};
|
|
+
|
|
+enum trace_iterator_flags {
|
|
+ TRACE_ITER_PRINT_PARENT = 1,
|
|
+ TRACE_ITER_SYM_OFFSET = 2,
|
|
+ TRACE_ITER_SYM_ADDR = 4,
|
|
+ TRACE_ITER_VERBOSE = 8,
|
|
+ TRACE_ITER_RAW = 16,
|
|
+ TRACE_ITER_HEX = 32,
|
|
+ TRACE_ITER_BIN = 64,
|
|
+ TRACE_ITER_BLOCK = 128,
|
|
+ TRACE_ITER_PRINTK = 256,
|
|
+ TRACE_ITER_ANNOTATE = 512,
|
|
+ TRACE_ITER_USERSTACKTRACE = 1024,
|
|
+ TRACE_ITER_SYM_USEROBJ = 2048,
|
|
+ TRACE_ITER_PRINTK_MSGONLY = 4096,
|
|
+ TRACE_ITER_CONTEXT_INFO = 8192,
|
|
+ TRACE_ITER_LATENCY_FMT = 16384,
|
|
+ TRACE_ITER_RECORD_CMD = 32768,
|
|
+ TRACE_ITER_RECORD_TGID = 65536,
|
|
+ TRACE_ITER_OVERWRITE = 131072,
|
|
+ TRACE_ITER_STOP_ON_FREE = 262144,
|
|
+ TRACE_ITER_IRQ_INFO = 524288,
|
|
+ TRACE_ITER_MARKERS = 1048576,
|
|
+ TRACE_ITER_EVENT_FORK = 2097152,
|
|
+ TRACE_ITER_FUNCTION = 4194304,
|
|
+ TRACE_ITER_FUNC_FORK = 8388608,
|
|
+ TRACE_ITER_DISPLAY_GRAPH = 16777216,
|
|
+ TRACE_ITER_STACKTRACE = 33554432,
|
|
+};
|
|
+
|
|
+struct saved_cmdlines_buffer {
|
|
+ unsigned int map_pid_to_cmdline[32769];
|
|
+ unsigned int *map_cmdline_to_pid;
|
|
+ unsigned int cmdline_num;
|
|
+ int cmdline_idx;
|
|
+ char *saved_cmdlines;
|
|
+};
|
|
+
|
|
+struct ftrace_stack {
|
|
+ long unsigned int calls[512];
|
|
+};
|
|
+
|
|
+struct trace_buffer_struct {
|
|
+ int nesting;
|
|
+ char buffer[4096];
|
|
+};
|
|
+
|
|
+struct ftrace_buffer_info {
|
|
+ struct trace_iterator iter;
|
|
+ void *spare;
|
|
+ unsigned int spare_cpu;
|
|
+ unsigned int read;
|
|
+};
|
|
+
|
|
+struct buffer_ref {
|
|
+ struct ring_buffer *buffer;
|
|
+ void *page;
|
|
+ int cpu;
|
|
+ refcount_t refcount;
|
|
+};
|
|
+
|
|
+struct ftrace_func_mapper___2;
|
|
+
|
|
+struct ctx_switch_entry {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int prev_pid;
|
|
+ unsigned int next_pid;
|
|
+ unsigned int next_cpu;
|
|
+ unsigned char prev_prio;
|
|
+ unsigned char prev_state;
|
|
+ unsigned char next_prio;
|
|
+ unsigned char next_state;
|
|
+};
|
|
+
|
|
+struct hwlat_entry {
|
|
+ struct trace_entry ent;
|
|
+ u64 duration;
|
|
+ u64 outer_duration;
|
|
+ u64 nmi_total_ts;
|
|
+ struct timespec64 timestamp;
|
|
+ unsigned int nmi_count;
|
|
+ unsigned int seqnum;
|
|
+};
|
|
+
|
|
+struct trace_mark {
|
|
+ long long unsigned int val;
|
|
+ char sym;
|
|
+};
|
|
+
|
|
+struct stat_node {
|
|
+ struct rb_node node;
|
|
+ void *stat;
|
|
+};
|
|
+
|
|
+struct stat_session {
|
|
+ struct list_head session_list;
|
|
+ struct tracer_stat *ts;
|
|
+ struct rb_root stat_root;
|
|
+ struct mutex stat_mutex;
|
|
+ struct dentry *file;
|
|
+};
|
|
+
|
|
+typedef int (*cmp_stat_t)(void *, void *);
|
|
+
|
|
+struct trace_bprintk_fmt {
|
|
+ struct list_head list;
|
|
+ const char *fmt;
|
|
+};
|
|
+
|
|
+typedef int (*tracing_map_cmp_fn_t)(void *, void *);
|
|
+
|
|
+struct tracing_map_field {
|
|
+ tracing_map_cmp_fn_t cmp_fn;
|
|
+ union {
|
|
+ atomic64_t sum;
|
|
+ unsigned int offset;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct tracing_map;
|
|
+
|
|
+struct tracing_map_elt {
|
|
+ struct tracing_map *map;
|
|
+ struct tracing_map_field *fields;
|
|
+ atomic64_t *vars;
|
|
+ bool *var_set;
|
|
+ void *key;
|
|
+ void *private_data;
|
|
+};
|
|
+
|
|
+struct tracing_map_sort_key {
|
|
+ unsigned int field_idx;
|
|
+ bool descending;
|
|
+};
|
|
+
|
|
+struct tracing_map_array;
|
|
+
|
|
+struct tracing_map_ops;
|
|
+
|
|
+struct tracing_map {
|
|
+ unsigned int key_size;
|
|
+ unsigned int map_bits;
|
|
+ unsigned int map_size;
|
|
+ unsigned int max_elts;
|
|
+ atomic_t next_elt;
|
|
+ struct tracing_map_array *elts;
|
|
+ struct tracing_map_array *map;
|
|
+ const struct tracing_map_ops *ops;
|
|
+ void *private_data;
|
|
+ struct tracing_map_field fields[6];
|
|
+ unsigned int n_fields;
|
|
+ int key_idx[3];
|
|
+ unsigned int n_keys;
|
|
+ struct tracing_map_sort_key sort_key;
|
|
+ unsigned int n_vars;
|
|
+ atomic64_t hits;
|
|
+ atomic64_t drops;
|
|
+};
|
|
+
|
|
+struct tracing_map_entry {
|
|
+ u32 key;
|
|
+ struct tracing_map_elt *val;
|
|
+};
|
|
+
|
|
+struct tracing_map_sort_entry {
|
|
+ void *key;
|
|
+ struct tracing_map_elt *elt;
|
|
+ bool elt_copied;
|
|
+ bool dup;
|
|
+};
|
|
+
|
|
+struct tracing_map_array {
|
|
+ unsigned int entries_per_page;
|
|
+ unsigned int entry_size_shift;
|
|
+ unsigned int entry_shift;
|
|
+ unsigned int entry_mask;
|
|
+ unsigned int n_pages;
|
|
+ void **pages;
|
|
+};
|
|
+
|
|
+struct tracing_map_ops {
|
|
+ int (*elt_alloc)(struct tracing_map_elt *);
|
|
+ void (*elt_free)(struct tracing_map_elt *);
|
|
+ void (*elt_clear)(struct tracing_map_elt *);
|
|
+ void (*elt_init)(struct tracing_map_elt *);
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TRACE_FUNC_OPT_STACK = 1,
|
|
+};
|
|
+
|
|
+struct hwlat_sample {
|
|
+ u64 seqnum;
|
|
+ u64 duration;
|
|
+ u64 outer_duration;
|
|
+ u64 nmi_total_ts;
|
|
+ struct timespec64 timestamp;
|
|
+ int nmi_count;
|
|
+};
|
|
+
|
|
+struct hwlat_data {
|
|
+ struct mutex lock;
|
|
+ u64 count;
|
|
+ u64 sample_window;
|
|
+ u64 sample_width;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TRACE_NOP_OPT_ACCEPT = 1,
|
|
+ TRACE_NOP_OPT_REFUSE = 2,
|
|
+};
|
|
+
|
|
+struct ftrace_graph_ent_entry {
|
|
+ struct trace_entry ent;
|
|
+ struct ftrace_graph_ent graph_ent;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct ftrace_graph_ret_entry {
|
|
+ struct trace_entry ent;
|
|
+ struct ftrace_graph_ret ret;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct fgraph_cpu_data {
|
|
+ pid_t last_pid;
|
|
+ int depth;
|
|
+ int depth_irq;
|
|
+ int ignore;
|
|
+ long unsigned int enter_funcs[50];
|
|
+};
|
|
+
|
|
+struct fgraph_data {
|
|
+ struct fgraph_cpu_data *cpu_data;
|
|
+ struct ftrace_graph_ent_entry ent;
|
|
+ struct ftrace_graph_ret_entry ret;
|
|
+ int failed;
|
|
+ int cpu;
|
|
+} __attribute__((packed));
|
|
+
|
|
+enum {
|
|
+ FLAGS_FILL_FULL = 268435456,
|
|
+ FLAGS_FILL_START = 536870912,
|
|
+ FLAGS_FILL_END = 805306368,
|
|
+};
|
|
+
|
|
+typedef __u32 blk_mq_req_flags_t;
|
|
+
|
|
+struct blk_mq_ctxs;
|
|
+
|
|
+struct blk_mq_ctx {
|
|
+ struct {
|
|
+ spinlock_t lock;
|
|
+ struct list_head rq_list;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ };
|
|
+ unsigned int cpu;
|
|
+ unsigned int index_hw;
|
|
+ long unsigned int rq_dispatched[2];
|
|
+ long unsigned int rq_merged;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long unsigned int rq_completed[2];
|
|
+ struct request_queue *queue;
|
|
+ struct blk_mq_ctxs *ctxs;
|
|
+ struct kobject kobj;
|
|
+};
|
|
+
|
|
+struct sbitmap_word;
|
|
+
|
|
+struct sbitmap {
|
|
+ unsigned int depth;
|
|
+ unsigned int shift;
|
|
+ unsigned int map_nr;
|
|
+ struct sbitmap_word *map;
|
|
+};
|
|
+
|
|
+struct blk_mq_tags;
|
|
+
|
|
+struct blk_mq_hw_ctx {
|
|
+ struct {
|
|
+ spinlock_t lock;
|
|
+ struct list_head dispatch;
|
|
+ long unsigned int state;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ };
|
|
+ struct delayed_work run_work;
|
|
+ cpumask_var_t cpumask;
|
|
+ int next_cpu;
|
|
+ int next_cpu_batch;
|
|
+ long unsigned int flags;
|
|
+ void *sched_data;
|
|
+ struct request_queue *queue;
|
|
+ struct blk_flush_queue *fq;
|
|
+ void *driver_data;
|
|
+ struct sbitmap ctx_map;
|
|
+ struct blk_mq_ctx *dispatch_from;
|
|
+ unsigned int dispatch_busy;
|
|
+ unsigned int nr_ctx;
|
|
+ struct blk_mq_ctx **ctxs;
|
|
+ spinlock_t dispatch_wait_lock;
|
|
+ wait_queue_entry_t dispatch_wait;
|
|
+ atomic_t wait_index;
|
|
+ struct blk_mq_tags *tags;
|
|
+ struct blk_mq_tags *sched_tags;
|
|
+ long unsigned int queued;
|
|
+ long unsigned int run;
|
|
+ long unsigned int dispatched[7];
|
|
+ unsigned int numa_node;
|
|
+ unsigned int queue_num;
|
|
+ atomic_t nr_active;
|
|
+ unsigned int nr_expired;
|
|
+ struct hlist_node cpuhp_dead;
|
|
+ struct kobject kobj;
|
|
+ long unsigned int poll_considered;
|
|
+ long unsigned int poll_invoked;
|
|
+ long unsigned int poll_success;
|
|
+ struct dentry *debugfs_dir;
|
|
+ struct dentry *sched_debugfs_dir;
|
|
+ struct list_head hctx_list;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ struct srcu_struct srcu[0];
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct blk_mq_alloc_data {
|
|
+ struct request_queue *q;
|
|
+ blk_mq_req_flags_t flags;
|
|
+ unsigned int shallow_depth;
|
|
+ struct blk_mq_ctx *ctx;
|
|
+ struct blk_mq_hw_ctx *hctx;
|
|
+};
|
|
+
|
|
+struct blk_stat_callback {
|
|
+ struct list_head list;
|
|
+ struct timer_list timer;
|
|
+ struct blk_rq_stat *cpu_stat;
|
|
+ int (*bucket_fn)(const struct request *);
|
|
+ unsigned int buckets;
|
|
+ struct blk_rq_stat *stat;
|
|
+ void (*timer_fn)(struct blk_stat_callback *);
|
|
+ void *data;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct blk_trace {
|
|
+ int trace_state;
|
|
+ struct rchan *rchan;
|
|
+ long unsigned int *sequence;
|
|
+ unsigned char *msg_data;
|
|
+ u16 act_mask;
|
|
+ u64 start_lba;
|
|
+ u64 end_lba;
|
|
+ u32 pid;
|
|
+ u32 dev;
|
|
+ struct dentry *dir;
|
|
+ struct dentry *dropped_file;
|
|
+ struct dentry *msg_file;
|
|
+ struct list_head running_list;
|
|
+ atomic_t dropped;
|
|
+};
|
|
+
|
|
+struct blk_flush_queue {
|
|
+ unsigned int flush_queue_delayed: 1;
|
|
+ unsigned int flush_pending_idx: 1;
|
|
+ unsigned int flush_running_idx: 1;
|
|
+ blk_status_t rq_status;
|
|
+ long unsigned int flush_pending_since;
|
|
+ struct list_head flush_queue[2];
|
|
+ struct list_head flush_data_in_flight;
|
|
+ struct request *flush_rq;
|
|
+ struct request *orig_rq;
|
|
+ spinlock_t mq_flush_lock;
|
|
+};
|
|
+
|
|
+struct blk_mq_tag_set {
|
|
+ unsigned int *mq_map;
|
|
+ const struct blk_mq_ops *ops;
|
|
+ unsigned int nr_hw_queues;
|
|
+ unsigned int queue_depth;
|
|
+ unsigned int reserved_tags;
|
|
+ unsigned int cmd_size;
|
|
+ int numa_node;
|
|
+ unsigned int timeout;
|
|
+ unsigned int flags;
|
|
+ void *driver_data;
|
|
+ struct blk_mq_tags **tags;
|
|
+ struct mutex tag_list_lock;
|
|
+ struct list_head tag_list;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+};
|
|
+
|
|
+typedef u64 compat_u64;
|
|
+
|
|
+enum blktrace_cat {
|
|
+ BLK_TC_READ = 1,
|
|
+ BLK_TC_WRITE = 2,
|
|
+ BLK_TC_FLUSH = 4,
|
|
+ BLK_TC_SYNC = 8,
|
|
+ BLK_TC_SYNCIO = 8,
|
|
+ BLK_TC_QUEUE = 16,
|
|
+ BLK_TC_REQUEUE = 32,
|
|
+ BLK_TC_ISSUE = 64,
|
|
+ BLK_TC_COMPLETE = 128,
|
|
+ BLK_TC_FS = 256,
|
|
+ BLK_TC_PC = 512,
|
|
+ BLK_TC_NOTIFY = 1024,
|
|
+ BLK_TC_AHEAD = 2048,
|
|
+ BLK_TC_META = 4096,
|
|
+ BLK_TC_DISCARD = 8192,
|
|
+ BLK_TC_DRV_DATA = 16384,
|
|
+ BLK_TC_FUA = 32768,
|
|
+ BLK_TC_END = 32768,
|
|
+};
|
|
+
|
|
+enum blktrace_act {
|
|
+ __BLK_TA_QUEUE = 1,
|
|
+ __BLK_TA_BACKMERGE = 2,
|
|
+ __BLK_TA_FRONTMERGE = 3,
|
|
+ __BLK_TA_GETRQ = 4,
|
|
+ __BLK_TA_SLEEPRQ = 5,
|
|
+ __BLK_TA_REQUEUE = 6,
|
|
+ __BLK_TA_ISSUE = 7,
|
|
+ __BLK_TA_COMPLETE = 8,
|
|
+ __BLK_TA_PLUG = 9,
|
|
+ __BLK_TA_UNPLUG_IO = 10,
|
|
+ __BLK_TA_UNPLUG_TIMER = 11,
|
|
+ __BLK_TA_INSERT = 12,
|
|
+ __BLK_TA_SPLIT = 13,
|
|
+ __BLK_TA_BOUNCE = 14,
|
|
+ __BLK_TA_REMAP = 15,
|
|
+ __BLK_TA_ABORT = 16,
|
|
+ __BLK_TA_DRV_DATA = 17,
|
|
+ __BLK_TA_CGROUP = 256,
|
|
+};
|
|
+
|
|
+enum blktrace_notify {
|
|
+ __BLK_TN_PROCESS = 0,
|
|
+ __BLK_TN_TIMESTAMP = 1,
|
|
+ __BLK_TN_MESSAGE = 2,
|
|
+ __BLK_TN_CGROUP = 256,
|
|
+};
|
|
+
|
|
+struct blk_io_trace {
|
|
+ __u32 magic;
|
|
+ __u32 sequence;
|
|
+ __u64 time;
|
|
+ __u64 sector;
|
|
+ __u32 bytes;
|
|
+ __u32 action;
|
|
+ __u32 pid;
|
|
+ __u32 device;
|
|
+ __u32 cpu;
|
|
+ __u16 error;
|
|
+ __u16 pdu_len;
|
|
+};
|
|
+
|
|
+struct blk_io_trace_remap {
|
|
+ __be32 device_from;
|
|
+ __be32 device_to;
|
|
+ __be64 sector_from;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ Blktrace_setup = 1,
|
|
+ Blktrace_running = 2,
|
|
+ Blktrace_stopped = 3,
|
|
+};
|
|
+
|
|
+struct blk_user_trace_setup {
|
|
+ char name[32];
|
|
+ __u16 act_mask;
|
|
+ __u32 buf_size;
|
|
+ __u32 buf_nr;
|
|
+ __u64 start_lba;
|
|
+ __u64 end_lba;
|
|
+ __u32 pid;
|
|
+};
|
|
+
|
|
+struct compat_blk_user_trace_setup {
|
|
+ char name[32];
|
|
+ u16 act_mask;
|
|
+ short: 16;
|
|
+ u32 buf_size;
|
|
+ u32 buf_nr;
|
|
+ compat_u64 start_lba;
|
|
+ compat_u64 end_lba;
|
|
+ u32 pid;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct sbitmap_word {
|
|
+ long unsigned int word;
|
|
+ long unsigned int depth;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct sbq_wait_state {
|
|
+ atomic_t wait_cnt;
|
|
+ wait_queue_head_t wait;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct sbitmap_queue {
|
|
+ struct sbitmap sb;
|
|
+ unsigned int *alloc_hint;
|
|
+ unsigned int wake_batch;
|
|
+ atomic_t wake_index;
|
|
+ struct sbq_wait_state *ws;
|
|
+ bool round_robin;
|
|
+ unsigned int min_shallow_depth;
|
|
+};
|
|
+
|
|
+struct blk_mq_tags {
|
|
+ unsigned int nr_tags;
|
|
+ unsigned int nr_reserved_tags;
|
|
+ atomic_t active_queues;
|
|
+ struct sbitmap_queue bitmap_tags;
|
|
+ struct sbitmap_queue breserved_tags;
|
|
+ struct request **rqs;
|
|
+ struct request **static_rqs;
|
|
+ struct list_head page_list;
|
|
+};
|
|
+
|
|
+struct blk_mq_queue_data {
|
|
+ struct request *rq;
|
|
+ bool last;
|
|
+ long unsigned int kabi_reserved1;
|
|
+};
|
|
+
|
|
+struct blk_mq_ctxs {
|
|
+ struct kobject kobj;
|
|
+ struct blk_mq_ctx *queue_ctx;
|
|
+};
|
|
+
|
|
+typedef void blk_log_action_t(struct trace_iterator *, const char *, bool);
|
|
+
|
|
+struct ftrace_event_field {
|
|
+ struct list_head link;
|
|
+ const char *name;
|
|
+ const char *type;
|
|
+ int filter_type;
|
|
+ int offset;
|
|
+ int size;
|
|
+ int is_signed;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ FORMAT_HEADER = 1,
|
|
+ FORMAT_FIELD_SEPERATOR = 2,
|
|
+ FORMAT_PRINTFMT = 3,
|
|
+};
|
|
+
|
|
+struct event_probe_data {
|
|
+ struct trace_event_file *file;
|
|
+ long unsigned int count;
|
|
+ int ref;
|
|
+ bool enable;
|
|
+};
|
|
+
|
|
+struct mmiotrace_rw {
|
|
+ resource_size_t phys;
|
|
+ long unsigned int value;
|
|
+ long unsigned int pc;
|
|
+ int map_id;
|
|
+ unsigned char opcode;
|
|
+ unsigned char width;
|
|
+};
|
|
+
|
|
+struct mmiotrace_map {
|
|
+ resource_size_t phys;
|
|
+ long unsigned int virt;
|
|
+ long unsigned int len;
|
|
+ int map_id;
|
|
+ unsigned char opcode;
|
|
+};
|
|
+
|
|
+struct trace_mmiotrace_rw {
|
|
+ struct trace_entry ent;
|
|
+ struct mmiotrace_rw rw;
|
|
+};
|
|
+
|
|
+struct trace_mmiotrace_map {
|
|
+ struct trace_entry ent;
|
|
+ struct mmiotrace_map map;
|
|
+};
|
|
+
|
|
+struct trace_branch {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int line;
|
|
+ char func[31];
|
|
+ char file[21];
|
|
+ char correct;
|
|
+ char constant;
|
|
+};
|
|
+
|
|
+struct syscall_trace_enter {
|
|
+ struct trace_entry ent;
|
|
+ int nr;
|
|
+ long unsigned int args[0];
|
|
+};
|
|
+
|
|
+struct syscall_trace_exit {
|
|
+ struct trace_entry ent;
|
|
+ int nr;
|
|
+ long int ret;
|
|
+};
|
|
+
|
|
+struct syscall_tp_t {
|
|
+ long long unsigned int regs;
|
|
+ long unsigned int syscall_nr;
|
|
+ long unsigned int ret;
|
|
+};
|
|
+
|
|
+struct syscall_tp_t___2 {
|
|
+ long long unsigned int regs;
|
|
+ long unsigned int syscall_nr;
|
|
+ long unsigned int args[6];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ FETCH_MTD_reg = 0,
|
|
+ FETCH_MTD_stack = 1,
|
|
+ FETCH_MTD_retval = 2,
|
|
+ FETCH_MTD_comm = 3,
|
|
+ FETCH_MTD_memory = 4,
|
|
+ FETCH_MTD_symbol = 5,
|
|
+ FETCH_MTD_deref = 6,
|
|
+ FETCH_MTD_bitfield = 7,
|
|
+ FETCH_MTD_file_offset = 8,
|
|
+ FETCH_MTD_END = 9,
|
|
+};
|
|
+
|
|
+typedef long unsigned int perf_trace_t[256];
|
|
+
|
|
+struct filter_pred;
|
|
+
|
|
+struct prog_entry {
|
|
+ int target;
|
|
+ int when_to_branch;
|
|
+ struct filter_pred *pred;
|
|
+};
|
|
+
|
|
+typedef int (*filter_pred_fn_t)(struct filter_pred *, void *);
|
|
+
|
|
+struct regex;
|
|
+
|
|
+typedef int (*regex_match_func)(char *, struct regex *, int);
|
|
+
|
|
+struct regex {
|
|
+ char pattern[256];
|
|
+ int len;
|
|
+ int field_len;
|
|
+ regex_match_func match;
|
|
+};
|
|
+
|
|
+struct filter_pred {
|
|
+ filter_pred_fn_t fn;
|
|
+ u64 val;
|
|
+ struct regex regex;
|
|
+ short unsigned int *ops;
|
|
+ struct ftrace_event_field *field;
|
|
+ int offset;
|
|
+ int not;
|
|
+ int op;
|
|
+};
|
|
+
|
|
+enum filter_op_ids {
|
|
+ OP_GLOB = 0,
|
|
+ OP_NE = 1,
|
|
+ OP_EQ = 2,
|
|
+ OP_LE = 3,
|
|
+ OP_LT = 4,
|
|
+ OP_GE = 5,
|
|
+ OP_GT = 6,
|
|
+ OP_BAND = 7,
|
|
+ OP_MAX = 8,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ FILT_ERR_NONE = 0,
|
|
+ FILT_ERR_INVALID_OP = 1,
|
|
+ FILT_ERR_TOO_MANY_OPEN = 2,
|
|
+ FILT_ERR_TOO_MANY_CLOSE = 3,
|
|
+ FILT_ERR_MISSING_QUOTE = 4,
|
|
+ FILT_ERR_OPERAND_TOO_LONG = 5,
|
|
+ FILT_ERR_EXPECT_STRING = 6,
|
|
+ FILT_ERR_EXPECT_DIGIT = 7,
|
|
+ FILT_ERR_ILLEGAL_FIELD_OP = 8,
|
|
+ FILT_ERR_FIELD_NOT_FOUND = 9,
|
|
+ FILT_ERR_ILLEGAL_INTVAL = 10,
|
|
+ FILT_ERR_BAD_SUBSYS_FILTER = 11,
|
|
+ FILT_ERR_TOO_MANY_PREDS = 12,
|
|
+ FILT_ERR_INVALID_FILTER = 13,
|
|
+ FILT_ERR_IP_FIELD_ONLY = 14,
|
|
+ FILT_ERR_INVALID_VALUE = 15,
|
|
+ FILT_ERR_NO_FILTER = 16,
|
|
+};
|
|
+
|
|
+struct filter_parse_error {
|
|
+ int lasterr;
|
|
+ int lasterr_pos;
|
|
+};
|
|
+
|
|
+typedef int (*parse_pred_fn)(const char *, void *, int, struct filter_parse_error *, struct filter_pred **);
|
|
+
|
|
+enum {
|
|
+ INVERT = 1,
|
|
+ PROCESS_AND = 2,
|
|
+ PROCESS_OR = 4,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TOO_MANY_CLOSE = -1,
|
|
+ TOO_MANY_OPEN = -2,
|
|
+ MISSING_QUOTE = -3,
|
|
+};
|
|
+
|
|
+struct filter_list {
|
|
+ struct list_head list;
|
|
+ struct event_filter *filter;
|
|
+};
|
|
+
|
|
+struct function_filter_data {
|
|
+ struct ftrace_ops *ops;
|
|
+ int first_filter;
|
|
+ int first_notrace;
|
|
+};
|
|
+
|
|
+struct event_trigger_ops;
|
|
+
|
|
+struct event_command;
|
|
+
|
|
+struct event_trigger_data {
|
|
+ long unsigned int count;
|
|
+ int ref;
|
|
+ struct event_trigger_ops *ops;
|
|
+ struct event_command *cmd_ops;
|
|
+ struct event_filter *filter;
|
|
+ char *filter_str;
|
|
+ void *private_data;
|
|
+ bool paused;
|
|
+ bool paused_tmp;
|
|
+ struct list_head list;
|
|
+ char *name;
|
|
+ struct list_head named_list;
|
|
+ struct event_trigger_data *named_data;
|
|
+};
|
|
+
|
|
+struct event_trigger_ops {
|
|
+ void (*func)(struct event_trigger_data *, void *, struct ring_buffer_event *);
|
|
+ int (*init)(struct event_trigger_ops *, struct event_trigger_data *);
|
|
+ void (*free)(struct event_trigger_ops *, struct event_trigger_data *);
|
|
+ int (*print)(struct seq_file *, struct event_trigger_ops *, struct event_trigger_data *);
|
|
+};
|
|
+
|
|
+struct event_command {
|
|
+ struct list_head list;
|
|
+ char *name;
|
|
+ enum event_trigger_type trigger_type;
|
|
+ int flags;
|
|
+ int (*func)(struct event_command *, struct trace_event_file *, char *, char *, char *);
|
|
+ int (*reg)(char *, struct event_trigger_ops *, struct event_trigger_data *, struct trace_event_file *);
|
|
+ void (*unreg)(char *, struct event_trigger_ops *, struct event_trigger_data *, struct trace_event_file *);
|
|
+ void (*unreg_all)(struct trace_event_file *);
|
|
+ int (*set_filter)(char *, struct event_trigger_data *, struct trace_event_file *);
|
|
+ struct event_trigger_ops * (*get_trigger_ops)(char *, char *);
|
|
+};
|
|
+
|
|
+struct enable_trigger_data {
|
|
+ struct trace_event_file *file;
|
|
+ bool enable;
|
|
+ bool hist;
|
|
+};
|
|
+
|
|
+enum event_command_flags {
|
|
+ EVENT_CMD_FL_POST_TRIGGER = 1,
|
|
+ EVENT_CMD_FL_NEEDS_REC = 2,
|
|
+};
|
|
+
|
|
+struct hist_field;
|
|
+
|
|
+typedef u64 (*hist_field_fn_t)(struct hist_field *, struct tracing_map_elt *, struct ring_buffer_event *, void *);
|
|
+
|
|
+struct hist_trigger_data;
|
|
+
|
|
+struct hist_var {
|
|
+ char *name;
|
|
+ struct hist_trigger_data *hist_data;
|
|
+ unsigned int idx;
|
|
+};
|
|
+
|
|
+enum field_op_id {
|
|
+ FIELD_OP_NONE = 0,
|
|
+ FIELD_OP_PLUS = 1,
|
|
+ FIELD_OP_MINUS = 2,
|
|
+ FIELD_OP_UNARY_MINUS = 3,
|
|
+};
|
|
+
|
|
+struct hist_field {
|
|
+ struct ftrace_event_field *field;
|
|
+ long unsigned int flags;
|
|
+ hist_field_fn_t fn;
|
|
+ unsigned int size;
|
|
+ unsigned int offset;
|
|
+ unsigned int is_signed;
|
|
+ const char *type;
|
|
+ struct hist_field *operands[2];
|
|
+ struct hist_trigger_data *hist_data;
|
|
+ struct hist_var var;
|
|
+ enum field_op_id operator;
|
|
+ char *system;
|
|
+ char *event_name;
|
|
+ char *name;
|
|
+ unsigned int var_idx;
|
|
+ unsigned int var_ref_idx;
|
|
+ bool read_once;
|
|
+};
|
|
+
|
|
+struct hist_trigger_attrs;
|
|
+
|
|
+struct action_data;
|
|
+
|
|
+struct field_var;
|
|
+
|
|
+struct field_var_hist;
|
|
+
|
|
+struct hist_trigger_data {
|
|
+ struct hist_field *fields[22];
|
|
+ unsigned int n_vals;
|
|
+ unsigned int n_keys;
|
|
+ unsigned int n_fields;
|
|
+ unsigned int n_vars;
|
|
+ unsigned int key_size;
|
|
+ struct tracing_map_sort_key sort_keys[2];
|
|
+ unsigned int n_sort_keys;
|
|
+ struct trace_event_file *event_file;
|
|
+ struct hist_trigger_attrs *attrs;
|
|
+ struct tracing_map *map;
|
|
+ bool enable_timestamps;
|
|
+ bool remove;
|
|
+ struct hist_field *var_refs[16];
|
|
+ unsigned int n_var_refs;
|
|
+ struct action_data *actions[8];
|
|
+ unsigned int n_actions;
|
|
+ struct hist_field *synth_var_refs[16];
|
|
+ unsigned int n_synth_var_refs;
|
|
+ struct field_var *field_vars[16];
|
|
+ unsigned int n_field_vars;
|
|
+ unsigned int n_field_var_str;
|
|
+ struct field_var_hist *field_var_hists[16];
|
|
+ unsigned int n_field_var_hists;
|
|
+ struct field_var *max_vars[16];
|
|
+ unsigned int n_max_vars;
|
|
+ unsigned int n_max_var_str;
|
|
+};
|
|
+
|
|
+enum hist_field_flags {
|
|
+ HIST_FIELD_FL_HITCOUNT = 1,
|
|
+ HIST_FIELD_FL_KEY = 2,
|
|
+ HIST_FIELD_FL_STRING = 4,
|
|
+ HIST_FIELD_FL_HEX = 8,
|
|
+ HIST_FIELD_FL_SYM = 16,
|
|
+ HIST_FIELD_FL_SYM_OFFSET = 32,
|
|
+ HIST_FIELD_FL_EXECNAME = 64,
|
|
+ HIST_FIELD_FL_SYSCALL = 128,
|
|
+ HIST_FIELD_FL_STACKTRACE = 256,
|
|
+ HIST_FIELD_FL_LOG2 = 512,
|
|
+ HIST_FIELD_FL_TIMESTAMP = 1024,
|
|
+ HIST_FIELD_FL_TIMESTAMP_USECS = 2048,
|
|
+ HIST_FIELD_FL_VAR = 4096,
|
|
+ HIST_FIELD_FL_EXPR = 8192,
|
|
+ HIST_FIELD_FL_VAR_REF = 16384,
|
|
+ HIST_FIELD_FL_CPU = 32768,
|
|
+ HIST_FIELD_FL_ALIAS = 65536,
|
|
+};
|
|
+
|
|
+struct var_defs {
|
|
+ unsigned int n_vars;
|
|
+ char *name[16];
|
|
+ char *expr[16];
|
|
+};
|
|
+
|
|
+struct hist_trigger_attrs {
|
|
+ char *keys_str;
|
|
+ char *vals_str;
|
|
+ char *sort_key_str;
|
|
+ char *name;
|
|
+ char *clock;
|
|
+ bool pause;
|
|
+ bool cont;
|
|
+ bool clear;
|
|
+ bool ts_in_usecs;
|
|
+ unsigned int map_bits;
|
|
+ char *assignment_str[16];
|
|
+ unsigned int n_assignments;
|
|
+ char *action_str[8];
|
|
+ unsigned int n_actions;
|
|
+ struct var_defs var_defs;
|
|
+};
|
|
+
|
|
+struct field_var {
|
|
+ struct hist_field *var;
|
|
+ struct hist_field *val;
|
|
+};
|
|
+
|
|
+struct field_var_hist {
|
|
+ struct hist_trigger_data *hist_data;
|
|
+ char *cmd;
|
|
+};
|
|
+
|
|
+typedef void (*action_fn_t)(struct hist_trigger_data *, struct tracing_map_elt *, void *, struct ring_buffer_event *, struct action_data *, u64 *);
|
|
+
|
|
+struct synth_event;
|
|
+
|
|
+struct action_data {
|
|
+ action_fn_t fn;
|
|
+ unsigned int n_params;
|
|
+ char *params[16];
|
|
+ union {
|
|
+ struct {
|
|
+ unsigned int var_ref_idx;
|
|
+ char *match_event;
|
|
+ char *match_event_system;
|
|
+ char *synth_event_name;
|
|
+ struct synth_event *synth_event;
|
|
+ } onmatch;
|
|
+ struct {
|
|
+ char *var_str;
|
|
+ char *fn_name;
|
|
+ unsigned int max_var_ref_idx;
|
|
+ struct hist_field *max_var;
|
|
+ struct hist_field *var;
|
|
+ } onmax;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct synth_field {
|
|
+ char *type;
|
|
+ char *name;
|
|
+ size_t size;
|
|
+ bool is_signed;
|
|
+ bool is_string;
|
|
+};
|
|
+
|
|
+struct synth_event {
|
|
+ struct list_head list;
|
|
+ int ref;
|
|
+ char *name;
|
|
+ struct synth_field **fields;
|
|
+ unsigned int n_fields;
|
|
+ unsigned int n_u64;
|
|
+ struct trace_event_class class;
|
|
+ struct trace_event_call call;
|
|
+ struct tracepoint *tp;
|
|
+};
|
|
+
|
|
+struct synth_trace_event {
|
|
+ struct trace_entry ent;
|
|
+ u64 fields[0];
|
|
+};
|
|
+
|
|
+typedef void (*synth_probe_func_t)(void *, u64 *, unsigned int);
|
|
+
|
|
+struct hist_var_data {
|
|
+ struct list_head list;
|
|
+ struct hist_trigger_data *hist_data;
|
|
+};
|
|
+
|
|
+struct hist_elt_data {
|
|
+ char *comm;
|
|
+ u64 *var_ref_vals;
|
|
+ char *field_var_str[16];
|
|
+};
|
|
+
|
|
+struct bpf_perf_event_value {
|
|
+ __u64 counter;
|
|
+ __u64 enabled;
|
|
+ __u64 running;
|
|
+};
|
|
+
|
|
+struct bpf_raw_tracepoint_args {
|
|
+ __u64 args[0];
|
|
+};
|
|
+
|
|
+enum bpf_task_fd_type {
|
|
+ BPF_FD_TYPE_RAW_TRACEPOINT = 0,
|
|
+ BPF_FD_TYPE_TRACEPOINT = 1,
|
|
+ BPF_FD_TYPE_KPROBE = 2,
|
|
+ BPF_FD_TYPE_KRETPROBE = 3,
|
|
+ BPF_FD_TYPE_UPROBE = 4,
|
|
+ BPF_FD_TYPE_URETPROBE = 5,
|
|
+};
|
|
+
|
|
+struct bpf_array {
|
|
+ struct bpf_map map;
|
|
+ u32 elem_size;
|
|
+ u32 index_mask;
|
|
+ enum bpf_prog_type owner_prog_type;
|
|
+ bool owner_jited;
|
|
+ union {
|
|
+ char value[0];
|
|
+ void *ptrs[0];
|
|
+ void *pptrs[0];
|
|
+ };
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct bpf_event_entry {
|
|
+ struct perf_event *event;
|
|
+ struct file *perf_file;
|
|
+ struct file *map_file;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+typedef long unsigned int (*bpf_ctx_copy_t)(void *, const void *, long unsigned int, long unsigned int);
|
|
+
|
|
+typedef struct pt_regs bpf_user_pt_regs_t;
|
|
+
|
|
+struct bpf_perf_event_data {
|
|
+ bpf_user_pt_regs_t regs;
|
|
+ __u64 sample_period;
|
|
+ __u64 addr;
|
|
+};
|
|
+
|
|
+struct perf_event_query_bpf {
|
|
+ __u32 ids_len;
|
|
+ __u32 prog_cnt;
|
|
+ __u32 ids[0];
|
|
+};
|
|
+
|
|
+struct bpf_perf_event_data_kern {
|
|
+ bpf_user_pt_regs_t *regs;
|
|
+ struct perf_sample_data *data;
|
|
+ struct perf_event *event;
|
|
+};
|
|
+
|
|
+struct bpf_trace_module {
|
|
+ struct module *module;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct bpf_trace_sample_data {
|
|
+ struct perf_sample_data sds[3];
|
|
+};
|
|
+
|
|
+struct bpf_raw_tp_regs {
|
|
+ struct pt_regs regs[3];
|
|
+};
|
|
+
|
|
+typedef void *pto_T_____25;
|
|
+
|
|
+struct kprobe_trace_entry_head {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int ip;
|
|
+};
|
|
+
|
|
+struct kretprobe_trace_entry_head {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int func;
|
|
+ long unsigned int ret_ip;
|
|
+};
|
|
+
|
|
+typedef void (*fetch_func_t)(struct pt_regs *, void *, void *);
|
|
+
|
|
+typedef int (*print_type_func_t)(struct trace_seq *, const char *, void *, void *);
|
|
+
|
|
+struct fetch_type {
|
|
+ const char *name;
|
|
+ size_t size;
|
|
+ int is_signed;
|
|
+ print_type_func_t print;
|
|
+ const char *fmt;
|
|
+ const char *fmttype;
|
|
+ fetch_func_t fetch[9];
|
|
+};
|
|
+
|
|
+struct fetch_param {
|
|
+ fetch_func_t fn;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+typedef u32 string;
|
|
+
|
|
+typedef u32 string_size;
|
|
+
|
|
+struct probe_arg {
|
|
+ struct fetch_param fetch;
|
|
+ struct fetch_param fetch_size;
|
|
+ unsigned int offset;
|
|
+ const char *name;
|
|
+ const char *comm;
|
|
+ const struct fetch_type *type;
|
|
+};
|
|
+
|
|
+struct trace_probe {
|
|
+ unsigned int flags;
|
|
+ struct trace_event_class class;
|
|
+ struct trace_event_call call;
|
|
+ struct list_head files;
|
|
+ ssize_t size;
|
|
+ unsigned int nr_args;
|
|
+ struct probe_arg args[0];
|
|
+};
|
|
+
|
|
+struct event_file_link {
|
|
+ struct trace_event_file *file;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct trace_kprobe {
|
|
+ struct list_head list;
|
|
+ struct kretprobe rp;
|
|
+ long unsigned int *nhit;
|
|
+ const char *symbol;
|
|
+ struct trace_probe tp;
|
|
+};
|
|
+
|
|
+struct symbol_cache {
|
|
+ char *symbol;
|
|
+ long int offset;
|
|
+ long unsigned int addr;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_cpu {
|
|
+ struct trace_entry ent;
|
|
+ u32 state;
|
|
+ u32 cpu_id;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_powernv_throttle {
|
|
+ struct trace_entry ent;
|
|
+ int chip_id;
|
|
+ u32 __data_loc_reason;
|
|
+ int pmax;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_pstate_sample {
|
|
+ struct trace_entry ent;
|
|
+ u32 core_busy;
|
|
+ u32 scaled_busy;
|
|
+ u32 from;
|
|
+ u32 to;
|
|
+ u64 mperf;
|
|
+ u64 aperf;
|
|
+ u64 tsc;
|
|
+ u32 freq;
|
|
+ u32 io_boost;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_cpu_frequency_limits {
|
|
+ struct trace_entry ent;
|
|
+ u32 min_freq;
|
|
+ u32 max_freq;
|
|
+ u32 cpu_id;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_device_pm_callback_start {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_device;
|
|
+ u32 __data_loc_driver;
|
|
+ u32 __data_loc_parent;
|
|
+ u32 __data_loc_pm_ops;
|
|
+ int event;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_device_pm_callback_end {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_device;
|
|
+ u32 __data_loc_driver;
|
|
+ int error;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_suspend_resume {
|
|
+ struct trace_entry ent;
|
|
+ const char *action;
|
|
+ int val;
|
|
+ bool start;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_wakeup_source {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ u64 state;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_clock {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ u64 state;
|
|
+ u64 cpu_id;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_power_domain {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ u64 state;
|
|
+ u64 cpu_id;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_pm_qos_request {
|
|
+ struct trace_entry ent;
|
|
+ int pm_qos_class;
|
|
+ s32 value;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_pm_qos_update_request_timeout {
|
|
+ struct trace_entry ent;
|
|
+ int pm_qos_class;
|
|
+ s32 value;
|
|
+ long unsigned int timeout_us;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_pm_qos_update {
|
|
+ struct trace_entry ent;
|
|
+ enum pm_qos_req_action action;
|
|
+ int prev_value;
|
|
+ int curr_value;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_dev_pm_qos_request {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ enum dev_pm_qos_req_type type;
|
|
+ s32 new_value;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_cpu {};
|
|
+
|
|
+struct trace_event_data_offsets_powernv_throttle {
|
|
+ u32 reason;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_pstate_sample {};
|
|
+
|
|
+struct trace_event_data_offsets_cpu_frequency_limits {};
|
|
+
|
|
+struct trace_event_data_offsets_device_pm_callback_start {
|
|
+ u32 device;
|
|
+ u32 driver;
|
|
+ u32 parent;
|
|
+ u32 pm_ops;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_device_pm_callback_end {
|
|
+ u32 device;
|
|
+ u32 driver;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_suspend_resume {};
|
|
+
|
|
+struct trace_event_data_offsets_wakeup_source {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_clock {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_power_domain {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_pm_qos_request {};
|
|
+
|
|
+struct trace_event_data_offsets_pm_qos_update_request_timeout {};
|
|
+
|
|
+struct trace_event_data_offsets_pm_qos_update {};
|
|
+
|
|
+struct trace_event_data_offsets_dev_pm_qos_request {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_rpm_internal {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ int flags;
|
|
+ int usage_count;
|
|
+ int disable_depth;
|
|
+ int runtime_auto;
|
|
+ int request_pending;
|
|
+ int irq_safe;
|
|
+ int child_count;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_rpm_return_int {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ long unsigned int ip;
|
|
+ int ret;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_rpm_internal {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_rpm_return_int {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct deref_fetch_param {
|
|
+ struct fetch_param orig;
|
|
+ long int offset;
|
|
+ fetch_func_t fetch;
|
|
+ fetch_func_t fetch_size;
|
|
+};
|
|
+
|
|
+struct bitfield_fetch_param {
|
|
+ struct fetch_param orig;
|
|
+ unsigned char hi_shift;
|
|
+ unsigned char low_shift;
|
|
+};
|
|
+
|
|
+enum uprobe_filter_ctx {
|
|
+ UPROBE_FILTER_REGISTER = 0,
|
|
+ UPROBE_FILTER_UNREGISTER = 1,
|
|
+ UPROBE_FILTER_MMAP = 2,
|
|
+};
|
|
+
|
|
+struct uprobe_consumer {
|
|
+ int (*handler)(struct uprobe_consumer *, struct pt_regs *);
|
|
+ int (*ret_handler)(struct uprobe_consumer *, long unsigned int, struct pt_regs *);
|
|
+ bool (*filter)(struct uprobe_consumer *, enum uprobe_filter_ctx, struct mm_struct *);
|
|
+ struct uprobe_consumer *next;
|
|
+};
|
|
+
|
|
+struct uprobe_trace_entry_head {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int vaddr[0];
|
|
+};
|
|
+
|
|
+struct trace_uprobe_filter {
|
|
+ rwlock_t rwlock;
|
|
+ int nr_systemwide;
|
|
+ struct list_head perf_events;
|
|
+};
|
|
+
|
|
+struct trace_uprobe {
|
|
+ struct list_head list;
|
|
+ struct trace_uprobe_filter filter;
|
|
+ struct uprobe_consumer consumer;
|
|
+ struct path path;
|
|
+ struct inode *inode;
|
|
+ char *filename;
|
|
+ long unsigned int offset;
|
|
+ long unsigned int nhit;
|
|
+ struct trace_probe tp;
|
|
+};
|
|
+
|
|
+struct uprobe_dispatch_data {
|
|
+ struct trace_uprobe *tu;
|
|
+ long unsigned int bp_addr;
|
|
+};
|
|
+
|
|
+struct uprobe_cpu_buffer {
|
|
+ struct mutex mutex;
|
|
+ void *buf;
|
|
+};
|
|
+
|
|
+typedef bool (*filter_func_t)(struct uprobe_consumer *, enum uprobe_filter_ctx, struct mm_struct *);
|
|
+
|
|
+enum xdp_action {
|
|
+ XDP_ABORTED = 0,
|
|
+ XDP_DROP = 1,
|
|
+ XDP_PASS = 2,
|
|
+ XDP_TX = 3,
|
|
+ XDP_REDIRECT = 4,
|
|
+};
|
|
+
|
|
+typedef void (*bpf_jit_fill_hole_t)(void *, unsigned int);
|
|
+
|
|
+struct bpf_prog_dummy {
|
|
+ struct bpf_prog prog;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xdp_exception {
|
|
+ struct trace_entry ent;
|
|
+ int prog_id;
|
|
+ u32 act;
|
|
+ int ifindex;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xdp_redirect_template {
|
|
+ struct trace_entry ent;
|
|
+ int prog_id;
|
|
+ u32 act;
|
|
+ int ifindex;
|
|
+ int err;
|
|
+ int to_ifindex;
|
|
+ u32 map_id;
|
|
+ int map_index;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xdp_cpumap_kthread {
|
|
+ struct trace_entry ent;
|
|
+ int map_id;
|
|
+ u32 act;
|
|
+ int cpu;
|
|
+ unsigned int drops;
|
|
+ unsigned int processed;
|
|
+ int sched;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xdp_cpumap_enqueue {
|
|
+ struct trace_entry ent;
|
|
+ int map_id;
|
|
+ u32 act;
|
|
+ int cpu;
|
|
+ unsigned int drops;
|
|
+ unsigned int processed;
|
|
+ int to_cpu;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xdp_devmap_xmit {
|
|
+ struct trace_entry ent;
|
|
+ int map_id;
|
|
+ u32 act;
|
|
+ u32 map_index;
|
|
+ int drops;
|
|
+ int sent;
|
|
+ int from_ifindex;
|
|
+ int to_ifindex;
|
|
+ int err;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_xdp_exception {};
|
|
+
|
|
+struct trace_event_data_offsets_xdp_redirect_template {};
|
|
+
|
|
+struct trace_event_data_offsets_xdp_cpumap_kthread {};
|
|
+
|
|
+struct trace_event_data_offsets_xdp_cpumap_enqueue {};
|
|
+
|
|
+struct trace_event_data_offsets_xdp_devmap_xmit {};
|
|
+
|
|
+enum bpf_cmd {
|
|
+ BPF_MAP_CREATE = 0,
|
|
+ BPF_MAP_LOOKUP_ELEM = 1,
|
|
+ BPF_MAP_UPDATE_ELEM = 2,
|
|
+ BPF_MAP_DELETE_ELEM = 3,
|
|
+ BPF_MAP_GET_NEXT_KEY = 4,
|
|
+ BPF_PROG_LOAD = 5,
|
|
+ BPF_OBJ_PIN = 6,
|
|
+ BPF_OBJ_GET = 7,
|
|
+ BPF_PROG_ATTACH = 8,
|
|
+ BPF_PROG_DETACH = 9,
|
|
+ BPF_PROG_TEST_RUN = 10,
|
|
+ BPF_PROG_GET_NEXT_ID = 11,
|
|
+ BPF_MAP_GET_NEXT_ID = 12,
|
|
+ BPF_PROG_GET_FD_BY_ID = 13,
|
|
+ BPF_MAP_GET_FD_BY_ID = 14,
|
|
+ BPF_OBJ_GET_INFO_BY_FD = 15,
|
|
+ BPF_PROG_QUERY = 16,
|
|
+ BPF_RAW_TRACEPOINT_OPEN = 17,
|
|
+ BPF_BTF_LOAD = 18,
|
|
+ BPF_BTF_GET_FD_BY_ID = 19,
|
|
+ BPF_TASK_FD_QUERY = 20,
|
|
+};
|
|
+
|
|
+struct bpf_prog_info {
|
|
+ __u32 type;
|
|
+ __u32 id;
|
|
+ __u8 tag[8];
|
|
+ __u32 jited_prog_len;
|
|
+ __u32 xlated_prog_len;
|
|
+ __u64 jited_prog_insns;
|
|
+ __u64 xlated_prog_insns;
|
|
+ __u64 load_time;
|
|
+ __u32 created_by_uid;
|
|
+ __u32 nr_map_ids;
|
|
+ __u64 map_ids;
|
|
+ char name[16];
|
|
+ __u32 ifindex;
|
|
+ __u32 gpl_compatible: 1;
|
|
+ __u64 netns_dev;
|
|
+ __u64 netns_ino;
|
|
+ __u32 nr_jited_ksyms;
|
|
+ __u32 nr_jited_func_lens;
|
|
+ __u64 jited_ksyms;
|
|
+ __u64 jited_func_lens;
|
|
+};
|
|
+
|
|
+struct bpf_map_info {
|
|
+ __u32 type;
|
|
+ __u32 id;
|
|
+ __u32 key_size;
|
|
+ __u32 value_size;
|
|
+ __u32 max_entries;
|
|
+ __u32 map_flags;
|
|
+ char name[16];
|
|
+ __u32 ifindex;
|
|
+ __u64 netns_dev;
|
|
+ __u64 netns_ino;
|
|
+ __u32 btf_id;
|
|
+ __u32 btf_key_type_id;
|
|
+ __u32 btf_value_type_id;
|
|
+};
|
|
+
|
|
+struct bpf_btf_info {
|
|
+ __u64 btf;
|
|
+ __u32 btf_size;
|
|
+ __u32 id;
|
|
+};
|
|
+
|
|
+struct btf_header {
|
|
+ __u16 magic;
|
|
+ __u8 version;
|
|
+ __u8 flags;
|
|
+ __u32 hdr_len;
|
|
+ __u32 type_off;
|
|
+ __u32 type_len;
|
|
+ __u32 str_off;
|
|
+ __u32 str_len;
|
|
+};
|
|
+
|
|
+struct btf {
|
|
+ void *data;
|
|
+ struct btf_type **types;
|
|
+ u32 *resolved_ids;
|
|
+ u32 *resolved_sizes;
|
|
+ const char *strings;
|
|
+ void *nohdr_data;
|
|
+ struct btf_header hdr;
|
|
+ u32 nr_types;
|
|
+ u32 types_size;
|
|
+ u32 data_size;
|
|
+ refcount_t refcnt;
|
|
+ u32 id;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct bpf_raw_tracepoint {
|
|
+ struct bpf_raw_event_map *btp;
|
|
+ struct bpf_prog *prog;
|
|
+};
|
|
+
|
|
+struct bpf_verifier_log {
|
|
+ u32 level;
|
|
+ char kbuf[1024];
|
|
+ char *ubuf;
|
|
+ u32 len_used;
|
|
+ u32 len_total;
|
|
+};
|
|
+
|
|
+struct bpf_subprog_info {
|
|
+ u32 start;
|
|
+ u16 stack_depth;
|
|
+};
|
|
+
|
|
+struct bpf_id_pair {
|
|
+ u32 old;
|
|
+ u32 cur;
|
|
+};
|
|
+
|
|
+struct bpf_verifier_stack_elem;
|
|
+
|
|
+struct bpf_verifier_state;
|
|
+
|
|
+struct bpf_verifier_state_list;
|
|
+
|
|
+struct bpf_insn_aux_data;
|
|
+
|
|
+struct bpf_verifier_env {
|
|
+ u32 insn_idx;
|
|
+ u32 prev_insn_idx;
|
|
+ struct bpf_prog *prog;
|
|
+ const struct bpf_verifier_ops *ops;
|
|
+ struct bpf_verifier_stack_elem *head;
|
|
+ int stack_size;
|
|
+ bool strict_alignment;
|
|
+ struct bpf_verifier_state *cur_state;
|
|
+ struct bpf_verifier_state_list **explored_states;
|
|
+ struct bpf_map *used_maps[64];
|
|
+ u32 used_map_cnt;
|
|
+ u32 id_gen;
|
|
+ bool explore_alu_limits;
|
|
+ bool allow_ptr_leaks;
|
|
+ bool seen_direct_write;
|
|
+ struct bpf_insn_aux_data *insn_aux_data;
|
|
+ struct bpf_verifier_log log;
|
|
+ struct bpf_subprog_info subprog_info[257];
|
|
+ struct bpf_id_pair idmap_scratch[75];
|
|
+ u32 subprog_cnt;
|
|
+};
|
|
+
|
|
+struct tnum {
|
|
+ u64 value;
|
|
+ u64 mask;
|
|
+};
|
|
+
|
|
+enum bpf_reg_liveness {
|
|
+ REG_LIVE_NONE = 0,
|
|
+ REG_LIVE_READ = 1,
|
|
+ REG_LIVE_WRITTEN = 2,
|
|
+};
|
|
+
|
|
+struct bpf_reg_state {
|
|
+ enum bpf_reg_type type;
|
|
+ union {
|
|
+ u16 range;
|
|
+ struct bpf_map *map_ptr;
|
|
+ long unsigned int raw;
|
|
+ };
|
|
+ s32 off;
|
|
+ u32 id;
|
|
+ struct tnum var_off;
|
|
+ s64 smin_value;
|
|
+ s64 smax_value;
|
|
+ u64 umin_value;
|
|
+ u64 umax_value;
|
|
+ struct bpf_reg_state *parent;
|
|
+ u32 frameno;
|
|
+ enum bpf_reg_liveness live;
|
|
+};
|
|
+
|
|
+enum bpf_stack_slot_type {
|
|
+ STACK_INVALID = 0,
|
|
+ STACK_SPILL = 1,
|
|
+ STACK_MISC = 2,
|
|
+ STACK_ZERO = 3,
|
|
+};
|
|
+
|
|
+struct bpf_stack_state {
|
|
+ struct bpf_reg_state spilled_ptr;
|
|
+ u8 slot_type[8];
|
|
+};
|
|
+
|
|
+struct bpf_func_state {
|
|
+ struct bpf_reg_state regs[11];
|
|
+ int callsite;
|
|
+ u32 frameno;
|
|
+ u32 subprogno;
|
|
+ int allocated_stack;
|
|
+ struct bpf_stack_state *stack;
|
|
+};
|
|
+
|
|
+struct bpf_verifier_state {
|
|
+ struct bpf_func_state *frame[8];
|
|
+ u32 curframe;
|
|
+ bool speculative;
|
|
+};
|
|
+
|
|
+struct bpf_verifier_state_list {
|
|
+ struct bpf_verifier_state state;
|
|
+ struct bpf_verifier_state_list *next;
|
|
+};
|
|
+
|
|
+struct bpf_insn_aux_data {
|
|
+ union {
|
|
+ enum bpf_reg_type ptr_type;
|
|
+ long unsigned int map_state;
|
|
+ s32 call_imm;
|
|
+ u32 alu_limit;
|
|
+ };
|
|
+ int ctx_field_size;
|
|
+ bool seen;
|
|
+ bool sanitize_stack_spill;
|
|
+ u8 alu_state;
|
|
+};
|
|
+
|
|
+struct bpf_verifier_stack_elem {
|
|
+ struct bpf_verifier_state st;
|
|
+ int insn_idx;
|
|
+ int prev_insn_idx;
|
|
+ struct bpf_verifier_stack_elem *next;
|
|
+};
|
|
+
|
|
+typedef void (*bpf_insn_print_t)(void *, const char *, ...);
|
|
+
|
|
+typedef const char * (*bpf_insn_revmap_call_t)(void *, const struct bpf_insn *);
|
|
+
|
|
+typedef const char * (*bpf_insn_print_imm_t)(void *, const struct bpf_insn *, __u64);
|
|
+
|
|
+struct bpf_insn_cbs {
|
|
+ bpf_insn_print_t cb_print;
|
|
+ bpf_insn_revmap_call_t cb_call;
|
|
+ bpf_insn_print_imm_t cb_imm;
|
|
+ void *private_data;
|
|
+};
|
|
+
|
|
+struct bpf_call_arg_meta {
|
|
+ struct bpf_map *map_ptr;
|
|
+ bool raw_mode;
|
|
+ bool pkt_access;
|
|
+ int regno;
|
|
+ int access_size;
|
|
+ u64 msize_max_value;
|
|
+};
|
|
+
|
|
+enum reg_arg_type {
|
|
+ SRC_OP = 0,
|
|
+ DST_OP = 1,
|
|
+ DST_OP_NO_MARK = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ REASON_BOUNDS = -1,
|
|
+ REASON_TYPE = -2,
|
|
+ REASON_PATHS = -3,
|
|
+ REASON_LIMIT = -4,
|
|
+ REASON_STACK = -5,
|
|
+};
|
|
+
|
|
+struct bpf_sanitize_info {
|
|
+ struct bpf_insn_aux_data aux;
|
|
+ bool mask_to_left;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ DISCOVERED = 16,
|
|
+ EXPLORED = 32,
|
|
+ FALLTHROUGH = 1,
|
|
+ BRANCH = 2,
|
|
+};
|
|
+
|
|
+struct tree_descr {
|
|
+ const char *name;
|
|
+ const struct file_operations *ops;
|
|
+ int mode;
|
|
+};
|
|
+
|
|
+struct match_token {
|
|
+ int token;
|
|
+ const char *pattern;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ MAX_OPT_ARGS = 3,
|
|
+};
|
|
+
|
|
+enum bpf_type {
|
|
+ BPF_TYPE_UNSPEC = 0,
|
|
+ BPF_TYPE_PROG = 1,
|
|
+ BPF_TYPE_MAP = 2,
|
|
+};
|
|
+
|
|
+struct map_iter {
|
|
+ void *key;
|
|
+ bool done;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ OPT_MODE = 0,
|
|
+ OPT_ERR = 1,
|
|
+};
|
|
+
|
|
+struct bpf_mount_opts {
|
|
+ umode_t mode;
|
|
+};
|
|
+
|
|
+struct pcpu_freelist_node;
|
|
+
|
|
+struct pcpu_freelist_head {
|
|
+ struct pcpu_freelist_node *first;
|
|
+ raw_spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct pcpu_freelist_node {
|
|
+ struct pcpu_freelist_node *next;
|
|
+};
|
|
+
|
|
+struct pcpu_freelist {
|
|
+ struct pcpu_freelist_head *freelist;
|
|
+};
|
|
+
|
|
+struct bpf_lru_node {
|
|
+ struct list_head list;
|
|
+ u16 cpu;
|
|
+ u8 type;
|
|
+ u8 ref;
|
|
+};
|
|
+
|
|
+struct bpf_lru_list {
|
|
+ struct list_head lists[3];
|
|
+ unsigned int counts[2];
|
|
+ struct list_head *next_inactive_rotation;
|
|
+ raw_spinlock_t lock;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct bpf_lru_locallist {
|
|
+ struct list_head lists[2];
|
|
+ u16 next_steal;
|
|
+ raw_spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct bpf_common_lru {
|
|
+ struct bpf_lru_list lru_list;
|
|
+ struct bpf_lru_locallist *local_list;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+typedef bool (*del_from_htab_func)(void *, struct bpf_lru_node *);
|
|
+
|
|
+struct bpf_lru {
|
|
+ union {
|
|
+ struct bpf_common_lru common_lru;
|
|
+ struct bpf_lru_list *percpu_lru;
|
|
+ };
|
|
+ del_from_htab_func del_from_htab;
|
|
+ void *del_arg;
|
|
+ unsigned int hash_offset;
|
|
+ unsigned int nr_scans;
|
|
+ bool percpu;
|
|
+ long: 56;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct bucket {
|
|
+ struct hlist_nulls_head head;
|
|
+ raw_spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct htab_elem;
|
|
+
|
|
+struct bpf_htab {
|
|
+ struct bpf_map map;
|
|
+ struct bucket *buckets;
|
|
+ void *elems;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ union {
|
|
+ struct pcpu_freelist freelist;
|
|
+ struct bpf_lru lru;
|
|
+ };
|
|
+ struct htab_elem **extra_elems;
|
|
+ atomic_t count;
|
|
+ u32 n_buckets;
|
|
+ u32 elem_size;
|
|
+ u32 hashrnd;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct htab_elem {
|
|
+ union {
|
|
+ struct hlist_nulls_node hash_node;
|
|
+ struct {
|
|
+ void *padding;
|
|
+ union {
|
|
+ struct bpf_htab *htab;
|
|
+ struct pcpu_freelist_node fnode;
|
|
+ };
|
|
+ };
|
|
+ };
|
|
+ union {
|
|
+ struct callback_head rcu;
|
|
+ struct bpf_lru_node lru_node;
|
|
+ };
|
|
+ u32 hash;
|
|
+ int: 32;
|
|
+ char key[0];
|
|
+};
|
|
+
|
|
+enum bpf_lru_list_type {
|
|
+ BPF_LRU_LIST_T_ACTIVE = 0,
|
|
+ BPF_LRU_LIST_T_INACTIVE = 1,
|
|
+ BPF_LRU_LIST_T_FREE = 2,
|
|
+ BPF_LRU_LOCAL_LIST_T_FREE = 3,
|
|
+ BPF_LRU_LOCAL_LIST_T_PENDING = 4,
|
|
+};
|
|
+
|
|
+struct bpf_lpm_trie_key {
|
|
+ __u32 prefixlen;
|
|
+ __u8 data[0];
|
|
+};
|
|
+
|
|
+struct lpm_trie_node {
|
|
+ struct callback_head rcu;
|
|
+ struct lpm_trie_node *child[2];
|
|
+ u32 prefixlen;
|
|
+ u32 flags;
|
|
+ u8 data[0];
|
|
+};
|
|
+
|
|
+struct lpm_trie {
|
|
+ struct bpf_map map;
|
|
+ struct lpm_trie_node *root;
|
|
+ size_t n_entries;
|
|
+ size_t max_prefixlen;
|
|
+ size_t data_size;
|
|
+ raw_spinlock_t lock;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct bpf_cgroup_storage_map {
|
|
+ struct bpf_map map;
|
|
+ spinlock_t lock;
|
|
+ struct bpf_prog *prog;
|
|
+ struct rb_root root;
|
|
+ struct list_head list;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct btf_enum {
|
|
+ __u32 name_off;
|
|
+ __s32 val;
|
|
+};
|
|
+
|
|
+struct btf_array {
|
|
+ __u32 type;
|
|
+ __u32 index_type;
|
|
+ __u32 nelems;
|
|
+};
|
|
+
|
|
+struct btf_member {
|
|
+ __u32 name_off;
|
|
+ __u32 type;
|
|
+ __u32 offset;
|
|
+};
|
|
+
|
|
+enum verifier_phase {
|
|
+ CHECK_META = 0,
|
|
+ CHECK_TYPE = 1,
|
|
+};
|
|
+
|
|
+struct resolve_vertex {
|
|
+ const struct btf_type *t;
|
|
+ u32 type_id;
|
|
+ u16 next_member;
|
|
+};
|
|
+
|
|
+enum visit_state {
|
|
+ NOT_VISITED = 0,
|
|
+ VISITED = 1,
|
|
+ RESOLVED = 2,
|
|
+};
|
|
+
|
|
+enum resolve_mode {
|
|
+ RESOLVE_TBD = 0,
|
|
+ RESOLVE_PTR = 1,
|
|
+ RESOLVE_STRUCT_OR_ARRAY = 2,
|
|
+};
|
|
+
|
|
+struct btf_sec_info {
|
|
+ u32 off;
|
|
+ u32 len;
|
|
+};
|
|
+
|
|
+struct btf_verifier_env {
|
|
+ struct btf *btf;
|
|
+ u8 *visit_states;
|
|
+ struct resolve_vertex stack[32];
|
|
+ struct bpf_verifier_log log;
|
|
+ u32 log_type_id;
|
|
+ u32 top_stack;
|
|
+ enum verifier_phase phase;
|
|
+ enum resolve_mode resolve_mode;
|
|
+};
|
|
+
|
|
+struct btf_kind_operations {
|
|
+ s32 (*check_meta)(struct btf_verifier_env *, const struct btf_type *, u32);
|
|
+ int (*resolve)(struct btf_verifier_env *, const struct resolve_vertex *);
|
|
+ int (*check_member)(struct btf_verifier_env *, const struct btf_type *, const struct btf_member *, const struct btf_type *);
|
|
+ void (*log_details)(struct btf_verifier_env *, const struct btf_type *);
|
|
+ void (*seq_show)(const struct btf *, const struct btf_type *, u32, void *, u8, struct seq_file *);
|
|
+};
|
|
+
|
|
+enum xdp_mem_type {
|
|
+ MEM_TYPE_PAGE_SHARED = 0,
|
|
+ MEM_TYPE_PAGE_ORDER0 = 1,
|
|
+ MEM_TYPE_PAGE_POOL = 2,
|
|
+ MEM_TYPE_ZERO_COPY = 3,
|
|
+ MEM_TYPE_MAX = 4,
|
|
+};
|
|
+
|
|
+struct xdp_buff {
|
|
+ void *data;
|
|
+ void *data_end;
|
|
+ void *data_meta;
|
|
+ void *data_hard_start;
|
|
+ long unsigned int handle;
|
|
+ struct xdp_rxq_info *rxq;
|
|
+};
|
|
+
|
|
+enum net_device_flags {
|
|
+ IFF_UP = 1,
|
|
+ IFF_BROADCAST = 2,
|
|
+ IFF_DEBUG = 4,
|
|
+ IFF_LOOPBACK = 8,
|
|
+ IFF_POINTOPOINT = 16,
|
|
+ IFF_NOTRAILERS = 32,
|
|
+ IFF_RUNNING = 64,
|
|
+ IFF_NOARP = 128,
|
|
+ IFF_PROMISC = 256,
|
|
+ IFF_ALLMULTI = 512,
|
|
+ IFF_MASTER = 1024,
|
|
+ IFF_SLAVE = 2048,
|
|
+ IFF_MULTICAST = 4096,
|
|
+ IFF_PORTSEL = 8192,
|
|
+ IFF_AUTOMEDIA = 16384,
|
|
+ IFF_DYNAMIC = 32768,
|
|
+ IFF_LOWER_UP = 65536,
|
|
+ IFF_DORMANT = 131072,
|
|
+ IFF_ECHO = 262144,
|
|
+};
|
|
+
|
|
+enum netdev_cmd {
|
|
+ NETDEV_UP = 1,
|
|
+ NETDEV_DOWN = 2,
|
|
+ NETDEV_REBOOT = 3,
|
|
+ NETDEV_CHANGE = 4,
|
|
+ NETDEV_REGISTER = 5,
|
|
+ NETDEV_UNREGISTER = 6,
|
|
+ NETDEV_CHANGEMTU = 7,
|
|
+ NETDEV_CHANGEADDR = 8,
|
|
+ NETDEV_GOING_DOWN = 9,
|
|
+ NETDEV_CHANGENAME = 10,
|
|
+ NETDEV_FEAT_CHANGE = 11,
|
|
+ NETDEV_BONDING_FAILOVER = 12,
|
|
+ NETDEV_PRE_UP = 13,
|
|
+ NETDEV_PRE_TYPE_CHANGE = 14,
|
|
+ NETDEV_POST_TYPE_CHANGE = 15,
|
|
+ NETDEV_POST_INIT = 16,
|
|
+ NETDEV_RELEASE = 17,
|
|
+ NETDEV_NOTIFY_PEERS = 18,
|
|
+ NETDEV_JOIN = 19,
|
|
+ NETDEV_CHANGEUPPER = 20,
|
|
+ NETDEV_RESEND_IGMP = 21,
|
|
+ NETDEV_PRECHANGEMTU = 22,
|
|
+ NETDEV_CHANGEINFODATA = 23,
|
|
+ NETDEV_BONDING_INFO = 24,
|
|
+ NETDEV_PRECHANGEUPPER = 25,
|
|
+ NETDEV_CHANGELOWERSTATE = 26,
|
|
+ NETDEV_UDP_TUNNEL_PUSH_INFO = 27,
|
|
+ NETDEV_UDP_TUNNEL_DROP_INFO = 28,
|
|
+ NETDEV_CHANGE_TX_QUEUE_LEN = 29,
|
|
+ NETDEV_CVLAN_FILTER_PUSH_INFO = 30,
|
|
+ NETDEV_CVLAN_FILTER_DROP_INFO = 31,
|
|
+ NETDEV_SVLAN_FILTER_PUSH_INFO = 32,
|
|
+ NETDEV_SVLAN_FILTER_DROP_INFO = 33,
|
|
+};
|
|
+
|
|
+struct netdev_notifier_info {
|
|
+ struct net_device *dev;
|
|
+ struct netlink_ext_ack *extack;
|
|
+};
|
|
+
|
|
+struct xdp_bulk_queue {
|
|
+ struct xdp_frame *q[16];
|
|
+ struct net_device *dev_rx;
|
|
+ unsigned int count;
|
|
+};
|
|
+
|
|
+struct bpf_dtab;
|
|
+
|
|
+struct bpf_dtab_netdev {
|
|
+ struct net_device *dev;
|
|
+ struct bpf_dtab *dtab;
|
|
+ unsigned int bit;
|
|
+ struct xdp_bulk_queue *bulkq;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct bpf_dtab {
|
|
+ struct bpf_map map;
|
|
+ struct bpf_dtab_netdev **netdev_map;
|
|
+ long unsigned int *flush_needed;
|
|
+ struct list_head list;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct skb_frag_struct {
|
|
+ struct {
|
|
+ struct page *p;
|
|
+ } page;
|
|
+ __u32 page_offset;
|
|
+ __u32 size;
|
|
+};
|
|
+
|
|
+typedef struct skb_frag_struct skb_frag_t;
|
|
+
|
|
+struct skb_shared_hwtstamps {
|
|
+ ktime_t hwtstamp;
|
|
+};
|
|
+
|
|
+struct skb_shared_info {
|
|
+ __u8 __unused;
|
|
+ __u8 meta_len;
|
|
+ __u8 nr_frags;
|
|
+ __u8 tx_flags;
|
|
+ short unsigned int gso_size;
|
|
+ short unsigned int gso_segs;
|
|
+ struct sk_buff *frag_list;
|
|
+ struct skb_shared_hwtstamps hwtstamps;
|
|
+ unsigned int gso_type;
|
|
+ u32 tskey;
|
|
+ atomic_t dataref;
|
|
+ void *destructor_arg;
|
|
+ skb_frag_t frags[17];
|
|
+};
|
|
+
|
|
+struct ptr_ring {
|
|
+ int producer;
|
|
+ spinlock_t producer_lock;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ int consumer_head;
|
|
+ int consumer_tail;
|
|
+ spinlock_t consumer_lock;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ int size;
|
|
+ int batch;
|
|
+ void **queue;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct xdp_bulk_queue___2 {
|
|
+ void *q[8];
|
|
+ unsigned int count;
|
|
+};
|
|
+
|
|
+struct bpf_cpu_map_entry {
|
|
+ u32 cpu;
|
|
+ int map_id;
|
|
+ u32 qsize;
|
|
+ struct xdp_bulk_queue___2 *bulkq;
|
|
+ struct ptr_ring *queue;
|
|
+ struct task_struct *kthread;
|
|
+ struct work_struct kthread_stop_wq;
|
|
+ atomic_t refcnt;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct bpf_cpu_map {
|
|
+ struct bpf_map map;
|
|
+ struct bpf_cpu_map_entry **cpu_map;
|
|
+ long unsigned int *flush_needed;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct xdp_umem_props {
|
|
+ u64 chunk_mask;
|
|
+ u64 size;
|
|
+};
|
|
+
|
|
+struct xsk_queue;
|
|
+
|
|
+struct xdp_umem_page;
|
|
+
|
|
+struct xdp_umem {
|
|
+ struct xsk_queue *fq;
|
|
+ struct xsk_queue *cq;
|
|
+ struct xdp_umem_page *pages;
|
|
+ struct xdp_umem_props props;
|
|
+ u32 headroom;
|
|
+ u32 chunk_size_nohr;
|
|
+ struct user_struct *user;
|
|
+ long unsigned int address;
|
|
+ refcount_t users;
|
|
+ struct work_struct work;
|
|
+ struct page **pgs;
|
|
+ u32 npgs;
|
|
+ struct net_device *dev;
|
|
+ u16 queue_id;
|
|
+ bool zc;
|
|
+ spinlock_t xsk_list_lock;
|
|
+ struct list_head xsk_list;
|
|
+};
|
|
+
|
|
+struct xdp_umem_page {
|
|
+ void *addr;
|
|
+ dma_addr_t dma;
|
|
+};
|
|
+
|
|
+struct xdp_sock {
|
|
+ struct sock sk;
|
|
+ struct xsk_queue *rx;
|
|
+ struct net_device *dev;
|
|
+ struct xdp_umem *umem;
|
|
+ struct list_head flush_node;
|
|
+ u16 queue_id;
|
|
+ long: 48;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct xsk_queue *tx;
|
|
+ struct list_head list;
|
|
+ bool zc;
|
|
+ struct mutex mutex;
|
|
+ spinlock_t tx_completion_lock;
|
|
+ u64 rx_dropped;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct xsk_map {
|
|
+ struct bpf_map map;
|
|
+ struct xdp_sock **xsk_map;
|
|
+ struct list_head *flush_list;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct rhlist_head {
|
|
+ struct rhash_head rhead;
|
|
+ struct rhlist_head *next;
|
|
+};
|
|
+
|
|
+struct bucket_table {
|
|
+ unsigned int size;
|
|
+ unsigned int nest;
|
|
+ unsigned int rehash;
|
|
+ u32 hash_rnd;
|
|
+ unsigned int locks_mask;
|
|
+ spinlock_t *locks;
|
|
+ struct list_head walkers;
|
|
+ struct callback_head rcu;
|
|
+ struct bucket_table *future_tbl;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct rhash_head *buckets[0];
|
|
+};
|
|
+
|
|
+struct bpf_offload_dev {
|
|
+ struct list_head netdevs;
|
|
+};
|
|
+
|
|
+struct bpf_offload_netdev {
|
|
+ struct rhash_head l;
|
|
+ struct net_device *netdev;
|
|
+ struct bpf_offload_dev *offdev;
|
|
+ struct list_head progs;
|
|
+ struct list_head maps;
|
|
+ struct list_head offdev_netdevs;
|
|
+};
|
|
+
|
|
+struct ns_get_path_bpf_prog_args {
|
|
+ struct bpf_prog *prog;
|
|
+ struct bpf_prog_info *info;
|
|
+};
|
|
+
|
|
+struct ns_get_path_bpf_map_args {
|
|
+ struct bpf_offloaded_map *offmap;
|
|
+ struct bpf_map_info *info;
|
|
+};
|
|
+
|
|
+enum sk_action {
|
|
+ SK_DROP = 0,
|
|
+ SK_PASS = 1,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ BPF_SOCK_OPS_VOID = 0,
|
|
+ BPF_SOCK_OPS_TIMEOUT_INIT = 1,
|
|
+ BPF_SOCK_OPS_RWND_INIT = 2,
|
|
+ BPF_SOCK_OPS_TCP_CONNECT_CB = 3,
|
|
+ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 4,
|
|
+ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 5,
|
|
+ BPF_SOCK_OPS_NEEDS_ECN = 6,
|
|
+ BPF_SOCK_OPS_BASE_RTT = 7,
|
|
+ BPF_SOCK_OPS_RTO_CB = 8,
|
|
+ BPF_SOCK_OPS_RETRANS_CB = 9,
|
|
+ BPF_SOCK_OPS_STATE_CB = 10,
|
|
+ BPF_SOCK_OPS_TCP_LISTEN_CB = 11,
|
|
+};
|
|
+
|
|
+enum sock_type {
|
|
+ SOCK_STREAM = 1,
|
|
+ SOCK_DGRAM = 2,
|
|
+ SOCK_RAW = 3,
|
|
+ SOCK_RDM = 4,
|
|
+ SOCK_SEQPACKET = 5,
|
|
+ SOCK_DCCP = 6,
|
|
+ SOCK_PACKET = 10,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NETIF_F_SG_BIT = 0,
|
|
+ NETIF_F_IP_CSUM_BIT = 1,
|
|
+ __UNUSED_NETIF_F_1 = 2,
|
|
+ NETIF_F_HW_CSUM_BIT = 3,
|
|
+ NETIF_F_IPV6_CSUM_BIT = 4,
|
|
+ NETIF_F_HIGHDMA_BIT = 5,
|
|
+ NETIF_F_FRAGLIST_BIT = 6,
|
|
+ NETIF_F_HW_VLAN_CTAG_TX_BIT = 7,
|
|
+ NETIF_F_HW_VLAN_CTAG_RX_BIT = 8,
|
|
+ NETIF_F_HW_VLAN_CTAG_FILTER_BIT = 9,
|
|
+ NETIF_F_VLAN_CHALLENGED_BIT = 10,
|
|
+ NETIF_F_GSO_BIT = 11,
|
|
+ NETIF_F_LLTX_BIT = 12,
|
|
+ NETIF_F_NETNS_LOCAL_BIT = 13,
|
|
+ NETIF_F_GRO_BIT = 14,
|
|
+ NETIF_F_LRO_BIT = 15,
|
|
+ NETIF_F_GSO_SHIFT = 16,
|
|
+ NETIF_F_TSO_BIT = 16,
|
|
+ NETIF_F_GSO_ROBUST_BIT = 17,
|
|
+ NETIF_F_TSO_ECN_BIT = 18,
|
|
+ NETIF_F_TSO_MANGLEID_BIT = 19,
|
|
+ NETIF_F_TSO6_BIT = 20,
|
|
+ NETIF_F_FSO_BIT = 21,
|
|
+ NETIF_F_GSO_GRE_BIT = 22,
|
|
+ NETIF_F_GSO_GRE_CSUM_BIT = 23,
|
|
+ NETIF_F_GSO_IPXIP4_BIT = 24,
|
|
+ NETIF_F_GSO_IPXIP6_BIT = 25,
|
|
+ NETIF_F_GSO_UDP_TUNNEL_BIT = 26,
|
|
+ NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT = 27,
|
|
+ NETIF_F_GSO_PARTIAL_BIT = 28,
|
|
+ NETIF_F_GSO_TUNNEL_REMCSUM_BIT = 29,
|
|
+ NETIF_F_GSO_SCTP_BIT = 30,
|
|
+ NETIF_F_GSO_ESP_BIT = 31,
|
|
+ NETIF_F_GSO_UDP_BIT = 32,
|
|
+ NETIF_F_GSO_UDP_L4_BIT = 33,
|
|
+ NETIF_F_GSO_LAST = 33,
|
|
+ NETIF_F_FCOE_CRC_BIT = 34,
|
|
+ NETIF_F_SCTP_CRC_BIT = 35,
|
|
+ NETIF_F_FCOE_MTU_BIT = 36,
|
|
+ NETIF_F_NTUPLE_BIT = 37,
|
|
+ NETIF_F_RXHASH_BIT = 38,
|
|
+ NETIF_F_RXCSUM_BIT = 39,
|
|
+ NETIF_F_NOCACHE_COPY_BIT = 40,
|
|
+ NETIF_F_LOOPBACK_BIT = 41,
|
|
+ NETIF_F_RXFCS_BIT = 42,
|
|
+ NETIF_F_RXALL_BIT = 43,
|
|
+ NETIF_F_HW_VLAN_STAG_TX_BIT = 44,
|
|
+ NETIF_F_HW_VLAN_STAG_RX_BIT = 45,
|
|
+ NETIF_F_HW_VLAN_STAG_FILTER_BIT = 46,
|
|
+ NETIF_F_HW_L2FW_DOFFLOAD_BIT = 47,
|
|
+ NETIF_F_HW_TC_BIT = 48,
|
|
+ NETIF_F_HW_ESP_BIT = 49,
|
|
+ NETIF_F_HW_ESP_TX_CSUM_BIT = 50,
|
|
+ NETIF_F_RX_UDP_TUNNEL_PORT_BIT = 51,
|
|
+ NETIF_F_HW_TLS_TX_BIT = 52,
|
|
+ NETIF_F_HW_TLS_RX_BIT = 53,
|
|
+ NETIF_F_GRO_HW_BIT = 54,
|
|
+ NETIF_F_HW_TLS_RECORD_BIT = 55,
|
|
+ NETDEV_FEATURE_COUNT = 56,
|
|
+};
|
|
+
|
|
+struct inet_listen_hashbucket {
|
|
+ spinlock_t lock;
|
|
+ unsigned int count;
|
|
+ union {
|
|
+ struct hlist_head head;
|
|
+ struct hlist_nulls_head nulls_head;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct inet_ehash_bucket;
|
|
+
|
|
+struct inet_bind_hashbucket;
|
|
+
|
|
+struct inet_hashinfo {
|
|
+ struct inet_ehash_bucket *ehash;
|
|
+ spinlock_t *ehash_locks;
|
|
+ unsigned int ehash_mask;
|
|
+ unsigned int ehash_locks_mask;
|
|
+ struct kmem_cache *bind_bucket_cachep;
|
|
+ struct inet_bind_hashbucket *bhash;
|
|
+ unsigned int bhash_size;
|
|
+ unsigned int lhash2_mask;
|
|
+ struct inet_listen_hashbucket *lhash2;
|
|
+ long: 64;
|
|
+ struct inet_listen_hashbucket listening_hash[32];
|
|
+};
|
|
+
|
|
+struct ip_ra_chain {
|
|
+ struct ip_ra_chain *next;
|
|
+ struct sock *sk;
|
|
+ union {
|
|
+ void (*destructor)(struct sock *);
|
|
+ struct sock *saved_sk;
|
|
+ };
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct fib_table {
|
|
+ struct hlist_node tb_hlist;
|
|
+ u32 tb_id;
|
|
+ int tb_num_default;
|
|
+ struct callback_head rcu;
|
|
+ long unsigned int *tb_data;
|
|
+ long unsigned int __data[0];
|
|
+};
|
|
+
|
|
+struct inet_peer_base {
|
|
+ struct rb_root rb_root;
|
|
+ seqlock_t lock;
|
|
+ int total;
|
|
+};
|
|
+
|
|
+struct tcp_fastopen_context {
|
|
+ struct crypto_cipher *tfm;
|
|
+ __u8 key[16];
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IPPROTO_IP = 0,
|
|
+ IPPROTO_ICMP = 1,
|
|
+ IPPROTO_IGMP = 2,
|
|
+ IPPROTO_IPIP = 4,
|
|
+ IPPROTO_TCP = 6,
|
|
+ IPPROTO_EGP = 8,
|
|
+ IPPROTO_PUP = 12,
|
|
+ IPPROTO_UDP = 17,
|
|
+ IPPROTO_IDP = 22,
|
|
+ IPPROTO_TP = 29,
|
|
+ IPPROTO_DCCP = 33,
|
|
+ IPPROTO_IPV6 = 41,
|
|
+ IPPROTO_RSVP = 46,
|
|
+ IPPROTO_GRE = 47,
|
|
+ IPPROTO_ESP = 50,
|
|
+ IPPROTO_AH = 51,
|
|
+ IPPROTO_MTP = 92,
|
|
+ IPPROTO_BEETPH = 94,
|
|
+ IPPROTO_ENCAP = 98,
|
|
+ IPPROTO_PIM = 103,
|
|
+ IPPROTO_COMP = 108,
|
|
+ IPPROTO_SCTP = 132,
|
|
+ IPPROTO_UDPLITE = 136,
|
|
+ IPPROTO_MPLS = 137,
|
|
+ IPPROTO_RAW = 255,
|
|
+ IPPROTO_MAX = 256,
|
|
+};
|
|
+
|
|
+struct sk_msg_buff {
|
|
+ void *data;
|
|
+ void *data_end;
|
|
+ __u32 apply_bytes;
|
|
+ __u32 cork_bytes;
|
|
+ int sg_copybreak;
|
|
+ int sg_start;
|
|
+ int sg_curr;
|
|
+ int sg_end;
|
|
+ struct scatterlist sg_data[17];
|
|
+ bool sg_copy[17];
|
|
+ __u32 flags;
|
|
+ struct sock *sk_redir;
|
|
+ struct sock *sk;
|
|
+ struct sk_buff *skb;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct bpf_sock_ops_kern {
|
|
+ struct sock *sk;
|
|
+ u32 op;
|
|
+ union {
|
|
+ u32 args[4];
|
|
+ u32 reply;
|
|
+ u32 replylong[4];
|
|
+ };
|
|
+ u32 is_fullsock;
|
|
+ u64 temp;
|
|
+};
|
|
+
|
|
+struct sock_reuseport {
|
|
+ struct callback_head rcu;
|
|
+ u16 max_socks;
|
|
+ u16 num_socks;
|
|
+ unsigned int synq_overflow_ts;
|
|
+ unsigned int reuseport_id;
|
|
+ unsigned int bind_inany: 1;
|
|
+ unsigned int has_conns: 1;
|
|
+ struct bpf_prog *prog;
|
|
+ struct sock *socks[0];
|
|
+};
|
|
+
|
|
+enum sock_flags {
|
|
+ SOCK_DEAD = 0,
|
|
+ SOCK_DONE = 1,
|
|
+ SOCK_URGINLINE = 2,
|
|
+ SOCK_KEEPOPEN = 3,
|
|
+ SOCK_LINGER = 4,
|
|
+ SOCK_DESTROY = 5,
|
|
+ SOCK_BROADCAST = 6,
|
|
+ SOCK_TIMESTAMP = 7,
|
|
+ SOCK_ZAPPED = 8,
|
|
+ SOCK_USE_WRITE_QUEUE = 9,
|
|
+ SOCK_DBG = 10,
|
|
+ SOCK_RCVTSTAMP = 11,
|
|
+ SOCK_RCVTSTAMPNS = 12,
|
|
+ SOCK_LOCALROUTE = 13,
|
|
+ SOCK_QUEUE_SHRUNK = 14,
|
|
+ SOCK_MEMALLOC = 15,
|
|
+ SOCK_TIMESTAMPING_RX_SOFTWARE = 16,
|
|
+ SOCK_FASYNC = 17,
|
|
+ SOCK_RXQ_OVFL = 18,
|
|
+ SOCK_ZEROCOPY = 19,
|
|
+ SOCK_WIFI_STATUS = 20,
|
|
+ SOCK_NOFCS = 21,
|
|
+ SOCK_FILTER_LOCKED = 22,
|
|
+ SOCK_SELECT_ERR_QUEUE = 23,
|
|
+ SOCK_RCU_FREE = 24,
|
|
+ SOCK_TXTIME = 25,
|
|
+ SOCK_COMP = 26,
|
|
+};
|
|
+
|
|
+struct strp_stats {
|
|
+ long long unsigned int msgs;
|
|
+ long long unsigned int bytes;
|
|
+ unsigned int mem_fail;
|
|
+ unsigned int need_more_hdr;
|
|
+ unsigned int msg_too_big;
|
|
+ unsigned int msg_timeouts;
|
|
+ unsigned int bad_hdr_len;
|
|
+};
|
|
+
|
|
+struct strparser;
|
|
+
|
|
+struct strp_callbacks {
|
|
+ int (*parse_msg)(struct strparser *, struct sk_buff *);
|
|
+ void (*rcv_msg)(struct strparser *, struct sk_buff *);
|
|
+ int (*read_sock_done)(struct strparser *, int);
|
|
+ void (*abort_parser)(struct strparser *, int);
|
|
+ void (*lock)(struct strparser *);
|
|
+ void (*unlock)(struct strparser *);
|
|
+};
|
|
+
|
|
+struct strparser {
|
|
+ struct sock *sk;
|
|
+ u32 stopped: 1;
|
|
+ u32 paused: 1;
|
|
+ u32 aborted: 1;
|
|
+ u32 interrupted: 1;
|
|
+ u32 unrecov_intr: 1;
|
|
+ struct sk_buff **skb_nextp;
|
|
+ struct sk_buff *skb_head;
|
|
+ unsigned int need_bytes;
|
|
+ struct delayed_work msg_timer_work;
|
|
+ struct work_struct work;
|
|
+ struct strp_stats stats;
|
|
+ struct strp_callbacks cb;
|
|
+};
|
|
+
|
|
+struct fastopen_queue {
|
|
+ struct request_sock *rskq_rst_head;
|
|
+ struct request_sock *rskq_rst_tail;
|
|
+ spinlock_t lock;
|
|
+ int qlen;
|
|
+ int max_qlen;
|
|
+ struct tcp_fastopen_context *ctx;
|
|
+};
|
|
+
|
|
+struct request_sock_queue {
|
|
+ spinlock_t rskq_lock;
|
|
+ u8 rskq_defer_accept;
|
|
+ u32 synflood_warned;
|
|
+ atomic_t qlen;
|
|
+ atomic_t young;
|
|
+ struct request_sock *rskq_accept_head;
|
|
+ struct request_sock *rskq_accept_tail;
|
|
+ struct fastopen_queue fastopenq;
|
|
+};
|
|
+
|
|
+struct ip_options {
|
|
+ __be32 faddr;
|
|
+ __be32 nexthop;
|
|
+ unsigned char optlen;
|
|
+ unsigned char srr;
|
|
+ unsigned char rr;
|
|
+ unsigned char ts;
|
|
+ unsigned char is_strictroute: 1;
|
|
+ unsigned char srr_is_hit: 1;
|
|
+ unsigned char is_changed: 1;
|
|
+ unsigned char rr_needaddr: 1;
|
|
+ unsigned char ts_needtime: 1;
|
|
+ unsigned char ts_needaddr: 1;
|
|
+ unsigned char router_alert;
|
|
+ unsigned char cipso;
|
|
+ unsigned char __pad2;
|
|
+ unsigned char __data[0];
|
|
+};
|
|
+
|
|
+struct ip_options_rcu {
|
|
+ struct callback_head rcu;
|
|
+ struct ip_options opt;
|
|
+};
|
|
+
|
|
+struct ipv6_opt_hdr;
|
|
+
|
|
+struct ipv6_rt_hdr;
|
|
+
|
|
+struct ipv6_txoptions {
|
|
+ refcount_t refcnt;
|
|
+ int tot_len;
|
|
+ __u16 opt_flen;
|
|
+ __u16 opt_nflen;
|
|
+ struct ipv6_opt_hdr *hopopt;
|
|
+ struct ipv6_opt_hdr *dst0opt;
|
|
+ struct ipv6_rt_hdr *srcrt;
|
|
+ struct ipv6_opt_hdr *dst1opt;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct inet_cork {
|
|
+ unsigned int flags;
|
|
+ __be32 addr;
|
|
+ struct ip_options *opt;
|
|
+ unsigned int fragsize;
|
|
+ int length;
|
|
+ struct dst_entry *dst;
|
|
+ u8 tx_flags;
|
|
+ __u8 ttl;
|
|
+ __s16 tos;
|
|
+ char priority;
|
|
+ __u16 gso_size;
|
|
+ u64 transmit_time;
|
|
+};
|
|
+
|
|
+struct inet_cork_full {
|
|
+ struct inet_cork base;
|
|
+ struct flowi fl;
|
|
+};
|
|
+
|
|
+struct ipv6_pinfo;
|
|
+
|
|
+struct ip_mc_socklist;
|
|
+
|
|
+struct inet_sock {
|
|
+ struct sock sk;
|
|
+ struct ipv6_pinfo *pinet6;
|
|
+ __be32 inet_saddr;
|
|
+ __s16 uc_ttl;
|
|
+ __u16 cmsg_flags;
|
|
+ __be16 inet_sport;
|
|
+ __u16 inet_id;
|
|
+ struct ip_options_rcu *inet_opt;
|
|
+ int rx_dst_ifindex;
|
|
+ __u8 tos;
|
|
+ __u8 min_ttl;
|
|
+ __u8 mc_ttl;
|
|
+ __u8 pmtudisc;
|
|
+ __u8 recverr: 1;
|
|
+ __u8 is_icsk: 1;
|
|
+ __u8 freebind: 1;
|
|
+ __u8 hdrincl: 1;
|
|
+ __u8 mc_loop: 1;
|
|
+ __u8 transparent: 1;
|
|
+ __u8 mc_all: 1;
|
|
+ __u8 nodefrag: 1;
|
|
+ __u8 bind_address_no_port: 1;
|
|
+ __u8 defer_connect: 1;
|
|
+ __u8 rcv_tos;
|
|
+ __u8 convert_csum;
|
|
+ int uc_index;
|
|
+ int mc_index;
|
|
+ __be32 mc_addr;
|
|
+ struct ip_mc_socklist *mc_list;
|
|
+ struct inet_cork_full cork;
|
|
+};
|
|
+
|
|
+struct in6_pktinfo {
|
|
+ struct in6_addr ipi6_addr;
|
|
+ int ipi6_ifindex;
|
|
+};
|
|
+
|
|
+struct inet6_cork {
|
|
+ struct ipv6_txoptions *opt;
|
|
+ u8 hop_limit;
|
|
+ u8 tclass;
|
|
+};
|
|
+
|
|
+struct ipv6_mc_socklist;
|
|
+
|
|
+struct ipv6_ac_socklist;
|
|
+
|
|
+struct ipv6_fl_socklist;
|
|
+
|
|
+struct ipv6_pinfo {
|
|
+ struct in6_addr saddr;
|
|
+ struct in6_pktinfo sticky_pktinfo;
|
|
+ const struct in6_addr *daddr_cache;
|
|
+ __be32 flow_label;
|
|
+ __u32 frag_size;
|
|
+ __u16 __unused_1: 7;
|
|
+ __s16 hop_limit: 9;
|
|
+ __u16 mc_loop: 1;
|
|
+ __u16 __unused_2: 6;
|
|
+ __s16 mcast_hops: 9;
|
|
+ int ucast_oif;
|
|
+ int mcast_oif;
|
|
+ union {
|
|
+ struct {
|
|
+ __u16 srcrt: 1;
|
|
+ __u16 osrcrt: 1;
|
|
+ __u16 rxinfo: 1;
|
|
+ __u16 rxoinfo: 1;
|
|
+ __u16 rxhlim: 1;
|
|
+ __u16 rxohlim: 1;
|
|
+ __u16 hopopts: 1;
|
|
+ __u16 ohopopts: 1;
|
|
+ __u16 dstopts: 1;
|
|
+ __u16 odstopts: 1;
|
|
+ __u16 rxflow: 1;
|
|
+ __u16 rxtclass: 1;
|
|
+ __u16 rxpmtu: 1;
|
|
+ __u16 rxorigdstaddr: 1;
|
|
+ __u16 recvfragsize: 1;
|
|
+ } bits;
|
|
+ __u16 all;
|
|
+ } rxopt;
|
|
+ __u16 recverr: 1;
|
|
+ __u16 sndflow: 1;
|
|
+ __u16 repflow: 1;
|
|
+ __u16 pmtudisc: 3;
|
|
+ __u16 padding: 1;
|
|
+ __u16 srcprefs: 3;
|
|
+ __u16 dontfrag: 1;
|
|
+ __u16 autoflowlabel: 1;
|
|
+ __u16 autoflowlabel_set: 1;
|
|
+ __u8 min_hopcount;
|
|
+ __u8 tclass;
|
|
+ __be32 rcv_flowinfo;
|
|
+ __u32 dst_cookie;
|
|
+ __u32 rx_dst_cookie;
|
|
+ struct ipv6_mc_socklist *ipv6_mc_list;
|
|
+ struct ipv6_ac_socklist *ipv6_ac_list;
|
|
+ struct ipv6_fl_socklist *ipv6_fl_list;
|
|
+ struct ipv6_txoptions *opt;
|
|
+ struct sk_buff *pktoptions;
|
|
+ struct sk_buff *rxpmtu;
|
|
+ struct inet6_cork cork;
|
|
+};
|
|
+
|
|
+struct inet_connection_sock_af_ops {
|
|
+ int (*queue_xmit)(struct sock *, struct sk_buff *, struct flowi *);
|
|
+ void (*send_check)(struct sock *, struct sk_buff *);
|
|
+ int (*rebuild_header)(struct sock *);
|
|
+ void (*sk_rx_dst_set)(struct sock *, const struct sk_buff *);
|
|
+ int (*conn_request)(struct sock *, struct sk_buff *);
|
|
+ struct sock * (*syn_recv_sock)(const struct sock *, struct sk_buff *, struct request_sock *, struct dst_entry *, struct request_sock *, bool *);
|
|
+ u16 net_header_len;
|
|
+ u16 net_frag_header_len;
|
|
+ u16 sockaddr_len;
|
|
+ int (*setsockopt)(struct sock *, int, int, char *, unsigned int);
|
|
+ int (*getsockopt)(struct sock *, int, int, char *, int *);
|
|
+ int (*compat_setsockopt)(struct sock *, int, int, char *, unsigned int);
|
|
+ int (*compat_getsockopt)(struct sock *, int, int, char *, int *);
|
|
+ void (*addr2sockaddr)(struct sock *, struct sockaddr *);
|
|
+ void (*mtu_reduced)(struct sock *);
|
|
+};
|
|
+
|
|
+struct inet_bind_bucket;
|
|
+
|
|
+struct tcp_ulp_ops;
|
|
+
|
|
+struct inet_connection_sock {
|
|
+ struct inet_sock icsk_inet;
|
|
+ struct request_sock_queue icsk_accept_queue;
|
|
+ struct inet_bind_bucket *icsk_bind_hash;
|
|
+ long unsigned int icsk_timeout;
|
|
+ struct timer_list icsk_retransmit_timer;
|
|
+ struct timer_list icsk_delack_timer;
|
|
+ __u32 icsk_rto;
|
|
+ __u32 icsk_pmtu_cookie;
|
|
+ const struct tcp_congestion_ops *icsk_ca_ops;
|
|
+ const struct inet_connection_sock_af_ops *icsk_af_ops;
|
|
+ const struct tcp_ulp_ops *icsk_ulp_ops;
|
|
+ void *icsk_ulp_data;
|
|
+ void (*icsk_clean_acked)(struct sock *, u32);
|
|
+ struct hlist_node icsk_listen_portaddr_node;
|
|
+ unsigned int (*icsk_sync_mss)(struct sock *, u32);
|
|
+ __u8 icsk_ca_state: 6;
|
|
+ __u8 icsk_ca_setsockopt: 1;
|
|
+ __u8 icsk_ca_dst_locked: 1;
|
|
+ __u8 icsk_retransmits;
|
|
+ __u8 icsk_pending;
|
|
+ __u8 icsk_backoff;
|
|
+ __u8 icsk_syn_retries;
|
|
+ __u8 icsk_probes_out;
|
|
+ __u16 icsk_ext_hdr_len;
|
|
+ struct {
|
|
+ __u8 pending;
|
|
+ __u8 quick;
|
|
+ __u8 pingpong;
|
|
+ __u8 blocked;
|
|
+ __u32 ato;
|
|
+ long unsigned int timeout;
|
|
+ __u32 lrcvtime;
|
|
+ __u16 last_seg_size;
|
|
+ __u16 rcv_mss;
|
|
+ } icsk_ack;
|
|
+ struct {
|
|
+ int enabled;
|
|
+ int search_high;
|
|
+ int search_low;
|
|
+ int probe_size;
|
|
+ u32 probe_timestamp;
|
|
+ } icsk_mtup;
|
|
+ u32 icsk_user_timeout;
|
|
+ u64 icsk_ca_priv[13];
|
|
+};
|
|
+
|
|
+struct inet_bind_bucket {
|
|
+ possible_net_t ib_net;
|
|
+ int l3mdev;
|
|
+ short unsigned int port;
|
|
+ signed char fastreuse;
|
|
+ signed char fastreuseport;
|
|
+ kuid_t fastuid;
|
|
+ struct in6_addr fast_v6_rcv_saddr;
|
|
+ __be32 fast_rcv_saddr;
|
|
+ short unsigned int fast_sk_family;
|
|
+ bool fast_ipv6_only;
|
|
+ struct hlist_node node;
|
|
+ struct hlist_head owners;
|
|
+};
|
|
+
|
|
+struct tcp_ulp_ops {
|
|
+ struct list_head list;
|
|
+ int (*init)(struct sock *);
|
|
+ void (*release)(struct sock *);
|
|
+ int uid;
|
|
+ char name[16];
|
|
+ bool user_visible;
|
|
+ struct module *owner;
|
|
+};
|
|
+
|
|
+struct ipv6_rt_hdr {
|
|
+ __u8 nexthdr;
|
|
+ __u8 hdrlen;
|
|
+ __u8 type;
|
|
+ __u8 segments_left;
|
|
+};
|
|
+
|
|
+struct ipv6_opt_hdr {
|
|
+ __u8 nexthdr;
|
|
+ __u8 hdrlen;
|
|
+};
|
|
+
|
|
+struct inet6_skb_parm {
|
|
+ int iif;
|
|
+ __be16 ra;
|
|
+ __u16 dst0;
|
|
+ __u16 srcrt;
|
|
+ __u16 dst1;
|
|
+ __u16 lastopt;
|
|
+ __u16 nhoff;
|
|
+ __u16 flags;
|
|
+ __u16 dsthao;
|
|
+ __u16 frag_max_size;
|
|
+};
|
|
+
|
|
+struct ip6_sf_socklist;
|
|
+
|
|
+struct ipv6_mc_socklist {
|
|
+ struct in6_addr addr;
|
|
+ int ifindex;
|
|
+ struct ipv6_mc_socklist *next;
|
|
+ rwlock_t sflock;
|
|
+ unsigned int sfmode;
|
|
+ struct ip6_sf_socklist *sflist;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct ipv6_ac_socklist {
|
|
+ struct in6_addr acl_addr;
|
|
+ int acl_ifindex;
|
|
+ struct ipv6_ac_socklist *acl_next;
|
|
+};
|
|
+
|
|
+struct ip6_flowlabel;
|
|
+
|
|
+struct ipv6_fl_socklist {
|
|
+ struct ipv6_fl_socklist *next;
|
|
+ struct ip6_flowlabel *fl;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct ip6_sf_socklist {
|
|
+ unsigned int sl_max;
|
|
+ unsigned int sl_count;
|
|
+ struct in6_addr sl_addr[0];
|
|
+};
|
|
+
|
|
+struct ip6_flowlabel {
|
|
+ struct ip6_flowlabel *next;
|
|
+ __be32 label;
|
|
+ atomic_t users;
|
|
+ struct in6_addr dst;
|
|
+ struct ipv6_txoptions *opt;
|
|
+ long unsigned int linger;
|
|
+ struct callback_head rcu;
|
|
+ u8 share;
|
|
+ union {
|
|
+ struct pid *pid;
|
|
+ kuid_t uid;
|
|
+ } owner;
|
|
+ long unsigned int lastuse;
|
|
+ long unsigned int expires;
|
|
+ struct net *fl_net;
|
|
+};
|
|
+
|
|
+struct ip_rt_acct {
|
|
+ __u32 o_bytes;
|
|
+ __u32 o_packets;
|
|
+ __u32 i_bytes;
|
|
+ __u32 i_packets;
|
|
+};
|
|
+
|
|
+struct inet_ehash_bucket {
|
|
+ struct hlist_nulls_head chain;
|
|
+};
|
|
+
|
|
+struct inet_bind_hashbucket {
|
|
+ spinlock_t lock;
|
|
+ struct hlist_head chain;
|
|
+};
|
|
+
|
|
+struct inet_skb_parm {
|
|
+ int iif;
|
|
+ struct ip_options opt;
|
|
+ u16 flags;
|
|
+ u16 frag_max_size;
|
|
+};
|
|
+
|
|
+struct tcp_skb_cb {
|
|
+ __u32 seq;
|
|
+ __u32 end_seq;
|
|
+ union {
|
|
+ __u32 tcp_tw_isn;
|
|
+ struct {
|
|
+ u16 tcp_gso_segs;
|
|
+ u16 tcp_gso_size;
|
|
+ };
|
|
+ };
|
|
+ __u8 tcp_flags;
|
|
+ __u8 sacked;
|
|
+ __u8 ip_dsfield;
|
|
+ __u8 txstamp_ack: 1;
|
|
+ __u8 eor: 1;
|
|
+ __u8 has_rxtstamp: 1;
|
|
+ __u8 unused: 5;
|
|
+ __u32 ack_seq;
|
|
+ union {
|
|
+ struct {
|
|
+ __u32 in_flight: 30;
|
|
+ __u32 is_app_limited: 1;
|
|
+ __u32 unused: 1;
|
|
+ __u32 delivered;
|
|
+ u64 first_tx_mstamp;
|
|
+ u64 delivered_mstamp;
|
|
+ } tx;
|
|
+ union {
|
|
+ struct inet_skb_parm h4;
|
|
+ struct inet6_skb_parm h6;
|
|
+ } header;
|
|
+ struct {
|
|
+ __u32 flags;
|
|
+ struct sock *sk_redir;
|
|
+ void *data_end;
|
|
+ } bpf;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct ack_sample {
|
|
+ u32 pkts_acked;
|
|
+ s32 rtt_us;
|
|
+ u32 in_flight;
|
|
+};
|
|
+
|
|
+struct rate_sample {
|
|
+ u64 prior_mstamp;
|
|
+ u32 prior_delivered;
|
|
+ s32 delivered;
|
|
+ long int interval_us;
|
|
+ u32 snd_interval_us;
|
|
+ u32 rcv_interval_us;
|
|
+ long int rtt_us;
|
|
+ int losses;
|
|
+ u32 acked_sacked;
|
|
+ u32 prior_in_flight;
|
|
+ bool is_app_limited;
|
|
+ bool is_retrans;
|
|
+ bool is_ack_delayed;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCP_ULP_TLS = 0,
|
|
+ TCP_ULP_BPF = 1,
|
|
+};
|
|
+
|
|
+struct bpf_sock_progs {
|
|
+ struct bpf_prog *bpf_tx_msg;
|
|
+ struct bpf_prog *bpf_parse;
|
|
+ struct bpf_prog *bpf_verdict;
|
|
+};
|
|
+
|
|
+struct bpf_stab {
|
|
+ struct bpf_map map;
|
|
+ struct sock **sock_map;
|
|
+ struct bpf_sock_progs progs;
|
|
+ raw_spinlock_t lock;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct bucket___2 {
|
|
+ struct hlist_head head;
|
|
+ raw_spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct bpf_htab___2 {
|
|
+ struct bpf_map map;
|
|
+ struct bucket___2 *buckets;
|
|
+ atomic_t count;
|
|
+ u32 n_buckets;
|
|
+ u32 elem_size;
|
|
+ struct bpf_sock_progs progs;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct htab_elem___2 {
|
|
+ struct callback_head rcu;
|
|
+ struct hlist_node hash_node;
|
|
+ u32 hash;
|
|
+ struct sock *sk;
|
|
+ char key[0];
|
|
+};
|
|
+
|
|
+enum smap_psock_state {
|
|
+ SMAP_TX_RUNNING = 0,
|
|
+};
|
|
+
|
|
+struct smap_psock_map_entry {
|
|
+ struct list_head list;
|
|
+ struct bpf_map *map;
|
|
+ struct sock **entry;
|
|
+ struct htab_elem___2 *hash_link;
|
|
+};
|
|
+
|
|
+struct smap_psock {
|
|
+ struct callback_head rcu;
|
|
+ refcount_t refcnt;
|
|
+ struct sk_buff_head rxqueue;
|
|
+ bool strp_enabled;
|
|
+ int save_rem;
|
|
+ int save_off;
|
|
+ struct sk_buff *save_skb;
|
|
+ struct sock *sk_redir;
|
|
+ int apply_bytes;
|
|
+ int cork_bytes;
|
|
+ int sg_size;
|
|
+ int eval;
|
|
+ struct sk_msg_buff *cork;
|
|
+ struct list_head ingress;
|
|
+ struct strparser strp;
|
|
+ struct bpf_prog *bpf_tx_msg;
|
|
+ struct bpf_prog *bpf_parse;
|
|
+ struct bpf_prog *bpf_verdict;
|
|
+ struct list_head maps;
|
|
+ spinlock_t maps_lock;
|
|
+ struct sock *sock;
|
|
+ long unsigned int state;
|
|
+ struct work_struct tx_work;
|
|
+ struct work_struct gc_work;
|
|
+ struct proto *sk_proto;
|
|
+ void (*save_unhash)(struct sock *);
|
|
+ void (*save_close)(struct sock *, long int);
|
|
+ void (*save_data_ready)(struct sock *);
|
|
+ void (*save_write_space)(struct sock *);
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SOCKMAP_IPV4 = 0,
|
|
+ SOCKMAP_IPV6 = 1,
|
|
+ SOCKMAP_NUM_PROTS = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SOCKMAP_BASE = 0,
|
|
+ SOCKMAP_TX = 1,
|
|
+ SOCKMAP_NUM_CONFIGS = 2,
|
|
+};
|
|
+
|
|
+enum __sk_action {
|
|
+ __SK_DROP = 0,
|
|
+ __SK_PASS = 1,
|
|
+ __SK_REDIRECT = 2,
|
|
+ __SK_NONE = 3,
|
|
+};
|
|
+
|
|
+enum bpf_stack_build_id_status {
|
|
+ BPF_STACK_BUILD_ID_EMPTY = 0,
|
|
+ BPF_STACK_BUILD_ID_VALID = 1,
|
|
+ BPF_STACK_BUILD_ID_IP = 2,
|
|
+};
|
|
+
|
|
+struct bpf_stack_build_id {
|
|
+ __s32 status;
|
|
+ unsigned char build_id[20];
|
|
+ union {
|
|
+ __u64 offset;
|
|
+ __u64 ip;
|
|
+ };
|
|
+};
|
|
+
|
|
+typedef __u32 Elf32_Addr;
|
|
+
|
|
+typedef __u16 Elf32_Half;
|
|
+
|
|
+typedef __u32 Elf32_Off;
|
|
+
|
|
+struct elf32_hdr {
|
|
+ unsigned char e_ident[16];
|
|
+ Elf32_Half e_type;
|
|
+ Elf32_Half e_machine;
|
|
+ Elf32_Word e_version;
|
|
+ Elf32_Addr e_entry;
|
|
+ Elf32_Off e_phoff;
|
|
+ Elf32_Off e_shoff;
|
|
+ Elf32_Word e_flags;
|
|
+ Elf32_Half e_ehsize;
|
|
+ Elf32_Half e_phentsize;
|
|
+ Elf32_Half e_phnum;
|
|
+ Elf32_Half e_shentsize;
|
|
+ Elf32_Half e_shnum;
|
|
+ Elf32_Half e_shstrndx;
|
|
+};
|
|
+
|
|
+typedef struct elf32_hdr Elf32_Ehdr;
|
|
+
|
|
+struct elf32_phdr {
|
|
+ Elf32_Word p_type;
|
|
+ Elf32_Off p_offset;
|
|
+ Elf32_Addr p_vaddr;
|
|
+ Elf32_Addr p_paddr;
|
|
+ Elf32_Word p_filesz;
|
|
+ Elf32_Word p_memsz;
|
|
+ Elf32_Word p_flags;
|
|
+ Elf32_Word p_align;
|
|
+};
|
|
+
|
|
+typedef struct elf32_phdr Elf32_Phdr;
|
|
+
|
|
+typedef struct elf32_note Elf32_Nhdr;
|
|
+
|
|
+struct stack_map_bucket {
|
|
+ struct pcpu_freelist_node fnode;
|
|
+ u32 hash;
|
|
+ u32 nr;
|
|
+ u64 data[0];
|
|
+};
|
|
+
|
|
+struct bpf_stack_map {
|
|
+ struct bpf_map map;
|
|
+ void *elems;
|
|
+ struct pcpu_freelist freelist;
|
|
+ u32 n_buckets;
|
|
+ struct stack_map_bucket *buckets[0];
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct stack_map_irq_work {
|
|
+ struct irq_work irq_work;
|
|
+ struct rw_semaphore *sem;
|
|
+};
|
|
+
|
|
+struct __sk_buff {
|
|
+ __u32 len;
|
|
+ __u32 pkt_type;
|
|
+ __u32 mark;
|
|
+ __u32 queue_mapping;
|
|
+ __u32 protocol;
|
|
+ __u32 vlan_present;
|
|
+ __u32 vlan_tci;
|
|
+ __u32 vlan_proto;
|
|
+ __u32 priority;
|
|
+ __u32 ingress_ifindex;
|
|
+ __u32 ifindex;
|
|
+ __u32 tc_index;
|
|
+ __u32 cb[5];
|
|
+ __u32 hash;
|
|
+ __u32 tc_classid;
|
|
+ __u32 data;
|
|
+ __u32 data_end;
|
|
+ __u32 napi_id;
|
|
+ __u32 family;
|
|
+ __u32 remote_ip4;
|
|
+ __u32 local_ip4;
|
|
+ __u32 remote_ip6[4];
|
|
+ __u32 local_ip6[4];
|
|
+ __u32 remote_port;
|
|
+ __u32 local_port;
|
|
+ __u32 data_meta;
|
|
+};
|
|
+
|
|
+struct bpf_cgroup_dev_ctx {
|
|
+ __u32 access_type;
|
|
+ __u32 major;
|
|
+ __u32 minor;
|
|
+};
|
|
+
|
|
+struct bpf_prog_list {
|
|
+ struct list_head node;
|
|
+ struct bpf_prog *prog;
|
|
+ struct bpf_cgroup_storage *storage;
|
|
+};
|
|
+
|
|
+struct qdisc_skb_cb {
|
|
+ unsigned int pkt_len;
|
|
+ u16 slave_dev_queue_mapping;
|
|
+ u16 tc_classid;
|
|
+ unsigned char data[20];
|
|
+};
|
|
+
|
|
+struct bpf_sock_addr_kern {
|
|
+ struct sock *sk;
|
|
+ struct sockaddr *uaddr;
|
|
+ u64 tmp_reg;
|
|
+ void *t_ctx;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCPF_ESTABLISHED = 2,
|
|
+ TCPF_SYN_SENT = 4,
|
|
+ TCPF_SYN_RECV = 8,
|
|
+ TCPF_FIN_WAIT1 = 16,
|
|
+ TCPF_FIN_WAIT2 = 32,
|
|
+ TCPF_TIME_WAIT = 64,
|
|
+ TCPF_CLOSE = 128,
|
|
+ TCPF_CLOSE_WAIT = 256,
|
|
+ TCPF_LAST_ACK = 512,
|
|
+ TCPF_LISTEN = 1024,
|
|
+ TCPF_CLOSING = 2048,
|
|
+ TCPF_NEW_SYN_RECV = 4096,
|
|
+};
|
|
+
|
|
+struct reuseport_array {
|
|
+ struct bpf_map map;
|
|
+ struct sock *ptrs[0];
|
|
+};
|
|
+
|
|
+struct module___2;
|
|
+
|
|
+struct file___2;
|
|
+
|
|
+struct kiocb___2;
|
|
+
|
|
+struct iov_iter___2;
|
|
+
|
|
+struct poll_table_struct___2;
|
|
+
|
|
+struct vm_area_struct___2;
|
|
+
|
|
+struct file_lock___2;
|
|
+
|
|
+struct page___2;
|
|
+
|
|
+struct pipe_inode_info___2;
|
|
+
|
|
+struct file_operations___2 {
|
|
+ struct module___2 *owner;
|
|
+ loff_t (*llseek)(struct file___2 *, loff_t, int);
|
|
+ ssize_t (*read)(struct file___2 *, char *, size_t, loff_t *);
|
|
+ ssize_t (*write)(struct file___2 *, const char *, size_t, loff_t *);
|
|
+ ssize_t (*read_iter)(struct kiocb___2 *, struct iov_iter___2 *);
|
|
+ ssize_t (*write_iter)(struct kiocb___2 *, struct iov_iter___2 *);
|
|
+ int (*iterate)(struct file___2 *, struct dir_context *);
|
|
+ int (*iterate_shared)(struct file___2 *, struct dir_context *);
|
|
+ __poll_t (*poll)(struct file___2 *, struct poll_table_struct___2 *);
|
|
+ long int (*unlocked_ioctl)(struct file___2 *, unsigned int, long unsigned int);
|
|
+ long int (*compat_ioctl)(struct file___2 *, unsigned int, long unsigned int);
|
|
+ int (*mmap)(struct file___2 *, struct vm_area_struct___2 *);
|
|
+ long unsigned int mmap_supported_flags;
|
|
+ int (*open)(struct inode___2 *, struct file___2 *);
|
|
+ int (*flush)(struct file___2 *, fl_owner_t);
|
|
+ int (*release)(struct inode___2 *, struct file___2 *);
|
|
+ int (*fsync)(struct file___2 *, loff_t, loff_t, int);
|
|
+ int (*fasync)(int, struct file___2 *, int);
|
|
+ int (*lock)(struct file___2 *, int, struct file_lock___2 *);
|
|
+ ssize_t (*sendpage)(struct file___2 *, struct page___2 *, int, size_t, loff_t *, int);
|
|
+ long unsigned int (*get_unmapped_area)(struct file___2 *, long unsigned int, long unsigned int, long unsigned int, long unsigned int);
|
|
+ int (*check_flags)(int);
|
|
+ int (*flock)(struct file___2 *, int, struct file_lock___2 *);
|
|
+ ssize_t (*splice_write)(struct pipe_inode_info___2 *, struct file___2 *, loff_t *, size_t, unsigned int);
|
|
+ ssize_t (*splice_read)(struct file___2 *, loff_t *, struct pipe_inode_info___2 *, size_t, unsigned int);
|
|
+ int (*setlease)(struct file___2 *, long int, struct file_lock___2 **, void **);
|
|
+ long int (*fallocate)(struct file___2 *, int, loff_t, loff_t);
|
|
+ void (*show_fdinfo)(struct seq_file___2 *, struct file___2 *);
|
|
+ ssize_t (*copy_file_range)(struct file___2 *, loff_t, struct file___2 *, loff_t, size_t, unsigned int);
|
|
+ int (*clone_file_range)(struct file___2 *, loff_t, struct file___2 *, loff_t, u64);
|
|
+ int (*dedupe_file_range)(struct file___2 *, loff_t, struct file___2 *, loff_t, u64);
|
|
+ int (*fadvise)(struct file___2 *, loff_t, loff_t, int);
|
|
+ int (*iopoll)(struct kiocb___2 *, bool);
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+typedef struct page___2 *pgtable_t___2;
|
|
+
|
|
+struct address_space___2;
|
|
+
|
|
+struct mm_struct___2;
|
|
+
|
|
+struct dev_pagemap___2;
|
|
+
|
|
+struct page___2 {
|
|
+ long unsigned int flags;
|
|
+ union {
|
|
+ struct {
|
|
+ struct list_head lru;
|
|
+ struct address_space___2 *mapping;
|
|
+ long unsigned int index;
|
|
+ long unsigned int private;
|
|
+ };
|
|
+ struct {
|
|
+ union {
|
|
+ struct list_head slab_list;
|
|
+ struct {
|
|
+ struct page___2 *next;
|
|
+ int pages;
|
|
+ int pobjects;
|
|
+ };
|
|
+ };
|
|
+ struct kmem_cache *slab_cache;
|
|
+ void *freelist;
|
|
+ union {
|
|
+ void *s_mem;
|
|
+ long unsigned int counters;
|
|
+ struct {
|
|
+ unsigned int inuse: 16;
|
|
+ unsigned int objects: 15;
|
|
+ unsigned int frozen: 1;
|
|
+ };
|
|
+ };
|
|
+ };
|
|
+ struct {
|
|
+ long unsigned int compound_head;
|
|
+ unsigned char compound_dtor;
|
|
+ unsigned char compound_order;
|
|
+ atomic_t compound_mapcount;
|
|
+ };
|
|
+ struct {
|
|
+ long unsigned int _compound_pad_1;
|
|
+ long unsigned int _compound_pad_2;
|
|
+ struct list_head deferred_list;
|
|
+ };
|
|
+ struct {
|
|
+ long unsigned int _pt_pad_1;
|
|
+ pgtable_t___2 pmd_huge_pte;
|
|
+ long unsigned int _pt_pad_2;
|
|
+ union {
|
|
+ struct mm_struct___2 *pt_mm;
|
|
+ atomic_t pt_frag_refcount;
|
|
+ };
|
|
+ spinlock_t ptl;
|
|
+ };
|
|
+ struct {
|
|
+ struct dev_pagemap___2 *pgmap;
|
|
+ long unsigned int hmm_data;
|
|
+ long unsigned int _zd_pad_1;
|
|
+ };
|
|
+ struct callback_head callback_head;
|
|
+ };
|
|
+ union {
|
|
+ atomic_t _mapcount;
|
|
+ unsigned int page_type;
|
|
+ unsigned int active;
|
|
+ int units;
|
|
+ };
|
|
+ atomic_t _refcount;
|
|
+ struct mem_cgroup *mem_cgroup;
|
|
+};
|
|
+
|
|
+struct thread_struct___2;
|
|
+
|
|
+struct task_struct___2;
|
|
+
|
|
+struct pv_cpu_ops___2 {
|
|
+ long unsigned int (*get_debugreg)(int);
|
|
+ void (*set_debugreg)(int, long unsigned int);
|
|
+ long unsigned int (*read_cr0)();
|
|
+ void (*write_cr0)(long unsigned int);
|
|
+ void (*write_cr4)(long unsigned int);
|
|
+ long unsigned int (*read_cr8)();
|
|
+ void (*write_cr8)(long unsigned int);
|
|
+ void (*load_tr_desc)();
|
|
+ void (*load_gdt)(const struct desc_ptr *);
|
|
+ void (*load_idt)(const struct desc_ptr *);
|
|
+ void (*set_ldt)(const void *, unsigned int);
|
|
+ long unsigned int (*store_tr)();
|
|
+ void (*load_tls)(struct thread_struct___2 *, unsigned int);
|
|
+ void (*load_gs_index)(unsigned int);
|
|
+ void (*write_ldt_entry)(struct desc_struct *, int, const void *);
|
|
+ void (*write_gdt_entry)(struct desc_struct *, int, const void *, int);
|
|
+ void (*write_idt_entry)(gate_desc *, int, const gate_desc *);
|
|
+ void (*alloc_ldt)(struct desc_struct *, unsigned int);
|
|
+ void (*free_ldt)(struct desc_struct *, unsigned int);
|
|
+ void (*load_sp0)(long unsigned int);
|
|
+ void (*set_iopl_mask)(unsigned int);
|
|
+ void (*wbinvd)();
|
|
+ void (*io_delay)();
|
|
+ void (*cpuid)(unsigned int *, unsigned int *, unsigned int *, unsigned int *);
|
|
+ u64 (*read_msr)(unsigned int);
|
|
+ void (*write_msr)(unsigned int, unsigned int, unsigned int);
|
|
+ u64 (*read_msr_safe)(unsigned int, int *);
|
|
+ int (*write_msr_safe)(unsigned int, unsigned int, unsigned int);
|
|
+ u64 (*read_pmc)(int);
|
|
+ void (*usergs_sysret64)();
|
|
+ void (*iret)();
|
|
+ void (*swapgs)();
|
|
+ void (*start_context_switch)(struct task_struct___2 *);
|
|
+ void (*end_context_switch)(struct task_struct___2 *);
|
|
+};
|
|
+
|
|
+struct perf_event___2;
|
|
+
|
|
+struct thread_struct___2 {
|
|
+ struct desc_struct tls_array[3];
|
|
+ long unsigned int sp;
|
|
+ short unsigned int es;
|
|
+ short unsigned int ds;
|
|
+ short unsigned int fsindex;
|
|
+ short unsigned int gsindex;
|
|
+ long unsigned int fsbase;
|
|
+ long unsigned int gsbase;
|
|
+ struct perf_event___2 *ptrace_bps[4];
|
|
+ long unsigned int debugreg6;
|
|
+ long unsigned int ptrace_dr7;
|
|
+ long unsigned int cr2;
|
|
+ long unsigned int trap_nr;
|
|
+ long unsigned int error_code;
|
|
+ long unsigned int *io_bitmap_ptr;
|
|
+ long unsigned int iopl;
|
|
+ unsigned int io_bitmap_max;
|
|
+ mm_segment_t addr_limit;
|
|
+ unsigned int sig_on_uaccess_err: 1;
|
|
+ unsigned int uaccess_err: 1;
|
|
+ long: 62;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct fpu fpu;
|
|
+};
|
|
+
|
|
+struct vmacache___2 {
|
|
+ u64 seqnum;
|
|
+ struct vm_area_struct___2 *vmas[4];
|
|
+};
|
|
+
|
|
+struct page_frag___2 {
|
|
+ struct page___2 *page;
|
|
+ __u32 offset;
|
|
+ __u32 size;
|
|
+};
|
|
+
|
|
+struct pid___2;
|
|
+
|
|
+struct cred___2;
|
|
+
|
|
+struct nsproxy___2;
|
|
+
|
|
+struct signal_struct___2;
|
|
+
|
|
+struct css_set___2;
|
|
+
|
|
+struct perf_event_context___2;
|
|
+
|
|
+struct vm_struct___2;
|
|
+
|
|
+struct task_struct___2 {
|
|
+ struct thread_info thread_info;
|
|
+ volatile long int state;
|
|
+ void *stack;
|
|
+ atomic_t usage;
|
|
+ unsigned int flags;
|
|
+ unsigned int ptrace;
|
|
+ struct llist_node wake_entry;
|
|
+ int on_cpu;
|
|
+ unsigned int cpu;
|
|
+ unsigned int wakee_flips;
|
|
+ long unsigned int wakee_flip_decay_ts;
|
|
+ struct task_struct___2 *last_wakee;
|
|
+ int recent_used_cpu;
|
|
+ int wake_cpu;
|
|
+ int on_rq;
|
|
+ int prio;
|
|
+ int static_prio;
|
|
+ int normal_prio;
|
|
+ unsigned int rt_priority;
|
|
+ const struct sched_class *sched_class;
|
|
+ struct sched_entity se;
|
|
+ struct sched_rt_entity rt;
|
|
+ struct task_group *sched_task_group;
|
|
+ struct sched_dl_entity dl;
|
|
+ struct hlist_head preempt_notifiers;
|
|
+ unsigned int btrace_seq;
|
|
+ unsigned int policy;
|
|
+ int nr_cpus_allowed;
|
|
+ cpumask_t cpus_allowed;
|
|
+ struct sched_info sched_info;
|
|
+ struct list_head tasks;
|
|
+ struct plist_node pushable_tasks;
|
|
+ struct rb_node pushable_dl_tasks;
|
|
+ struct mm_struct___2 *mm;
|
|
+ struct mm_struct___2 *active_mm;
|
|
+ struct vmacache___2 vmacache;
|
|
+ struct task_rss_stat rss_stat;
|
|
+ int exit_state;
|
|
+ int exit_code;
|
|
+ int exit_signal;
|
|
+ int pdeath_signal;
|
|
+ long unsigned int jobctl;
|
|
+ unsigned int personality;
|
|
+ unsigned int sched_reset_on_fork: 1;
|
|
+ unsigned int sched_contributes_to_load: 1;
|
|
+ unsigned int sched_migrated: 1;
|
|
+ unsigned int sched_remote_wakeup: 1;
|
|
+ int: 28;
|
|
+ unsigned int in_execve: 1;
|
|
+ unsigned int in_iowait: 1;
|
|
+ unsigned int restore_sigmask: 1;
|
|
+ unsigned int in_user_fault: 1;
|
|
+ unsigned int memcg_kmem_skip_account: 1;
|
|
+ unsigned int no_cgroup_migration: 1;
|
|
+ unsigned int use_memdelay: 1;
|
|
+ long unsigned int atomic_flags;
|
|
+ struct restart_block restart_block;
|
|
+ pid_t pid;
|
|
+ pid_t tgid;
|
|
+ long unsigned int stack_canary;
|
|
+ struct task_struct___2 *real_parent;
|
|
+ struct task_struct___2 *parent;
|
|
+ struct list_head children;
|
|
+ struct list_head sibling;
|
|
+ struct task_struct___2 *group_leader;
|
|
+ struct list_head ptraced;
|
|
+ struct list_head ptrace_entry;
|
|
+ struct pid___2 *thread_pid;
|
|
+ struct hlist_node pid_links[4];
|
|
+ struct list_head thread_group;
|
|
+ struct list_head thread_node;
|
|
+ struct completion *vfork_done;
|
|
+ int *set_child_tid;
|
|
+ int *clear_child_tid;
|
|
+ u64 utime;
|
|
+ u64 stime;
|
|
+ u64 gtime;
|
|
+ struct prev_cputime prev_cputime;
|
|
+ struct vtime vtime;
|
|
+ atomic_t tick_dep_mask;
|
|
+ long unsigned int nvcsw;
|
|
+ long unsigned int nivcsw;
|
|
+ u64 start_time;
|
|
+ u64 real_start_time;
|
|
+ long unsigned int min_flt;
|
|
+ long unsigned int maj_flt;
|
|
+ struct task_cputime cputime_expires;
|
|
+ struct list_head cpu_timers[3];
|
|
+ const struct cred___2 *ptracer_cred;
|
|
+ const struct cred___2 *real_cred;
|
|
+ const struct cred___2 *cred;
|
|
+ char comm[16];
|
|
+ struct nameidata *nameidata;
|
|
+ struct sysv_sem sysvsem;
|
|
+ struct sysv_shm sysvshm;
|
|
+ long unsigned int last_switch_count;
|
|
+ long unsigned int last_switch_time;
|
|
+ struct fs_struct *fs;
|
|
+ struct files_struct *files;
|
|
+ struct nsproxy___2 *nsproxy;
|
|
+ struct signal_struct___2 *signal;
|
|
+ struct sighand_struct *sighand;
|
|
+ sigset_t blocked;
|
|
+ sigset_t real_blocked;
|
|
+ sigset_t saved_sigmask;
|
|
+ struct sigpending pending;
|
|
+ long unsigned int sas_ss_sp;
|
|
+ size_t sas_ss_size;
|
|
+ unsigned int sas_ss_flags;
|
|
+ struct callback_head *task_works;
|
|
+ struct audit_context *audit_context;
|
|
+ kuid_t loginuid;
|
|
+ unsigned int sessionid;
|
|
+ struct seccomp seccomp;
|
|
+ u32 parent_exec_id;
|
|
+ u32 self_exec_id;
|
|
+ spinlock_t alloc_lock;
|
|
+ raw_spinlock_t pi_lock;
|
|
+ struct wake_q_node wake_q;
|
|
+ struct rb_root_cached pi_waiters;
|
|
+ struct task_struct___2 *pi_top_task;
|
|
+ struct rt_mutex_waiter *pi_blocked_on;
|
|
+ void *journal_info;
|
|
+ struct bio_list *bio_list;
|
|
+ struct blk_plug *plug;
|
|
+ struct reclaim_state *reclaim_state;
|
|
+ struct backing_dev_info *backing_dev_info;
|
|
+ struct io_context *io_context;
|
|
+ long unsigned int ptrace_message;
|
|
+ siginfo_t *last_siginfo;
|
|
+ struct task_io_accounting ioac;
|
|
+ u64 acct_rss_mem1;
|
|
+ u64 acct_vm_mem1;
|
|
+ u64 acct_timexpd;
|
|
+ nodemask_t mems_allowed;
|
|
+ seqcount_t mems_allowed_seq;
|
|
+ int cpuset_mem_spread_rotor;
|
|
+ int cpuset_slab_spread_rotor;
|
|
+ struct css_set___2 *cgroups;
|
|
+ struct list_head cg_list;
|
|
+ struct robust_list_head *robust_list;
|
|
+ struct compat_robust_list_head *compat_robust_list;
|
|
+ struct list_head pi_state_list;
|
|
+ struct futex_pi_state *pi_state_cache;
|
|
+ struct perf_event_context___2 *perf_event_ctxp[2];
|
|
+ struct mutex perf_event_mutex;
|
|
+ struct list_head perf_event_list;
|
|
+ struct mempolicy *mempolicy;
|
|
+ short int il_prev;
|
|
+ short int pref_node_fork;
|
|
+ int numa_scan_seq;
|
|
+ unsigned int numa_scan_period;
|
|
+ unsigned int numa_scan_period_max;
|
|
+ int numa_preferred_nid;
|
|
+ long unsigned int numa_migrate_retry;
|
|
+ u64 node_stamp;
|
|
+ u64 last_task_numa_placement;
|
|
+ u64 last_sum_exec_runtime;
|
|
+ struct callback_head numa_work;
|
|
+ struct numa_group *numa_group;
|
|
+ long unsigned int *numa_faults;
|
|
+ long unsigned int total_numa_faults;
|
|
+ long unsigned int numa_faults_locality[3];
|
|
+ long unsigned int numa_pages_migrated;
|
|
+ struct rseq *rseq;
|
|
+ u32 rseq_len;
|
|
+ u32 rseq_sig;
|
|
+ long unsigned int rseq_event_mask;
|
|
+ struct tlbflush_unmap_batch tlb_ubc;
|
|
+ union {
|
|
+ refcount_t rcu_users;
|
|
+ struct callback_head rcu;
|
|
+ };
|
|
+ struct pipe_inode_info___2 *splice_pipe;
|
|
+ struct page_frag___2 task_frag;
|
|
+ struct task_delay_info *delays;
|
|
+ int nr_dirtied;
|
|
+ int nr_dirtied_pause;
|
|
+ long unsigned int dirty_paused_when;
|
|
+ u64 timer_slack_ns;
|
|
+ u64 default_timer_slack_ns;
|
|
+ int curr_ret_stack;
|
|
+ int curr_ret_depth;
|
|
+ struct ftrace_ret_stack *ret_stack;
|
|
+ long long unsigned int ftrace_timestamp;
|
|
+ atomic_t trace_overrun;
|
|
+ atomic_t tracing_graph_pause;
|
|
+ long unsigned int trace;
|
|
+ long unsigned int trace_recursion;
|
|
+ struct mem_cgroup *memcg_in_oom;
|
|
+ gfp_t memcg_oom_gfp_mask;
|
|
+ int memcg_oom_order;
|
|
+ unsigned int memcg_nr_pages_over_high;
|
|
+ struct mem_cgroup *active_memcg;
|
|
+ struct request_queue *throttle_queue;
|
|
+ struct uprobe_task *utask;
|
|
+ int pagefault_disabled;
|
|
+ struct task_struct___2 *oom_reaper_list;
|
|
+ struct vm_struct___2 *stack_vm_area;
|
|
+ atomic_t stack_refcount;
|
|
+ int patch_state;
|
|
+ void *security;
|
|
+ u64 parent_exec_id_u64;
|
|
+ u64 self_exec_id_u64;
|
|
+ struct mutex *futex_exit_mutex;
|
|
+ long unsigned int futex_state;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct thread_struct___2 thread;
|
|
+};
|
|
+
|
|
+struct pv_mmu_ops___2 {
|
|
+ long unsigned int (*read_cr2)();
|
|
+ void (*write_cr2)(long unsigned int);
|
|
+ long unsigned int (*read_cr3)();
|
|
+ void (*write_cr3)(long unsigned int);
|
|
+ void (*activate_mm)(struct mm_struct___2 *, struct mm_struct___2 *);
|
|
+ void (*dup_mmap)(struct mm_struct___2 *, struct mm_struct___2 *);
|
|
+ void (*exit_mmap)(struct mm_struct___2 *);
|
|
+ void (*flush_tlb_user)();
|
|
+ void (*flush_tlb_kernel)();
|
|
+ void (*flush_tlb_one_user)(long unsigned int);
|
|
+ void (*flush_tlb_others)(const struct cpumask *, const struct flush_tlb_info *);
|
|
+ void (*tlb_remove_table)(struct mmu_gather *, void *);
|
|
+ int (*pgd_alloc)(struct mm_struct___2 *);
|
|
+ void (*pgd_free)(struct mm_struct___2 *, pgd_t *);
|
|
+ void (*alloc_pte)(struct mm_struct___2 *, long unsigned int);
|
|
+ void (*alloc_pmd)(struct mm_struct___2 *, long unsigned int);
|
|
+ void (*alloc_pud)(struct mm_struct___2 *, long unsigned int);
|
|
+ void (*alloc_p4d)(struct mm_struct___2 *, long unsigned int);
|
|
+ void (*release_pte)(long unsigned int);
|
|
+ void (*release_pmd)(long unsigned int);
|
|
+ void (*release_pud)(long unsigned int);
|
|
+ void (*release_p4d)(long unsigned int);
|
|
+ void (*set_pte)(pte_t *, pte_t);
|
|
+ void (*set_pte_at)(struct mm_struct___2 *, long unsigned int, pte_t *, pte_t);
|
|
+ void (*set_pmd)(pmd_t *, pmd_t);
|
|
+ pte_t (*ptep_modify_prot_start)(struct mm_struct___2 *, long unsigned int, pte_t *);
|
|
+ void (*ptep_modify_prot_commit)(struct mm_struct___2 *, long unsigned int, pte_t *, pte_t);
|
|
+ struct paravirt_callee_save pte_val;
|
|
+ struct paravirt_callee_save make_pte;
|
|
+ struct paravirt_callee_save pgd_val;
|
|
+ struct paravirt_callee_save make_pgd;
|
|
+ void (*set_pud)(pud_t *, pud_t);
|
|
+ struct paravirt_callee_save pmd_val;
|
|
+ struct paravirt_callee_save make_pmd;
|
|
+ struct paravirt_callee_save pud_val;
|
|
+ struct paravirt_callee_save make_pud;
|
|
+ void (*set_p4d)(p4d_t *, p4d_t);
|
|
+ struct paravirt_callee_save p4d_val;
|
|
+ struct paravirt_callee_save make_p4d;
|
|
+ void (*set_pgd)(pgd_t *, pgd_t);
|
|
+ struct pv_lazy_ops lazy_mode;
|
|
+ void (*set_fixmap)(unsigned int, phys_addr_t, pgprot_t);
|
|
+};
|
|
+
|
|
+struct rw_semaphore___2 {
|
|
+ atomic_long_t count;
|
|
+ struct list_head wait_list;
|
|
+ raw_spinlock_t wait_lock;
|
|
+ struct optimistic_spin_queue osq;
|
|
+ struct task_struct___2 *owner;
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ u64 ctx_id;
|
|
+ atomic64_t tlb_gen;
|
|
+ struct rw_semaphore___2 ldt_usr_sem;
|
|
+ struct ldt_struct *ldt;
|
|
+ short unsigned int ia32_compat;
|
|
+ struct mutex lock;
|
|
+ void *vdso;
|
|
+ const struct vdso_image *vdso_image;
|
|
+ atomic_t perf_rdpmc_allowed;
|
|
+ u16 pkey_allocation_map;
|
|
+ s16 execute_only_pkey;
|
|
+} mm_context_t___2;
|
|
+
|
|
+struct core_state___2;
|
|
+
|
|
+struct user_namespace___2;
|
|
+
|
|
+struct mm_struct___2 {
|
|
+ struct {
|
|
+ struct vm_area_struct___2 *mmap;
|
|
+ struct rb_root mm_rb;
|
|
+ u64 vmacache_seqnum;
|
|
+ long unsigned int (*get_unmapped_area)(struct file___2 *, long unsigned int, long unsigned int, long unsigned int, long unsigned int);
|
|
+ long unsigned int mmap_base;
|
|
+ long unsigned int mmap_legacy_base;
|
|
+ long unsigned int mmap_compat_base;
|
|
+ long unsigned int mmap_compat_legacy_base;
|
|
+ long unsigned int task_size;
|
|
+ long unsigned int highest_vm_end;
|
|
+ pgd_t *pgd;
|
|
+ atomic_t membarrier_state;
|
|
+ atomic_t mm_users;
|
|
+ atomic_t mm_count;
|
|
+ atomic_long_t pgtables_bytes;
|
|
+ int map_count;
|
|
+ spinlock_t page_table_lock;
|
|
+ struct rw_semaphore___2 mmap_sem;
|
|
+ struct list_head mmlist;
|
|
+ long unsigned int hiwater_rss;
|
|
+ long unsigned int hiwater_vm;
|
|
+ long unsigned int total_vm;
|
|
+ atomic_long_t locked_vm;
|
|
+ long unsigned int pinned_vm;
|
|
+ long unsigned int data_vm;
|
|
+ long unsigned int exec_vm;
|
|
+ long unsigned int stack_vm;
|
|
+ long unsigned int def_flags;
|
|
+ spinlock_t arg_lock;
|
|
+ long unsigned int start_code;
|
|
+ long unsigned int end_code;
|
|
+ long unsigned int start_data;
|
|
+ long unsigned int end_data;
|
|
+ long unsigned int start_brk;
|
|
+ long unsigned int brk;
|
|
+ long unsigned int start_stack;
|
|
+ long unsigned int arg_start;
|
|
+ long unsigned int arg_end;
|
|
+ long unsigned int env_start;
|
|
+ long unsigned int env_end;
|
|
+ long unsigned int saved_auxv[46];
|
|
+ struct mm_rss_stat rss_stat;
|
|
+ struct linux_binfmt *binfmt;
|
|
+ mm_context_t___2 context;
|
|
+ long unsigned int flags;
|
|
+ struct core_state___2 *core_state;
|
|
+ spinlock_t ioctx_lock;
|
|
+ struct kioctx_table *ioctx_table;
|
|
+ struct task_struct___2 *owner;
|
|
+ struct user_namespace___2 *user_ns;
|
|
+ struct file___2 *exe_file;
|
|
+ struct mmu_notifier_mm *mmu_notifier_mm;
|
|
+ long unsigned int numa_next_scan;
|
|
+ long unsigned int numa_scan_offset;
|
|
+ int numa_scan_seq;
|
|
+ atomic_t tlb_flush_pending;
|
|
+ bool tlb_flush_batched;
|
|
+ struct uprobes_state uprobes_state;
|
|
+ atomic_long_t hugetlb_usage;
|
|
+ struct work_struct async_put_work;
|
|
+ struct hmm *hmm;
|
|
+ };
|
|
+ struct kvm *kvm;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long unsigned int cpu_bitmap[0];
|
|
+};
|
|
+
|
|
+struct hw_perf_event___2 {
|
|
+ union {
|
|
+ struct {
|
|
+ u64 config;
|
|
+ u64 last_tag;
|
|
+ long unsigned int config_base;
|
|
+ long unsigned int event_base;
|
|
+ int event_base_rdpmc;
|
|
+ int idx;
|
|
+ int last_cpu;
|
|
+ int flags;
|
|
+ struct hw_perf_event_extra extra_reg;
|
|
+ struct hw_perf_event_extra branch_reg;
|
|
+ };
|
|
+ struct {
|
|
+ struct hrtimer hrtimer;
|
|
+ };
|
|
+ struct {
|
|
+ struct list_head tp_list;
|
|
+ };
|
|
+ struct {
|
|
+ u64 pwr_acc;
|
|
+ u64 ptsc;
|
|
+ };
|
|
+ struct {
|
|
+ struct arch_hw_breakpoint info;
|
|
+ struct list_head bp_list;
|
|
+ };
|
|
+ struct {
|
|
+ u8 iommu_bank;
|
|
+ u8 iommu_cntr;
|
|
+ u16 padding;
|
|
+ u64 conf;
|
|
+ u64 conf1;
|
|
+ };
|
|
+ };
|
|
+ struct task_struct___2 *target;
|
|
+ void *addr_filters;
|
|
+ long unsigned int addr_filters_gen;
|
|
+ int state;
|
|
+ local64_t prev_count;
|
|
+ u64 sample_period;
|
|
+ union {
|
|
+ struct {
|
|
+ u64 last_period;
|
|
+ local64_t period_left;
|
|
+ };
|
|
+ struct {
|
|
+ u64 saved_metric;
|
|
+ u64 saved_slots;
|
|
+ };
|
|
+ };
|
|
+ u64 interrupts_seq;
|
|
+ u64 interrupts;
|
|
+ u64 freq_time_stamp;
|
|
+ u64 freq_count_stamp;
|
|
+};
|
|
+
|
|
+typedef void (*perf_overflow_handler_t___2)(struct perf_event___2 *, struct perf_sample_data *, struct pt_regs *);
|
|
+
|
|
+struct pmu___2;
|
|
+
|
|
+struct ring_buffer___2;
|
|
+
|
|
+struct fasync_struct___2;
|
|
+
|
|
+struct pid_namespace___2;
|
|
+
|
|
+struct bpf_prog___2;
|
|
+
|
|
+struct trace_event_call___2;
|
|
+
|
|
+struct perf_cgroup___2;
|
|
+
|
|
+struct perf_event___2 {
|
|
+ struct list_head event_entry;
|
|
+ struct list_head sibling_list;
|
|
+ struct list_head active_list;
|
|
+ struct rb_node group_node;
|
|
+ u64 group_index;
|
|
+ struct list_head migrate_entry;
|
|
+ struct hlist_node hlist_entry;
|
|
+ struct list_head active_entry;
|
|
+ int nr_siblings;
|
|
+ int event_caps;
|
|
+ int group_caps;
|
|
+ struct perf_event___2 *group_leader;
|
|
+ struct pmu___2 *pmu;
|
|
+ void *pmu_private;
|
|
+ enum perf_event_state state;
|
|
+ unsigned int attach_state;
|
|
+ local64_t count;
|
|
+ atomic64_t child_count;
|
|
+ u64 total_time_enabled;
|
|
+ u64 total_time_running;
|
|
+ u64 tstamp;
|
|
+ u64 shadow_ctx_time;
|
|
+ struct perf_event_attr attr;
|
|
+ u16 header_size;
|
|
+ u16 id_header_size;
|
|
+ u16 read_size;
|
|
+ struct hw_perf_event___2 hw;
|
|
+ struct perf_event_context___2 *ctx;
|
|
+ atomic_long_t refcount;
|
|
+ atomic64_t child_total_time_enabled;
|
|
+ atomic64_t child_total_time_running;
|
|
+ struct mutex child_mutex;
|
|
+ struct list_head child_list;
|
|
+ struct perf_event___2 *parent;
|
|
+ int oncpu;
|
|
+ int cpu;
|
|
+ struct list_head owner_entry;
|
|
+ struct task_struct___2 *owner;
|
|
+ struct mutex mmap_mutex;
|
|
+ atomic_t mmap_count;
|
|
+ struct ring_buffer___2 *rb;
|
|
+ struct list_head rb_entry;
|
|
+ long unsigned int rcu_batches;
|
|
+ int rcu_pending;
|
|
+ wait_queue_head_t waitq;
|
|
+ struct fasync_struct___2 *fasync;
|
|
+ int pending_wakeup;
|
|
+ int pending_kill;
|
|
+ int pending_disable;
|
|
+ struct irq_work pending;
|
|
+ atomic_t event_limit;
|
|
+ struct perf_addr_filters_head addr_filters;
|
|
+ struct perf_addr_filter_range *addr_filter_ranges;
|
|
+ long unsigned int addr_filters_gen;
|
|
+ void (*destroy)(struct perf_event___2 *);
|
|
+ struct callback_head callback_head;
|
|
+ struct pid_namespace___2 *ns;
|
|
+ u64 id;
|
|
+ u64 (*clock)();
|
|
+ perf_overflow_handler_t___2 overflow_handler;
|
|
+ void *overflow_handler_context;
|
|
+ perf_overflow_handler_t___2 orig_overflow_handler;
|
|
+ struct bpf_prog___2 *prog;
|
|
+ struct trace_event_call___2 *tp_event;
|
|
+ struct event_filter *filter;
|
|
+ struct ftrace_ops ftrace_ops;
|
|
+ struct perf_cgroup___2 *cgrp;
|
|
+ struct list_head sb_list;
|
|
+};
|
|
+
|
|
+struct dentry_operations___2;
|
|
+
|
|
+struct dentry___2 {
|
|
+ unsigned int d_flags;
|
|
+ seqcount_t d_seq;
|
|
+ struct hlist_bl_node d_hash;
|
|
+ struct dentry___2 *d_parent;
|
|
+ struct qstr d_name;
|
|
+ struct inode___2 *d_inode;
|
|
+ unsigned char d_iname[32];
|
|
+ struct lockref d_lockref;
|
|
+ const struct dentry_operations___2 *d_op;
|
|
+ struct super_block___2 *d_sb;
|
|
+ long unsigned int d_time;
|
|
+ void *d_fsdata;
|
|
+ union {
|
|
+ struct list_head d_lru;
|
|
+ wait_queue_head_t *d_wait;
|
|
+ };
|
|
+ struct list_head d_child;
|
|
+ struct list_head d_subdirs;
|
|
+ union {
|
|
+ struct hlist_node d_alias;
|
|
+ struct hlist_bl_node d_in_lookup_hash;
|
|
+ struct callback_head d_rcu;
|
|
+ } d_u;
|
|
+ atomic_t d_neg_dnum;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct address_space_operations___2;
|
|
+
|
|
+struct address_space___2 {
|
|
+ struct inode___2 *host;
|
|
+ struct radix_tree_root i_pages;
|
|
+ atomic_t i_mmap_writable;
|
|
+ struct rb_root_cached i_mmap;
|
|
+ struct rw_semaphore___2 i_mmap_rwsem;
|
|
+ long unsigned int nrpages;
|
|
+ long unsigned int nrexceptional;
|
|
+ long unsigned int writeback_index;
|
|
+ const struct address_space_operations___2 *a_ops;
|
|
+ long unsigned int flags;
|
|
+ spinlock_t private_lock;
|
|
+ gfp_t gfp_mask;
|
|
+ struct list_head private_list;
|
|
+ void *private_data;
|
|
+ errseq_t wb_err;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct inode_operations___2;
|
|
+
|
|
+struct block_device___2;
|
|
+
|
|
+struct inode___2 {
|
|
+ umode_t i_mode;
|
|
+ short unsigned int i_opflags;
|
|
+ kuid_t i_uid;
|
|
+ kgid_t i_gid;
|
|
+ unsigned int i_flags;
|
|
+ struct posix_acl *i_acl;
|
|
+ struct posix_acl *i_default_acl;
|
|
+ const struct inode_operations___2 *i_op;
|
|
+ struct super_block___2 *i_sb;
|
|
+ struct address_space___2 *i_mapping;
|
|
+ void *i_security;
|
|
+ long unsigned int i_ino;
|
|
+ union {
|
|
+ const unsigned int i_nlink;
|
|
+ unsigned int __i_nlink;
|
|
+ };
|
|
+ dev_t i_rdev;
|
|
+ loff_t i_size;
|
|
+ struct timespec64 i_atime;
|
|
+ struct timespec64 i_mtime;
|
|
+ struct timespec64 i_ctime;
|
|
+ spinlock_t i_lock;
|
|
+ short unsigned int i_bytes;
|
|
+ u8 i_blkbits;
|
|
+ u8 i_write_hint;
|
|
+ blkcnt_t i_blocks;
|
|
+ long unsigned int i_state;
|
|
+ struct rw_semaphore___2 i_rwsem;
|
|
+ long unsigned int dirtied_when;
|
|
+ long unsigned int dirtied_time_when;
|
|
+ struct hlist_node i_hash;
|
|
+ struct list_head i_io_list;
|
|
+ struct bdi_writeback *i_wb;
|
|
+ int i_wb_frn_winner;
|
|
+ u16 i_wb_frn_avg_time;
|
|
+ u16 i_wb_frn_history;
|
|
+ struct list_head i_lru;
|
|
+ struct list_head i_sb_list;
|
|
+ struct list_head i_wb_list;
|
|
+ union {
|
|
+ struct hlist_head i_dentry;
|
|
+ struct callback_head i_rcu;
|
|
+ };
|
|
+ atomic64_t i_version;
|
|
+ atomic_t i_count;
|
|
+ atomic_t i_dio_count;
|
|
+ atomic_t i_writecount;
|
|
+ atomic_t i_readcount;
|
|
+ const struct file_operations___2 *i_fop;
|
|
+ struct file_lock_context *i_flctx;
|
|
+ struct address_space___2 i_data;
|
|
+ struct list_head i_devices;
|
|
+ union {
|
|
+ struct pipe_inode_info___2 *i_pipe;
|
|
+ struct block_device___2 *i_bdev;
|
|
+ struct cdev *i_cdev;
|
|
+ char *i_link;
|
|
+ unsigned int i_dir_seq;
|
|
+ };
|
|
+ __u32 i_generation;
|
|
+ __u32 i_fsnotify_mask;
|
|
+ struct fsnotify_mark_connector *i_fsnotify_marks;
|
|
+ void *i_private;
|
|
+ atomic64_t i_sequence;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct vfsmount___2;
|
|
+
|
|
+struct path___2;
|
|
+
|
|
+struct dentry_operations___2 {
|
|
+ int (*d_revalidate)(struct dentry___2 *, unsigned int);
|
|
+ int (*d_weak_revalidate)(struct dentry___2 *, unsigned int);
|
|
+ int (*d_hash)(const struct dentry___2 *, struct qstr *);
|
|
+ int (*d_compare)(const struct dentry___2 *, unsigned int, const char *, const struct qstr *);
|
|
+ int (*d_delete)(const struct dentry___2 *);
|
|
+ int (*d_init)(struct dentry___2 *);
|
|
+ void (*d_release)(struct dentry___2 *);
|
|
+ void (*d_prune)(struct dentry___2 *);
|
|
+ void (*d_iput)(struct dentry___2 *, struct inode___2 *);
|
|
+ char * (*d_dname)(struct dentry___2 *, char *, int);
|
|
+ struct vfsmount___2 * (*d_automount)(struct path___2 *);
|
|
+ int (*d_manage)(const struct path___2 *, bool);
|
|
+ struct dentry___2 * (*d_real)(struct dentry___2 *, const struct inode___2 *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct quota_format_type___2;
|
|
+
|
|
+struct mem_dqinfo___2 {
|
|
+ struct quota_format_type___2 *dqi_format;
|
|
+ int dqi_fmt_id;
|
|
+ struct list_head dqi_dirty_list;
|
|
+ long unsigned int dqi_flags;
|
|
+ unsigned int dqi_bgrace;
|
|
+ unsigned int dqi_igrace;
|
|
+ qsize_t dqi_max_spc_limit;
|
|
+ qsize_t dqi_max_ino_limit;
|
|
+ void *dqi_priv;
|
|
+};
|
|
+
|
|
+struct quota_format_ops___2;
|
|
+
|
|
+struct quota_info___2 {
|
|
+ unsigned int flags;
|
|
+ struct rw_semaphore___2 dqio_sem;
|
|
+ struct inode___2 *files[3];
|
|
+ struct mem_dqinfo___2 info[3];
|
|
+ const struct quota_format_ops___2 *ops[3];
|
|
+};
|
|
+
|
|
+struct rcuwait___2 {
|
|
+ struct task_struct___2 *task;
|
|
+};
|
|
+
|
|
+struct percpu_rw_semaphore___2 {
|
|
+ struct rcu_sync rss;
|
|
+ unsigned int *read_count;
|
|
+ struct rw_semaphore___2 rw_sem;
|
|
+ struct rcuwait___2 writer;
|
|
+ int readers_block;
|
|
+};
|
|
+
|
|
+struct sb_writers___2 {
|
|
+ int frozen;
|
|
+ wait_queue_head_t wait_unfrozen;
|
|
+ struct percpu_rw_semaphore___2 rw_sem[3];
|
|
+};
|
|
+
|
|
+struct file_system_type___2;
|
|
+
|
|
+struct super_operations___2;
|
|
+
|
|
+struct dquot_operations___2;
|
|
+
|
|
+struct quotactl_ops___2;
|
|
+
|
|
+struct super_block___2 {
|
|
+ struct list_head s_list;
|
|
+ dev_t s_dev;
|
|
+ unsigned char s_blocksize_bits;
|
|
+ long unsigned int s_blocksize;
|
|
+ loff_t s_maxbytes;
|
|
+ struct file_system_type___2 *s_type;
|
|
+ const struct super_operations___2 *s_op;
|
|
+ const struct dquot_operations___2 *dq_op;
|
|
+ const struct quotactl_ops___2 *s_qcop;
|
|
+ const struct export_operations *s_export_op;
|
|
+ long unsigned int s_flags;
|
|
+ long unsigned int s_iflags;
|
|
+ long unsigned int s_magic;
|
|
+ struct dentry___2 *s_root;
|
|
+ struct rw_semaphore___2 s_umount;
|
|
+ int s_count;
|
|
+ atomic_t s_active;
|
|
+ void *s_security;
|
|
+ const struct xattr_handler **s_xattr;
|
|
+ struct hlist_bl_head s_roots;
|
|
+ struct list_head s_mounts;
|
|
+ struct block_device___2 *s_bdev;
|
|
+ struct backing_dev_info *s_bdi;
|
|
+ struct mtd_info *s_mtd;
|
|
+ struct hlist_node s_instances;
|
|
+ unsigned int s_quota_types;
|
|
+ struct quota_info___2 s_dquot;
|
|
+ struct sb_writers___2 s_writers;
|
|
+ char s_id[32];
|
|
+ uuid_t s_uuid;
|
|
+ void *s_fs_info;
|
|
+ unsigned int s_max_links;
|
|
+ fmode_t s_mode;
|
|
+ u32 s_time_gran;
|
|
+ struct mutex s_vfs_rename_mutex;
|
|
+ char *s_subtype;
|
|
+ const struct dentry_operations___2 *s_d_op;
|
|
+ int cleancache_poolid;
|
|
+ struct shrinker s_shrink;
|
|
+ atomic_long_t s_remove_count;
|
|
+ atomic_long_t s_fsnotify_inode_refs;
|
|
+ int s_readonly_remount;
|
|
+ struct workqueue_struct *s_dio_done_wq;
|
|
+ struct hlist_head s_pins;
|
|
+ struct user_namespace___2 *s_user_ns;
|
|
+ struct list_lru s_dentry_lru;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct list_lru s_inode_lru;
|
|
+ struct callback_head rcu;
|
|
+ struct work_struct destroy_work;
|
|
+ struct mutex s_sync_lock;
|
|
+ int s_stack_depth;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ spinlock_t s_inode_list_lock;
|
|
+ struct list_head s_inodes;
|
|
+ spinlock_t s_inode_wblist_lock;
|
|
+ struct list_head s_inodes_wb;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct vfsmount___2 {
|
|
+ struct dentry___2 *mnt_root;
|
|
+ struct super_block___2 *mnt_sb;
|
|
+ int mnt_flags;
|
|
+};
|
|
+
|
|
+struct path___2 {
|
|
+ struct vfsmount___2 *mnt;
|
|
+ struct dentry___2 *dentry;
|
|
+};
|
|
+
|
|
+struct proc_ns_operations___2;
|
|
+
|
|
+struct ns_common___2 {
|
|
+ atomic_long_t stashed;
|
|
+ const struct proc_ns_operations___2 *ops;
|
|
+ unsigned int inum;
|
|
+};
|
|
+
|
|
+struct key___2;
|
|
+
|
|
+struct ucounts___2;
|
|
+
|
|
+struct user_namespace___2 {
|
|
+ struct uid_gid_map uid_map;
|
|
+ struct uid_gid_map gid_map;
|
|
+ struct uid_gid_map projid_map;
|
|
+ atomic_t count;
|
|
+ struct user_namespace___2 *parent;
|
|
+ int level;
|
|
+ kuid_t owner;
|
|
+ kgid_t group;
|
|
+ struct ns_common___2 ns;
|
|
+ long unsigned int flags;
|
|
+ struct key___2 *persistent_keyring_register;
|
|
+ struct rw_semaphore___2 persistent_keyring_register_sem;
|
|
+ struct work_struct work;
|
|
+ struct ctl_table_set set;
|
|
+ struct ctl_table_header *sysctls;
|
|
+ struct ucounts___2 *ucounts;
|
|
+ int ucount_max[9];
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct upid___2 {
|
|
+ int nr;
|
|
+ struct pid_namespace___2 *ns;
|
|
+};
|
|
+
|
|
+struct pid_namespace___2 {
|
|
+ struct kref kref;
|
|
+ struct idr idr;
|
|
+ struct callback_head rcu;
|
|
+ unsigned int pid_allocated;
|
|
+ struct task_struct___2 *child_reaper;
|
|
+ struct kmem_cache *pid_cachep;
|
|
+ unsigned int level;
|
|
+ struct pid_namespace___2 *parent;
|
|
+ struct vfsmount___2 *proc_mnt;
|
|
+ struct dentry___2 *proc_self;
|
|
+ struct dentry___2 *proc_thread_self;
|
|
+ struct fs_pin *bacct;
|
|
+ struct user_namespace___2 *user_ns;
|
|
+ struct ucounts___2 *ucounts;
|
|
+ struct work_struct proc_work;
|
|
+ kgid_t pid_gid;
|
|
+ int hide_pid;
|
|
+ int pid_max;
|
|
+ int reboot;
|
|
+ struct ns_common___2 ns;
|
|
+};
|
|
+
|
|
+struct pid___2 {
|
|
+ atomic_t count;
|
|
+ unsigned int level;
|
|
+ struct hlist_head tasks[4];
|
|
+ struct callback_head rcu;
|
|
+ struct upid___2 numbers[1];
|
|
+};
|
|
+
|
|
+struct vm_operations_struct___2;
|
|
+
|
|
+struct vm_area_struct___2 {
|
|
+ long unsigned int vm_start;
|
|
+ long unsigned int vm_end;
|
|
+ struct vm_area_struct___2 *vm_next;
|
|
+ struct vm_area_struct___2 *vm_prev;
|
|
+ struct rb_node vm_rb;
|
|
+ long unsigned int rb_subtree_gap;
|
|
+ struct mm_struct___2 *vm_mm;
|
|
+ pgprot_t vm_page_prot;
|
|
+ long unsigned int vm_flags;
|
|
+ struct {
|
|
+ struct rb_node rb;
|
|
+ long unsigned int rb_subtree_last;
|
|
+ } shared;
|
|
+ struct list_head anon_vma_chain;
|
|
+ struct anon_vma *anon_vma;
|
|
+ const struct vm_operations_struct___2 *vm_ops;
|
|
+ long unsigned int vm_pgoff;
|
|
+ struct file___2 *vm_file;
|
|
+ void *vm_private_data;
|
|
+ atomic_long_t swap_readahead_info;
|
|
+ struct mempolicy *vm_policy;
|
|
+ struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct blocking_notifier_head___2 {
|
|
+ struct rw_semaphore___2 rwsem;
|
|
+ struct notifier_block *head;
|
|
+};
|
|
+
|
|
+typedef int (*dev_page_fault_t___2)(struct vm_area_struct___2 *, long unsigned int, const struct page___2 *, unsigned int, pmd_t *);
|
|
+
|
|
+typedef void (*dev_page_free_t___2)(struct page___2 *, void *);
|
|
+
|
|
+struct device___2;
|
|
+
|
|
+struct dev_pagemap___2 {
|
|
+ dev_page_fault_t___2 page_fault;
|
|
+ dev_page_free_t___2 page_free;
|
|
+ struct vmem_altmap altmap;
|
|
+ bool altmap_valid;
|
|
+ struct resource res;
|
|
+ struct percpu_ref *ref;
|
|
+ void (*kill)(struct percpu_ref *);
|
|
+ struct device___2 *dev;
|
|
+ void *data;
|
|
+ enum memory_type type;
|
|
+};
|
|
+
|
|
+struct fown_struct___2 {
|
|
+ rwlock_t lock;
|
|
+ struct pid___2 *pid;
|
|
+ enum pid_type pid_type;
|
|
+ kuid_t uid;
|
|
+ kuid_t euid;
|
|
+ int signum;
|
|
+};
|
|
+
|
|
+struct file___2 {
|
|
+ union {
|
|
+ struct llist_node fu_llist;
|
|
+ struct callback_head fu_rcuhead;
|
|
+ } f_u;
|
|
+ struct path___2 f_path;
|
|
+ struct inode___2 *f_inode;
|
|
+ const struct file_operations___2 *f_op;
|
|
+ spinlock_t f_lock;
|
|
+ enum rw_hint f_write_hint;
|
|
+ atomic_long_t f_count;
|
|
+ unsigned int f_flags;
|
|
+ fmode_t f_mode;
|
|
+ struct mutex f_pos_lock;
|
|
+ loff_t f_pos;
|
|
+ struct fown_struct___2 f_owner;
|
|
+ const struct cred___2 *f_cred;
|
|
+ struct file_ra_state f_ra;
|
|
+ u64 f_version;
|
|
+ void *f_security;
|
|
+ void *private_data;
|
|
+ struct list_head f_ep_links;
|
|
+ struct list_head f_tfile_llink;
|
|
+ struct address_space___2 *f_mapping;
|
|
+ errseq_t f_wb_err;
|
|
+};
|
|
+
|
|
+struct vm_fault___2;
|
|
+
|
|
+struct vm_operations_struct___2 {
|
|
+ void (*open)(struct vm_area_struct___2 *);
|
|
+ void (*close)(struct vm_area_struct___2 *);
|
|
+ int (*split)(struct vm_area_struct___2 *, long unsigned int);
|
|
+ int (*mremap)(struct vm_area_struct___2 *);
|
|
+ vm_fault_t (*fault)(struct vm_fault___2 *);
|
|
+ vm_fault_t (*huge_fault)(struct vm_fault___2 *, enum page_entry_size);
|
|
+ void (*map_pages)(struct vm_fault___2 *, long unsigned int, long unsigned int);
|
|
+ long unsigned int (*pagesize)(struct vm_area_struct___2 *);
|
|
+ vm_fault_t (*page_mkwrite)(struct vm_fault___2 *);
|
|
+ vm_fault_t (*pfn_mkwrite)(struct vm_fault___2 *);
|
|
+ int (*access)(struct vm_area_struct___2 *, long unsigned int, void *, int, int);
|
|
+ const char * (*name)(struct vm_area_struct___2 *);
|
|
+ int (*set_policy)(struct vm_area_struct___2 *, struct mempolicy *);
|
|
+ struct mempolicy * (*get_policy)(struct vm_area_struct___2 *, long unsigned int);
|
|
+ struct page___2 * (*find_special_page)(struct vm_area_struct___2 *, long unsigned int);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct core_thread___2 {
|
|
+ struct task_struct___2 *task;
|
|
+ struct core_thread___2 *next;
|
|
+};
|
|
+
|
|
+struct core_state___2 {
|
|
+ atomic_t nr_threads;
|
|
+ struct core_thread___2 dumper;
|
|
+ struct completion startup;
|
|
+};
|
|
+
|
|
+struct vm_fault___2 {
|
|
+ struct vm_area_struct___2 *vma;
|
|
+ unsigned int flags;
|
|
+ gfp_t gfp_mask;
|
|
+ long unsigned int pgoff;
|
|
+ long unsigned int address;
|
|
+ pmd_t *pmd;
|
|
+ pud_t *pud;
|
|
+ pte_t orig_pte;
|
|
+ struct page___2 *cow_page;
|
|
+ struct mem_cgroup *memcg;
|
|
+ struct page___2 *page;
|
|
+ pte_t *pte;
|
|
+ spinlock_t *ptl;
|
|
+ pgtable_t___2 prealloc_pte;
|
|
+};
|
|
+
|
|
+struct fwnode_operations___2;
|
|
+
|
|
+struct fwnode_handle___2 {
|
|
+ struct fwnode_handle___2 *secondary;
|
|
+ const struct fwnode_operations___2 *ops;
|
|
+};
|
|
+
|
|
+struct fwnode_reference_args___2;
|
|
+
|
|
+struct fwnode_endpoint___2;
|
|
+
|
|
+struct fwnode_operations___2 {
|
|
+ struct fwnode_handle___2 * (*get)(struct fwnode_handle___2 *);
|
|
+ void (*put)(struct fwnode_handle___2 *);
|
|
+ bool (*device_is_available)(const struct fwnode_handle___2 *);
|
|
+ const void * (*device_get_match_data)(const struct fwnode_handle___2 *, const struct device___2 *);
|
|
+ bool (*property_present)(const struct fwnode_handle___2 *, const char *);
|
|
+ int (*property_read_int_array)(const struct fwnode_handle___2 *, const char *, unsigned int, void *, size_t);
|
|
+ int (*property_read_string_array)(const struct fwnode_handle___2 *, const char *, const char **, size_t);
|
|
+ struct fwnode_handle___2 * (*get_parent)(const struct fwnode_handle___2 *);
|
|
+ struct fwnode_handle___2 * (*get_next_child_node)(const struct fwnode_handle___2 *, struct fwnode_handle___2 *);
|
|
+ struct fwnode_handle___2 * (*get_named_child_node)(const struct fwnode_handle___2 *, const char *);
|
|
+ int (*get_reference_args)(const struct fwnode_handle___2 *, const char *, const char *, unsigned int, unsigned int, struct fwnode_reference_args___2 *);
|
|
+ struct fwnode_handle___2 * (*graph_get_next_endpoint)(const struct fwnode_handle___2 *, struct fwnode_handle___2 *);
|
|
+ struct fwnode_handle___2 * (*graph_get_remote_endpoint)(const struct fwnode_handle___2 *);
|
|
+ struct fwnode_handle___2 * (*graph_get_port_parent)(struct fwnode_handle___2 *);
|
|
+ int (*graph_parse_endpoint)(const struct fwnode_handle___2 *, struct fwnode_endpoint___2 *);
|
|
+};
|
|
+
|
|
+struct fwnode_endpoint___2 {
|
|
+ unsigned int port;
|
|
+ unsigned int id;
|
|
+ const struct fwnode_handle___2 *local_fwnode;
|
|
+};
|
|
+
|
|
+struct fwnode_reference_args___2 {
|
|
+ struct fwnode_handle___2 *fwnode;
|
|
+ unsigned int nargs;
|
|
+ u64 args[8];
|
|
+};
|
|
+
|
|
+struct kset___2;
|
|
+
|
|
+struct kobj_type___2;
|
|
+
|
|
+struct kernfs_node___2;
|
|
+
|
|
+struct kobject___3 {
|
|
+ const char *name;
|
|
+ struct list_head entry;
|
|
+ struct kobject___3 *parent;
|
|
+ struct kset___2 *kset;
|
|
+ struct kobj_type___2 *ktype;
|
|
+ struct kernfs_node___2 *sd;
|
|
+ struct kref kref;
|
|
+ unsigned int state_initialized: 1;
|
|
+ unsigned int state_in_sysfs: 1;
|
|
+ unsigned int state_add_uevent_sent: 1;
|
|
+ unsigned int state_remove_uevent_sent: 1;
|
|
+ unsigned int uevent_suppress: 1;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct dev_pm_info___2 {
|
|
+ pm_message_t power_state;
|
|
+ unsigned int can_wakeup: 1;
|
|
+ unsigned int async_suspend: 1;
|
|
+ bool in_dpm_list: 1;
|
|
+ bool is_prepared: 1;
|
|
+ bool is_suspended: 1;
|
|
+ bool is_noirq_suspended: 1;
|
|
+ bool is_late_suspended: 1;
|
|
+ bool early_init: 1;
|
|
+ bool direct_complete: 1;
|
|
+ u32 driver_flags;
|
|
+ spinlock_t lock;
|
|
+ struct list_head entry;
|
|
+ struct completion completion;
|
|
+ struct wakeup_source *wakeup;
|
|
+ bool wakeup_path: 1;
|
|
+ bool syscore: 1;
|
|
+ bool no_pm_callbacks: 1;
|
|
+ unsigned int must_resume: 1;
|
|
+ unsigned int may_skip_resume: 1;
|
|
+ struct timer_list suspend_timer;
|
|
+ long unsigned int timer_expires;
|
|
+ struct work_struct work;
|
|
+ wait_queue_head_t wait_queue;
|
|
+ struct wake_irq *wakeirq;
|
|
+ atomic_t usage_count;
|
|
+ atomic_t child_count;
|
|
+ unsigned int disable_depth: 3;
|
|
+ unsigned int idle_notification: 1;
|
|
+ unsigned int request_pending: 1;
|
|
+ unsigned int deferred_resume: 1;
|
|
+ unsigned int runtime_auto: 1;
|
|
+ bool ignore_children: 1;
|
|
+ unsigned int no_callbacks: 1;
|
|
+ unsigned int irq_safe: 1;
|
|
+ unsigned int use_autosuspend: 1;
|
|
+ unsigned int timer_autosuspends: 1;
|
|
+ unsigned int memalloc_noio: 1;
|
|
+ unsigned int links_count;
|
|
+ enum rpm_request request;
|
|
+ enum rpm_status runtime_status;
|
|
+ int runtime_error;
|
|
+ int autosuspend_delay;
|
|
+ long unsigned int last_busy;
|
|
+ long unsigned int active_jiffies;
|
|
+ long unsigned int suspended_jiffies;
|
|
+ long unsigned int accounting_timestamp;
|
|
+ struct pm_subsys_data *subsys_data;
|
|
+ void (*set_latency_tolerance)(struct device___2 *, s32);
|
|
+ struct dev_pm_qos *qos;
|
|
+};
|
|
+
|
|
+struct device_type___2;
|
|
+
|
|
+struct bus_type___2;
|
|
+
|
|
+struct device_driver___2;
|
|
+
|
|
+struct dev_pm_domain___2;
|
|
+
|
|
+struct dma_map_ops___2;
|
|
+
|
|
+struct device_node___2;
|
|
+
|
|
+struct class___2;
|
|
+
|
|
+struct attribute_group___2;
|
|
+
|
|
+struct device___2 {
|
|
+ struct device___2 *parent;
|
|
+ struct device_private *p;
|
|
+ struct kobject___3 kobj;
|
|
+ const char *init_name;
|
|
+ const struct device_type___2 *type;
|
|
+ struct mutex mutex;
|
|
+ struct bus_type___2 *bus;
|
|
+ struct device_driver___2 *driver;
|
|
+ void *platform_data;
|
|
+ void *driver_data;
|
|
+ struct dev_links_info links;
|
|
+ struct dev_pm_info___2 power;
|
|
+ struct dev_pm_domain___2 *pm_domain;
|
|
+ struct irq_domain *msi_domain;
|
|
+ struct dev_pin_info *pins;
|
|
+ struct list_head msi_list;
|
|
+ int numa_node;
|
|
+ const struct dma_map_ops___2 *dma_ops;
|
|
+ u64 *dma_mask;
|
|
+ u64 coherent_dma_mask;
|
|
+ u64 bus_dma_mask;
|
|
+ long unsigned int dma_pfn_offset;
|
|
+ struct device_dma_parameters *dma_parms;
|
|
+ struct list_head dma_pools;
|
|
+ struct dma_coherent_mem *dma_mem;
|
|
+ struct dev_archdata archdata;
|
|
+ struct device_node___2 *of_node;
|
|
+ struct fwnode_handle___2 *fwnode;
|
|
+ dev_t devt;
|
|
+ u32 id;
|
|
+ spinlock_t devres_lock;
|
|
+ struct list_head devres_head;
|
|
+ struct klist_node knode_class;
|
|
+ struct class___2 *class;
|
|
+ const struct attribute_group___2 **groups;
|
|
+ void (*release)(struct device___2 *);
|
|
+ struct iommu_group *iommu_group;
|
|
+ struct iommu_fwspec *iommu_fwspec;
|
|
+ struct iommu_param *iommu_param;
|
|
+ bool offline_disabled: 1;
|
|
+ bool offline: 1;
|
|
+ bool of_node_reused: 1;
|
|
+ union {
|
|
+ raw_spinlock_t msi_lock;
|
|
+ long unsigned int kabi_reserve1;
|
|
+ };
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long unsigned int kabi_reserved9;
|
|
+ long unsigned int kabi_reserved10;
|
|
+ long unsigned int kabi_reserved11;
|
|
+ long unsigned int kabi_reserved12;
|
|
+ long unsigned int kabi_reserved13;
|
|
+ long unsigned int kabi_reserved14;
|
|
+ long unsigned int kabi_reserved15;
|
|
+ long unsigned int kabi_reserved16;
|
|
+};
|
|
+
|
|
+struct vm_struct___2 {
|
|
+ struct vm_struct___2 *next;
|
|
+ void *addr;
|
|
+ long unsigned int size;
|
|
+ long unsigned int flags;
|
|
+ struct page___2 **pages;
|
|
+ unsigned int nr_pages;
|
|
+ phys_addr_t phys_addr;
|
|
+ const void *caller;
|
|
+};
|
|
+
|
|
+struct smp_ops___2 {
|
|
+ void (*smp_prepare_boot_cpu)();
|
|
+ void (*smp_prepare_cpus)(unsigned int);
|
|
+ void (*smp_cpus_done)(unsigned int);
|
|
+ void (*stop_other_cpus)(int);
|
|
+ void (*crash_stop_other_cpus)();
|
|
+ void (*smp_send_reschedule)(int);
|
|
+ int (*cpu_up)(unsigned int, struct task_struct___2 *);
|
|
+ int (*cpu_disable)();
|
|
+ void (*cpu_die)(unsigned int);
|
|
+ void (*play_dead)();
|
|
+ void (*send_call_func_ipi)(const struct cpumask *);
|
|
+ void (*send_call_func_single_ipi)(int);
|
|
+};
|
|
+
|
|
+struct user_struct___2 {
|
|
+ refcount_t __count;
|
|
+ atomic_t processes;
|
|
+ atomic_t sigpending;
|
|
+ atomic_t fanotify_listeners;
|
|
+ atomic_long_t epoll_watches;
|
|
+ long unsigned int mq_bytes;
|
|
+ long unsigned int locked_shm;
|
|
+ long unsigned int unix_inflight;
|
|
+ atomic_long_t pipe_bufs;
|
|
+ struct key___2 *uid_keyring;
|
|
+ struct key___2 *session_keyring;
|
|
+ struct hlist_node uidhash_node;
|
|
+ kuid_t uid;
|
|
+ atomic_long_t locked_vm;
|
|
+ struct ratelimit_state ratelimit;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct cred___2 {
|
|
+ atomic_t usage;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ kuid_t suid;
|
|
+ kgid_t sgid;
|
|
+ kuid_t euid;
|
|
+ kgid_t egid;
|
|
+ kuid_t fsuid;
|
|
+ kgid_t fsgid;
|
|
+ unsigned int securebits;
|
|
+ kernel_cap_t cap_inheritable;
|
|
+ kernel_cap_t cap_permitted;
|
|
+ kernel_cap_t cap_effective;
|
|
+ kernel_cap_t cap_bset;
|
|
+ kernel_cap_t cap_ambient;
|
|
+ unsigned char jit_keyring;
|
|
+ struct key___2 *session_keyring;
|
|
+ struct key___2 *process_keyring;
|
|
+ struct key___2 *thread_keyring;
|
|
+ struct key___2 *request_key_auth;
|
|
+ void *security;
|
|
+ struct user_struct___2 *user;
|
|
+ struct user_namespace___2 *user_ns;
|
|
+ struct group_info *group_info;
|
|
+ union {
|
|
+ int non_rcu;
|
|
+ struct callback_head rcu;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct net___2;
|
|
+
|
|
+struct cgroup_namespace___2;
|
|
+
|
|
+struct nsproxy___2 {
|
|
+ atomic_t count;
|
|
+ struct uts_namespace *uts_ns;
|
|
+ struct ipc_namespace *ipc_ns;
|
|
+ struct mnt_namespace *mnt_ns;
|
|
+ struct pid_namespace___2 *pid_ns_for_children;
|
|
+ struct net___2 *net_ns;
|
|
+ struct cgroup_namespace___2 *cgroup_ns;
|
|
+};
|
|
+
|
|
+struct signal_struct___2 {
|
|
+ atomic_t sigcnt;
|
|
+ atomic_t live;
|
|
+ int nr_threads;
|
|
+ struct list_head thread_head;
|
|
+ wait_queue_head_t wait_chldexit;
|
|
+ struct task_struct___2 *curr_target;
|
|
+ struct sigpending shared_pending;
|
|
+ struct hlist_head multiprocess;
|
|
+ int group_exit_code;
|
|
+ int notify_count;
|
|
+ struct task_struct___2 *group_exit_task;
|
|
+ int group_stop_count;
|
|
+ unsigned int flags;
|
|
+ unsigned int is_child_subreaper: 1;
|
|
+ unsigned int has_child_subreaper: 1;
|
|
+ int posix_timer_id;
|
|
+ struct list_head posix_timers;
|
|
+ struct hrtimer real_timer;
|
|
+ ktime_t it_real_incr;
|
|
+ struct cpu_itimer it[2];
|
|
+ struct thread_group_cputimer cputimer;
|
|
+ struct task_cputime cputime_expires;
|
|
+ struct list_head cpu_timers[3];
|
|
+ struct pid___2 *pids[4];
|
|
+ atomic_t tick_dep_mask;
|
|
+ struct pid___2 *tty_old_pgrp;
|
|
+ int leader;
|
|
+ struct tty_struct *tty;
|
|
+ struct autogroup *autogroup;
|
|
+ seqlock_t stats_lock;
|
|
+ u64 utime;
|
|
+ u64 stime;
|
|
+ u64 cutime;
|
|
+ u64 cstime;
|
|
+ u64 gtime;
|
|
+ u64 cgtime;
|
|
+ struct prev_cputime prev_cputime;
|
|
+ long unsigned int nvcsw;
|
|
+ long unsigned int nivcsw;
|
|
+ long unsigned int cnvcsw;
|
|
+ long unsigned int cnivcsw;
|
|
+ long unsigned int min_flt;
|
|
+ long unsigned int maj_flt;
|
|
+ long unsigned int cmin_flt;
|
|
+ long unsigned int cmaj_flt;
|
|
+ long unsigned int inblock;
|
|
+ long unsigned int oublock;
|
|
+ long unsigned int cinblock;
|
|
+ long unsigned int coublock;
|
|
+ long unsigned int maxrss;
|
|
+ long unsigned int cmaxrss;
|
|
+ struct task_io_accounting ioac;
|
|
+ long long unsigned int sum_sched_runtime;
|
|
+ struct rlimit rlim[16];
|
|
+ struct pacct_struct pacct;
|
|
+ struct taskstats *stats;
|
|
+ unsigned int audit_tty;
|
|
+ struct tty_audit_buf *tty_audit_buf;
|
|
+ bool oom_flag_origin;
|
|
+ short int oom_score_adj;
|
|
+ short int oom_score_adj_min;
|
|
+ struct mm_struct___2 *oom_mm;
|
|
+ struct mutex cred_guard_mutex;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct cgroup_subsys_state___2;
|
|
+
|
|
+struct cgroup___2;
|
|
+
|
|
+struct css_set___2 {
|
|
+ struct cgroup_subsys_state___2 *subsys[14];
|
|
+ refcount_t refcount;
|
|
+ struct css_set___2 *dom_cset;
|
|
+ struct cgroup___2 *dfl_cgrp;
|
|
+ int nr_tasks;
|
|
+ struct list_head tasks;
|
|
+ struct list_head mg_tasks;
|
|
+ struct list_head dying_tasks;
|
|
+ struct list_head task_iters;
|
|
+ struct list_head e_cset_node[14];
|
|
+ struct list_head threaded_csets;
|
|
+ struct list_head threaded_csets_node;
|
|
+ struct hlist_node hlist;
|
|
+ struct list_head cgrp_links;
|
|
+ struct list_head mg_preload_node;
|
|
+ struct list_head mg_node;
|
|
+ struct cgroup___2 *mg_src_cgrp;
|
|
+ struct cgroup___2 *mg_dst_cgrp;
|
|
+ struct css_set___2 *mg_dst_cset;
|
|
+ bool dead;
|
|
+ struct callback_head callback_head;
|
|
+};
|
|
+
|
|
+struct perf_event_context___2 {
|
|
+ struct pmu___2 *pmu;
|
|
+ raw_spinlock_t lock;
|
|
+ struct mutex mutex;
|
|
+ struct list_head active_ctx_list;
|
|
+ struct perf_event_groups pinned_groups;
|
|
+ struct perf_event_groups flexible_groups;
|
|
+ struct list_head event_list;
|
|
+ struct list_head pinned_active;
|
|
+ struct list_head flexible_active;
|
|
+ int nr_events;
|
|
+ int nr_active;
|
|
+ int is_active;
|
|
+ int nr_stat;
|
|
+ int nr_freq;
|
|
+ int rotate_disable;
|
|
+ atomic_t refcount;
|
|
+ struct task_struct___2 *task;
|
|
+ u64 time;
|
|
+ u64 timestamp;
|
|
+ struct perf_event_context___2 *parent_ctx;
|
|
+ u64 parent_gen;
|
|
+ u64 generation;
|
|
+ int pin_count;
|
|
+ int nr_cgroups;
|
|
+ void *task_ctx_data;
|
|
+ struct callback_head callback_head;
|
|
+};
|
|
+
|
|
+struct pipe_buffer___2;
|
|
+
|
|
+struct pipe_inode_info___2 {
|
|
+ struct mutex mutex;
|
|
+ wait_queue_head_t wait;
|
|
+ unsigned int nrbufs;
|
|
+ unsigned int curbuf;
|
|
+ unsigned int buffers;
|
|
+ unsigned int readers;
|
|
+ unsigned int writers;
|
|
+ unsigned int files;
|
|
+ unsigned int waiting_writers;
|
|
+ unsigned int r_counter;
|
|
+ unsigned int w_counter;
|
|
+ struct page___2 *tmp_page;
|
|
+ struct fasync_struct___2 *fasync_readers;
|
|
+ struct fasync_struct___2 *fasync_writers;
|
|
+ struct pipe_buffer___2 *bufs;
|
|
+ struct user_struct___2 *user;
|
|
+};
|
|
+
|
|
+union thread_union___2 {
|
|
+ struct task_struct___2 task;
|
|
+ long unsigned int stack[2048];
|
|
+};
|
|
+
|
|
+struct kiocb___2 {
|
|
+ struct file___2 *ki_filp;
|
|
+ loff_t ki_pos;
|
|
+ void (*ki_complete)(struct kiocb___2 *, long int, long int);
|
|
+ void *private;
|
|
+ int ki_flags;
|
|
+ u16 ki_hint;
|
|
+ u16 ki_ioprio;
|
|
+};
|
|
+
|
|
+struct iattr___2 {
|
|
+ unsigned int ia_valid;
|
|
+ umode_t ia_mode;
|
|
+ kuid_t ia_uid;
|
|
+ kgid_t ia_gid;
|
|
+ loff_t ia_size;
|
|
+ struct timespec64 ia_atime;
|
|
+ struct timespec64 ia_mtime;
|
|
+ struct timespec64 ia_ctime;
|
|
+ struct file___2 *ia_file;
|
|
+};
|
|
+
|
|
+struct pglist_data___2;
|
|
+
|
|
+struct lruvec___2 {
|
|
+ struct list_head lists[5];
|
|
+ struct zone_reclaim_stat reclaim_stat;
|
|
+ atomic_long_t inactive_age;
|
|
+ long unsigned int refaults;
|
|
+ struct pglist_data___2 *pgdat;
|
|
+};
|
|
+
|
|
+struct zone___2 {
|
|
+ long unsigned int watermark[3];
|
|
+ long unsigned int nr_reserved_highatomic;
|
|
+ long int lowmem_reserve[5];
|
|
+ int node;
|
|
+ struct pglist_data___2 *zone_pgdat;
|
|
+ struct per_cpu_pageset *pageset;
|
|
+ long unsigned int zone_start_pfn;
|
|
+ long unsigned int managed_pages;
|
|
+ long unsigned int spanned_pages;
|
|
+ long unsigned int present_pages;
|
|
+ const char *name;
|
|
+ long unsigned int nr_isolate_pageblock;
|
|
+ seqlock_t span_seqlock;
|
|
+ int initialized;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct zone_padding _pad1_;
|
|
+ struct free_area free_area[11];
|
|
+ long unsigned int flags;
|
|
+ spinlock_t lock;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct zone_padding _pad2_;
|
|
+ long unsigned int percpu_drift_mark;
|
|
+ long unsigned int compact_cached_free_pfn;
|
|
+ long unsigned int compact_cached_migrate_pfn[2];
|
|
+ unsigned int compact_considered;
|
|
+ unsigned int compact_defer_shift;
|
|
+ int compact_order_failed;
|
|
+ bool compact_blockskip_flush;
|
|
+ bool contiguous;
|
|
+ long: 16;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct zone_padding _pad3_;
|
|
+ atomic_long_t vm_stat[13];
|
|
+ atomic_long_t vm_numa_stat[6];
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct zoneref___2 {
|
|
+ struct zone___2 *zone;
|
|
+ int zone_idx;
|
|
+};
|
|
+
|
|
+struct zonelist___2 {
|
|
+ struct zoneref___2 _zonerefs[5121];
|
|
+};
|
|
+
|
|
+struct pglist_data___2 {
|
|
+ struct zone___2 node_zones[5];
|
|
+ struct zonelist___2 node_zonelists[2];
|
|
+ int nr_zones;
|
|
+ spinlock_t node_size_lock;
|
|
+ long unsigned int node_start_pfn;
|
|
+ long unsigned int node_present_pages;
|
|
+ long unsigned int node_spanned_pages;
|
|
+ int node_id;
|
|
+ wait_queue_head_t kswapd_wait;
|
|
+ wait_queue_head_t pfmemalloc_wait;
|
|
+ struct task_struct___2 *kswapd;
|
|
+ int kswapd_order;
|
|
+ enum zone_type kswapd_classzone_idx;
|
|
+ int kswapd_failures;
|
|
+ int kcompactd_max_order;
|
|
+ enum zone_type kcompactd_classzone_idx;
|
|
+ wait_queue_head_t kcompactd_wait;
|
|
+ struct task_struct___2 *kcompactd;
|
|
+ long unsigned int totalreserve_pages;
|
|
+ long unsigned int min_unmapped_pages;
|
|
+ long unsigned int min_slab_pages;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct zone_padding _pad1_;
|
|
+ spinlock_t lru_lock;
|
|
+ long unsigned int first_deferred_pfn;
|
|
+ long unsigned int static_init_pgcnt;
|
|
+ spinlock_t split_queue_lock;
|
|
+ struct list_head split_queue;
|
|
+ long unsigned int split_queue_len;
|
|
+ struct lruvec___2 lruvec;
|
|
+ long unsigned int flags;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct zone_padding _pad2_;
|
|
+ struct per_cpu_nodestat *per_cpu_nodestats;
|
|
+ atomic_long_t vm_stat[28];
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+typedef struct pglist_data___2 pg_data_t___2;
|
|
+
|
|
+struct dquot___2 {
|
|
+ struct hlist_node dq_hash;
|
|
+ struct list_head dq_inuse;
|
|
+ struct list_head dq_free;
|
|
+ struct list_head dq_dirty;
|
|
+ struct mutex dq_lock;
|
|
+ spinlock_t dq_dqb_lock;
|
|
+ atomic_t dq_count;
|
|
+ struct super_block___2 *dq_sb;
|
|
+ struct kqid dq_id;
|
|
+ loff_t dq_off;
|
|
+ long unsigned int dq_flags;
|
|
+ struct mem_dqblk dq_dqb;
|
|
+};
|
|
+
|
|
+struct quota_format_type___2 {
|
|
+ int qf_fmt_id;
|
|
+ const struct quota_format_ops___2 *qf_ops;
|
|
+ struct module___2 *qf_owner;
|
|
+ struct quota_format_type___2 *qf_next;
|
|
+};
|
|
+
|
|
+struct quota_format_ops___2 {
|
|
+ int (*check_quota_file)(struct super_block___2 *, int);
|
|
+ int (*read_file_info)(struct super_block___2 *, int);
|
|
+ int (*write_file_info)(struct super_block___2 *, int);
|
|
+ int (*free_file_info)(struct super_block___2 *, int);
|
|
+ int (*read_dqblk)(struct dquot___2 *);
|
|
+ int (*commit_dqblk)(struct dquot___2 *);
|
|
+ int (*release_dqblk)(struct dquot___2 *);
|
|
+ int (*get_next_id)(struct super_block___2 *, struct kqid *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct dquot_operations___2 {
|
|
+ int (*write_dquot)(struct dquot___2 *);
|
|
+ struct dquot___2 * (*alloc_dquot)(struct super_block___2 *, int);
|
|
+ void (*destroy_dquot)(struct dquot___2 *);
|
|
+ int (*acquire_dquot)(struct dquot___2 *);
|
|
+ int (*release_dquot)(struct dquot___2 *);
|
|
+ int (*mark_dirty)(struct dquot___2 *);
|
|
+ int (*write_info)(struct super_block___2 *, int);
|
|
+ qsize_t * (*get_reserved_space)(struct inode___2 *);
|
|
+ int (*get_projid)(struct inode___2 *, kprojid_t *);
|
|
+ int (*get_inode_usage)(struct inode___2 *, qsize_t *);
|
|
+ int (*get_next_id)(struct super_block___2 *, struct kqid *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct quotactl_ops___2 {
|
|
+ int (*quota_on)(struct super_block___2 *, int, int, const struct path___2 *);
|
|
+ int (*quota_off)(struct super_block___2 *, int);
|
|
+ int (*quota_enable)(struct super_block___2 *, unsigned int);
|
|
+ int (*quota_disable)(struct super_block___2 *, unsigned int);
|
|
+ int (*quota_sync)(struct super_block___2 *, int);
|
|
+ int (*set_info)(struct super_block___2 *, int, struct qc_info *);
|
|
+ int (*get_dqblk)(struct super_block___2 *, struct kqid, struct qc_dqblk *);
|
|
+ int (*get_nextdqblk)(struct super_block___2 *, struct kqid *, struct qc_dqblk *);
|
|
+ int (*set_dqblk)(struct super_block___2 *, struct kqid, struct qc_dqblk *);
|
|
+ int (*get_state)(struct super_block___2 *, struct qc_state *);
|
|
+ int (*rm_xquota)(struct super_block___2 *, unsigned int);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct module_kobject___2 {
|
|
+ struct kobject___3 kobj;
|
|
+ struct module___2 *mod;
|
|
+ struct kobject___3 *drivers_dir;
|
|
+ struct module_param_attrs *mp;
|
|
+ struct completion *kobj_completion;
|
|
+};
|
|
+
|
|
+struct mod_tree_node___2 {
|
|
+ struct module___2 *mod;
|
|
+ struct latch_tree_node node;
|
|
+};
|
|
+
|
|
+struct module_layout___2 {
|
|
+ void *base;
|
|
+ unsigned int size;
|
|
+ unsigned int text_size;
|
|
+ unsigned int ro_size;
|
|
+ unsigned int ro_after_init_size;
|
|
+ struct mod_tree_node___2 mtn;
|
|
+};
|
|
+
|
|
+struct module_attribute___2;
|
|
+
|
|
+struct kernel_param___2;
|
|
+
|
|
+struct module___2 {
|
|
+ enum module_state state;
|
|
+ struct list_head list;
|
|
+ char name[56];
|
|
+ struct module_kobject___2 mkobj;
|
|
+ struct module_attribute___2 *modinfo_attrs;
|
|
+ const char *version;
|
|
+ const char *srcversion;
|
|
+ struct kobject___3 *holders_dir;
|
|
+ const struct kernel_symbol *syms;
|
|
+ const s32 *crcs;
|
|
+ unsigned int num_syms;
|
|
+ struct mutex param_lock;
|
|
+ struct kernel_param___2 *kp;
|
|
+ unsigned int num_kp;
|
|
+ unsigned int num_gpl_syms;
|
|
+ const struct kernel_symbol *gpl_syms;
|
|
+ const s32 *gpl_crcs;
|
|
+ bool sig_ok;
|
|
+ bool async_probe_requested;
|
|
+ const struct kernel_symbol *gpl_future_syms;
|
|
+ const s32 *gpl_future_crcs;
|
|
+ unsigned int num_gpl_future_syms;
|
|
+ unsigned int num_exentries;
|
|
+ struct exception_table_entry *extable;
|
|
+ int (*init)();
|
|
+ long: 64;
|
|
+ struct module_layout___2 core_layout;
|
|
+ struct module_layout___2 init_layout;
|
|
+ struct mod_arch_specific arch;
|
|
+ long unsigned int taints;
|
|
+ unsigned int num_bugs;
|
|
+ struct list_head bug_list;
|
|
+ struct bug_entry *bug_table;
|
|
+ struct mod_kallsyms *kallsyms;
|
|
+ struct mod_kallsyms core_kallsyms;
|
|
+ struct module_sect_attrs *sect_attrs;
|
|
+ struct module_notes_attrs *notes_attrs;
|
|
+ char *args;
|
|
+ void *percpu;
|
|
+ unsigned int percpu_size;
|
|
+ unsigned int num_tracepoints;
|
|
+ tracepoint_ptr_t *tracepoints_ptrs;
|
|
+ unsigned int num_bpf_raw_events;
|
|
+ struct bpf_raw_event_map *bpf_raw_events;
|
|
+ struct jump_entry *jump_entries;
|
|
+ unsigned int num_jump_entries;
|
|
+ unsigned int num_trace_bprintk_fmt;
|
|
+ const char **trace_bprintk_fmt_start;
|
|
+ struct trace_event_call___2 **trace_events;
|
|
+ unsigned int num_trace_events;
|
|
+ struct trace_eval_map **trace_evals;
|
|
+ unsigned int num_trace_evals;
|
|
+ unsigned int num_ftrace_callsites;
|
|
+ long unsigned int *ftrace_callsites;
|
|
+ bool klp;
|
|
+ bool klp_alive;
|
|
+ struct klp_modinfo *klp_info;
|
|
+ struct list_head source_list;
|
|
+ struct list_head target_list;
|
|
+ void (*exit)();
|
|
+ atomic_t refcnt;
|
|
+ struct error_injection_entry *ei_funcs;
|
|
+ unsigned int num_ei_funcs;
|
|
+ union {
|
|
+ enum MODULE_KLP_REL_STATE klp_rel_state;
|
|
+ long int klp_rel_state_KABI;
|
|
+ };
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct address_space_operations___2 {
|
|
+ int (*writepage)(struct page___2 *, struct writeback_control *);
|
|
+ int (*readpage)(struct file___2 *, struct page___2 *);
|
|
+ int (*writepages)(struct address_space___2 *, struct writeback_control *);
|
|
+ int (*set_page_dirty)(struct page___2 *);
|
|
+ int (*readpages)(struct file___2 *, struct address_space___2 *, struct list_head *, unsigned int);
|
|
+ int (*write_begin)(struct file___2 *, struct address_space___2 *, loff_t, unsigned int, unsigned int, struct page___2 **, void **);
|
|
+ int (*write_end)(struct file___2 *, struct address_space___2 *, loff_t, unsigned int, unsigned int, struct page___2 *, void *);
|
|
+ sector_t (*bmap)(struct address_space___2 *, sector_t);
|
|
+ void (*invalidatepage)(struct page___2 *, unsigned int, unsigned int);
|
|
+ int (*releasepage)(struct page___2 *, gfp_t);
|
|
+ void (*freepage)(struct page___2 *);
|
|
+ ssize_t (*direct_IO)(struct kiocb___2 *, struct iov_iter___2 *);
|
|
+ int (*migratepage)(struct address_space___2 *, struct page___2 *, struct page___2 *, enum migrate_mode);
|
|
+ bool (*isolate_page)(struct page___2 *, isolate_mode_t);
|
|
+ void (*putback_page)(struct page___2 *);
|
|
+ int (*launder_page)(struct page___2 *);
|
|
+ int (*is_partially_uptodate)(struct page___2 *, long unsigned int, long unsigned int);
|
|
+ void (*is_dirty_writeback)(struct page___2 *, bool *, bool *);
|
|
+ int (*error_remove_page)(struct address_space___2 *, struct page___2 *);
|
|
+ int (*swap_activate)(struct swap_info_struct *, struct file___2 *, sector_t *);
|
|
+ void (*swap_deactivate)(struct file___2 *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct iov_iter___2 {
|
|
+ int type;
|
|
+ size_t iov_offset;
|
|
+ size_t count;
|
|
+ union {
|
|
+ const struct iovec *iov;
|
|
+ const struct kvec *kvec;
|
|
+ const struct bio_vec *bvec;
|
|
+ struct pipe_inode_info___2 *pipe;
|
|
+ };
|
|
+ union {
|
|
+ long unsigned int nr_segs;
|
|
+ struct {
|
|
+ int idx;
|
|
+ int start_idx;
|
|
+ };
|
|
+ };
|
|
+};
|
|
+
|
|
+struct block_device___2 {
|
|
+ dev_t bd_dev;
|
|
+ int bd_openers;
|
|
+ int bd_write_openers;
|
|
+ struct inode___2 *bd_inode;
|
|
+ struct super_block___2 *bd_super;
|
|
+ struct mutex bd_mutex;
|
|
+ void *bd_claiming;
|
|
+ void *bd_holder;
|
|
+ int bd_holders;
|
|
+ bool bd_write_holder;
|
|
+ struct list_head bd_holder_disks;
|
|
+ struct block_device___2 *bd_contains;
|
|
+ unsigned int bd_block_size;
|
|
+ u8 bd_partno;
|
|
+ struct hd_struct *bd_part;
|
|
+ unsigned int bd_part_count;
|
|
+ int bd_invalidated;
|
|
+ struct gendisk *bd_disk;
|
|
+ struct request_queue *bd_queue;
|
|
+ struct backing_dev_info *bd_bdi;
|
|
+ struct list_head bd_list;
|
|
+ long unsigned int bd_private;
|
|
+ int bd_fsfreeze_count;
|
|
+ struct mutex bd_fsfreeze_mutex;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct inode_operations___2 {
|
|
+ struct dentry___2 * (*lookup)(struct inode___2 *, struct dentry___2 *, unsigned int);
|
|
+ const char * (*get_link)(struct dentry___2 *, struct inode___2 *, struct delayed_call *);
|
|
+ int (*permission)(struct inode___2 *, int);
|
|
+ struct posix_acl * (*get_acl)(struct inode___2 *, int);
|
|
+ int (*readlink)(struct dentry___2 *, char *, int);
|
|
+ int (*create)(struct inode___2 *, struct dentry___2 *, umode_t, bool);
|
|
+ int (*link)(struct dentry___2 *, struct inode___2 *, struct dentry___2 *);
|
|
+ int (*unlink)(struct inode___2 *, struct dentry___2 *);
|
|
+ int (*symlink)(struct inode___2 *, struct dentry___2 *, const char *);
|
|
+ int (*mkdir)(struct inode___2 *, struct dentry___2 *, umode_t);
|
|
+ int (*rmdir)(struct inode___2 *, struct dentry___2 *);
|
|
+ int (*mknod)(struct inode___2 *, struct dentry___2 *, umode_t, dev_t);
|
|
+ int (*rename)(struct inode___2 *, struct dentry___2 *, struct inode___2 *, struct dentry___2 *, unsigned int);
|
|
+ int (*setattr)(struct dentry___2 *, struct iattr___2 *);
|
|
+ int (*getattr)(const struct path___2 *, struct kstat *, u32, unsigned int);
|
|
+ ssize_t (*listxattr)(struct dentry___2 *, char *, size_t);
|
|
+ int (*fiemap)(struct inode___2 *, struct fiemap_extent_info *, u64, u64);
|
|
+ int (*update_time)(struct inode___2 *, struct timespec64 *, int);
|
|
+ int (*atomic_open)(struct inode___2 *, struct dentry___2 *, struct file___2 *, unsigned int, umode_t);
|
|
+ int (*tmpfile)(struct inode___2 *, struct dentry___2 *, umode_t);
|
|
+ int (*set_acl)(struct inode___2 *, struct posix_acl *, int);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct file_lock_operations___2 {
|
|
+ void (*fl_copy_lock)(struct file_lock___2 *, struct file_lock___2 *);
|
|
+ void (*fl_release_private)(struct file_lock___2 *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct lock_manager_operations___2;
|
|
+
|
|
+struct file_lock___2 {
|
|
+ struct file_lock___2 *fl_next;
|
|
+ struct list_head fl_list;
|
|
+ struct hlist_node fl_link;
|
|
+ struct list_head fl_block;
|
|
+ fl_owner_t fl_owner;
|
|
+ unsigned int fl_flags;
|
|
+ unsigned char fl_type;
|
|
+ unsigned int fl_pid;
|
|
+ int fl_link_cpu;
|
|
+ wait_queue_head_t fl_wait;
|
|
+ struct file___2 *fl_file;
|
|
+ loff_t fl_start;
|
|
+ loff_t fl_end;
|
|
+ struct fasync_struct___2 *fl_fasync;
|
|
+ long unsigned int fl_break_time;
|
|
+ long unsigned int fl_downgrade_time;
|
|
+ const struct file_lock_operations___2 *fl_ops;
|
|
+ const struct lock_manager_operations___2 *fl_lmops;
|
|
+ union {
|
|
+ struct nfs_lock_info nfs_fl;
|
|
+ struct nfs4_lock_info nfs4_fl;
|
|
+ struct {
|
|
+ struct list_head link;
|
|
+ int state;
|
|
+ } afs;
|
|
+ } fl_u;
|
|
+};
|
|
+
|
|
+struct lock_manager_operations___2 {
|
|
+ int (*lm_compare_owner)(struct file_lock___2 *, struct file_lock___2 *);
|
|
+ long unsigned int (*lm_owner_key)(struct file_lock___2 *);
|
|
+ fl_owner_t (*lm_get_owner)(fl_owner_t);
|
|
+ void (*lm_put_owner)(fl_owner_t);
|
|
+ void (*lm_notify)(struct file_lock___2 *);
|
|
+ int (*lm_grant)(struct file_lock___2 *, int);
|
|
+ bool (*lm_break)(struct file_lock___2 *);
|
|
+ int (*lm_change)(struct file_lock___2 *, int, struct list_head *);
|
|
+ void (*lm_setup)(struct file_lock___2 *, void **);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct fasync_struct___2 {
|
|
+ rwlock_t fa_lock;
|
|
+ int magic;
|
|
+ int fa_fd;
|
|
+ struct fasync_struct___2 *fa_next;
|
|
+ struct file___2 *fa_file;
|
|
+ struct callback_head fa_rcu;
|
|
+};
|
|
+
|
|
+struct file_system_type___2 {
|
|
+ const char *name;
|
|
+ int fs_flags;
|
|
+ struct dentry___2 * (*mount)(struct file_system_type___2 *, int, const char *, void *);
|
|
+ void (*kill_sb)(struct super_block___2 *);
|
|
+ struct module___2 *owner;
|
|
+ struct file_system_type___2 *next;
|
|
+ struct hlist_head fs_supers;
|
|
+ struct lock_class_key s_lock_key;
|
|
+ struct lock_class_key s_umount_key;
|
|
+ struct lock_class_key s_vfs_rename_key;
|
|
+ struct lock_class_key s_writers_key[3];
|
|
+ struct lock_class_key i_lock_key;
|
|
+ struct lock_class_key i_mutex_key;
|
|
+ struct lock_class_key i_mutex_dir_key;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct super_operations___2 {
|
|
+ struct inode___2 * (*alloc_inode)(struct super_block___2 *);
|
|
+ void (*destroy_inode)(struct inode___2 *);
|
|
+ void (*dirty_inode)(struct inode___2 *, int);
|
|
+ int (*write_inode)(struct inode___2 *, struct writeback_control *);
|
|
+ int (*drop_inode)(struct inode___2 *);
|
|
+ void (*evict_inode)(struct inode___2 *);
|
|
+ void (*put_super)(struct super_block___2 *);
|
|
+ int (*sync_fs)(struct super_block___2 *, int);
|
|
+ int (*freeze_super)(struct super_block___2 *);
|
|
+ int (*freeze_fs)(struct super_block___2 *);
|
|
+ int (*thaw_super)(struct super_block___2 *);
|
|
+ int (*unfreeze_fs)(struct super_block___2 *);
|
|
+ int (*statfs)(struct dentry___2 *, struct kstatfs *);
|
|
+ int (*remount_fs)(struct super_block___2 *, int *, char *);
|
|
+ void (*umount_begin)(struct super_block___2 *);
|
|
+ int (*show_options)(struct seq_file___2 *, struct dentry___2 *);
|
|
+ int (*show_devname)(struct seq_file___2 *, struct dentry___2 *);
|
|
+ int (*show_path)(struct seq_file___2 *, struct dentry___2 *);
|
|
+ int (*show_stats)(struct seq_file___2 *, struct dentry___2 *);
|
|
+ ssize_t (*quota_read)(struct super_block___2 *, int, char *, size_t, loff_t);
|
|
+ ssize_t (*quota_write)(struct super_block___2 *, int, const char *, size_t, loff_t);
|
|
+ struct dquot___2 ** (*get_dquots)(struct inode___2 *);
|
|
+ int (*bdev_try_to_free_page)(struct super_block___2 *, struct page___2 *, gfp_t);
|
|
+ long int (*nr_cached_objects)(struct super_block___2 *, struct shrink_control *);
|
|
+ long int (*free_cached_objects)(struct super_block___2 *, struct shrink_control *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+typedef void (*poll_queue_proc___2)(struct file___2 *, wait_queue_head_t *, struct poll_table_struct___2 *);
|
|
+
|
|
+struct poll_table_struct___2 {
|
|
+ poll_queue_proc___2 _qproc;
|
|
+ __poll_t _key;
|
|
+};
|
|
+
|
|
+struct seq_file___2 {
|
|
+ char *buf;
|
|
+ size_t size;
|
|
+ size_t from;
|
|
+ size_t count;
|
|
+ size_t pad_until;
|
|
+ loff_t index;
|
|
+ loff_t read_pos;
|
|
+ u64 version;
|
|
+ struct mutex lock;
|
|
+ const struct seq_operations *op;
|
|
+ int poll_event;
|
|
+ const struct file___2 *file;
|
|
+ void *private;
|
|
+};
|
|
+
|
|
+struct kobj_attribute___3 {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct kobject___3 *, struct kobj_attribute___3 *, char *);
|
|
+ ssize_t (*store)(struct kobject___3 *, struct kobj_attribute___3 *, const char *, size_t);
|
|
+};
|
|
+
|
|
+typedef void compound_page_dtor___2(struct page___2 *);
|
|
+
|
|
+struct kernfs_root___2;
|
|
+
|
|
+struct kernfs_elem_dir___2 {
|
|
+ long unsigned int subdirs;
|
|
+ struct rb_root children;
|
|
+ struct kernfs_root___2 *root;
|
|
+};
|
|
+
|
|
+struct kernfs_syscall_ops___2;
|
|
+
|
|
+struct kernfs_root___2 {
|
|
+ struct kernfs_node___2 *kn;
|
|
+ unsigned int flags;
|
|
+ struct idr ino_idr;
|
|
+ u32 last_ino;
|
|
+ u32 next_generation;
|
|
+ struct kernfs_syscall_ops___2 *syscall_ops;
|
|
+ struct list_head supers;
|
|
+ wait_queue_head_t deactivate_waitq;
|
|
+};
|
|
+
|
|
+struct kernfs_elem_symlink___2 {
|
|
+ struct kernfs_node___2 *target_kn;
|
|
+};
|
|
+
|
|
+struct kernfs_ops___2;
|
|
+
|
|
+struct kernfs_elem_attr___2 {
|
|
+ const struct kernfs_ops___2 *ops;
|
|
+ struct kernfs_open_node *open;
|
|
+ loff_t size;
|
|
+ struct kernfs_node___2 *notify_next;
|
|
+};
|
|
+
|
|
+struct kernfs_node___2 {
|
|
+ atomic_t count;
|
|
+ atomic_t active;
|
|
+ struct kernfs_node___2 *parent;
|
|
+ const char *name;
|
|
+ struct rb_node rb;
|
|
+ const void *ns;
|
|
+ unsigned int hash;
|
|
+ union {
|
|
+ struct kernfs_elem_dir___2 dir;
|
|
+ struct kernfs_elem_symlink___2 symlink;
|
|
+ struct kernfs_elem_attr___2 attr;
|
|
+ };
|
|
+ void *priv;
|
|
+ union kernfs_node_id id;
|
|
+ short unsigned int flags;
|
|
+ umode_t mode;
|
|
+ struct kernfs_iattrs *iattr;
|
|
+};
|
|
+
|
|
+struct kernfs_open_file___2;
|
|
+
|
|
+struct kernfs_ops___2 {
|
|
+ int (*open)(struct kernfs_open_file___2 *);
|
|
+ void (*release)(struct kernfs_open_file___2 *);
|
|
+ int (*seq_show)(struct seq_file___2 *, void *);
|
|
+ void * (*seq_start)(struct seq_file___2 *, loff_t *);
|
|
+ void * (*seq_next)(struct seq_file___2 *, void *, loff_t *);
|
|
+ void (*seq_stop)(struct seq_file___2 *, void *);
|
|
+ ssize_t (*read)(struct kernfs_open_file___2 *, char *, size_t, loff_t);
|
|
+ size_t atomic_write_len;
|
|
+ bool prealloc;
|
|
+ ssize_t (*write)(struct kernfs_open_file___2 *, char *, size_t, loff_t);
|
|
+ int (*mmap)(struct kernfs_open_file___2 *, struct vm_area_struct___2 *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct kernfs_syscall_ops___2 {
|
|
+ int (*remount_fs)(struct kernfs_root___2 *, int *, char *);
|
|
+ int (*show_options)(struct seq_file___2 *, struct kernfs_root___2 *);
|
|
+ int (*mkdir)(struct kernfs_node___2 *, const char *, umode_t);
|
|
+ int (*rmdir)(struct kernfs_node___2 *);
|
|
+ int (*rename)(struct kernfs_node___2 *, struct kernfs_node___2 *, const char *);
|
|
+ int (*show_path)(struct seq_file___2 *, struct kernfs_node___2 *, struct kernfs_root___2 *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct kernfs_open_file___2 {
|
|
+ struct kernfs_node___2 *kn;
|
|
+ struct file___2 *file;
|
|
+ struct seq_file___2 *seq_file;
|
|
+ void *priv;
|
|
+ struct mutex mutex;
|
|
+ struct mutex prealloc_mutex;
|
|
+ int event;
|
|
+ struct list_head list;
|
|
+ char *prealloc_buf;
|
|
+ size_t atomic_write_len;
|
|
+ bool mmapped: 1;
|
|
+ bool released: 1;
|
|
+ const struct vm_operations_struct___2 *vm_ops;
|
|
+};
|
|
+
|
|
+struct bin_attribute___2;
|
|
+
|
|
+struct attribute_group___2 {
|
|
+ const char *name;
|
|
+ umode_t (*is_visible)(struct kobject___3 *, struct attribute *, int);
|
|
+ umode_t (*is_bin_visible)(struct kobject___3 *, struct bin_attribute___2 *, int);
|
|
+ struct attribute **attrs;
|
|
+ struct bin_attribute___2 **bin_attrs;
|
|
+};
|
|
+
|
|
+struct bin_attribute___2 {
|
|
+ struct attribute attr;
|
|
+ size_t size;
|
|
+ void *private;
|
|
+ ssize_t (*read)(struct file___2 *, struct kobject___3 *, struct bin_attribute___2 *, char *, loff_t, size_t);
|
|
+ ssize_t (*write)(struct file___2 *, struct kobject___3 *, struct bin_attribute___2 *, char *, loff_t, size_t);
|
|
+ int (*mmap)(struct file___2 *, struct kobject___3 *, struct bin_attribute___2 *, struct vm_area_struct___2 *);
|
|
+};
|
|
+
|
|
+struct sysfs_ops___2 {
|
|
+ ssize_t (*show)(struct kobject___3 *, struct attribute *, char *);
|
|
+ ssize_t (*store)(struct kobject___3 *, struct attribute *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct kset_uevent_ops___2;
|
|
+
|
|
+struct kset___2 {
|
|
+ struct list_head list;
|
|
+ spinlock_t list_lock;
|
|
+ struct kobject___3 kobj;
|
|
+ const struct kset_uevent_ops___2 *uevent_ops;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct kobj_type___2 {
|
|
+ void (*release)(struct kobject___3 *);
|
|
+ const struct sysfs_ops___2 *sysfs_ops;
|
|
+ struct attribute **default_attrs;
|
|
+ const struct kobj_ns_type_operations * (*child_ns_type)(struct kobject___3 *);
|
|
+ const void * (*namespace)(struct kobject___3 *);
|
|
+ void (*get_ownership)(struct kobject___3 *, kuid_t *, kgid_t *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct kset_uevent_ops___2 {
|
|
+ int (* const filter)(struct kset___2 *, struct kobject___3 *);
|
|
+ const char * (* const name)(struct kset___2 *, struct kobject___3 *);
|
|
+ int (* const uevent)(struct kset___2 *, struct kobject___3 *, struct kobj_uevent_env *);
|
|
+};
|
|
+
|
|
+struct dev_pm_ops___2 {
|
|
+ int (*prepare)(struct device___2 *);
|
|
+ void (*complete)(struct device___2 *);
|
|
+ int (*suspend)(struct device___2 *);
|
|
+ int (*resume)(struct device___2 *);
|
|
+ int (*freeze)(struct device___2 *);
|
|
+ int (*thaw)(struct device___2 *);
|
|
+ int (*poweroff)(struct device___2 *);
|
|
+ int (*restore)(struct device___2 *);
|
|
+ int (*suspend_late)(struct device___2 *);
|
|
+ int (*resume_early)(struct device___2 *);
|
|
+ int (*freeze_late)(struct device___2 *);
|
|
+ int (*thaw_early)(struct device___2 *);
|
|
+ int (*poweroff_late)(struct device___2 *);
|
|
+ int (*restore_early)(struct device___2 *);
|
|
+ int (*suspend_noirq)(struct device___2 *);
|
|
+ int (*resume_noirq)(struct device___2 *);
|
|
+ int (*freeze_noirq)(struct device___2 *);
|
|
+ int (*thaw_noirq)(struct device___2 *);
|
|
+ int (*poweroff_noirq)(struct device___2 *);
|
|
+ int (*restore_noirq)(struct device___2 *);
|
|
+ int (*runtime_suspend)(struct device___2 *);
|
|
+ int (*runtime_resume)(struct device___2 *);
|
|
+ int (*runtime_idle)(struct device___2 *);
|
|
+};
|
|
+
|
|
+struct dev_pm_domain___2 {
|
|
+ struct dev_pm_ops___2 ops;
|
|
+ void (*detach)(struct device___2 *, bool);
|
|
+ int (*activate)(struct device___2 *);
|
|
+ void (*sync)(struct device___2 *);
|
|
+ void (*dismiss)(struct device___2 *);
|
|
+};
|
|
+
|
|
+struct dma_map_ops___2 {
|
|
+ void * (*alloc)(struct device___2 *, size_t, dma_addr_t *, gfp_t, long unsigned int);
|
|
+ void (*free)(struct device___2 *, size_t, void *, dma_addr_t, long unsigned int);
|
|
+ int (*mmap)(struct device___2 *, struct vm_area_struct___2 *, void *, dma_addr_t, size_t, long unsigned int);
|
|
+ int (*get_sgtable)(struct device___2 *, struct sg_table *, void *, dma_addr_t, size_t, long unsigned int);
|
|
+ dma_addr_t (*map_page)(struct device___2 *, struct page___2 *, long unsigned int, size_t, enum dma_data_direction, long unsigned int);
|
|
+ void (*unmap_page)(struct device___2 *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int);
|
|
+ int (*map_sg)(struct device___2 *, struct scatterlist *, int, enum dma_data_direction, long unsigned int);
|
|
+ void (*unmap_sg)(struct device___2 *, struct scatterlist *, int, enum dma_data_direction, long unsigned int);
|
|
+ dma_addr_t (*map_resource)(struct device___2 *, phys_addr_t, size_t, enum dma_data_direction, long unsigned int);
|
|
+ void (*unmap_resource)(struct device___2 *, dma_addr_t, size_t, enum dma_data_direction, long unsigned int);
|
|
+ void (*sync_single_for_cpu)(struct device___2 *, dma_addr_t, size_t, enum dma_data_direction);
|
|
+ void (*sync_single_for_device)(struct device___2 *, dma_addr_t, size_t, enum dma_data_direction);
|
|
+ void (*sync_sg_for_cpu)(struct device___2 *, struct scatterlist *, int, enum dma_data_direction);
|
|
+ void (*sync_sg_for_device)(struct device___2 *, struct scatterlist *, int, enum dma_data_direction);
|
|
+ void (*cache_sync)(struct device___2 *, void *, size_t, enum dma_data_direction);
|
|
+ int (*mapping_error)(struct device___2 *, dma_addr_t);
|
|
+ int (*dma_supported)(struct device___2 *, u64);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+};
|
|
+
|
|
+struct bus_type___2 {
|
|
+ const char *name;
|
|
+ const char *dev_name;
|
|
+ struct device___2 *dev_root;
|
|
+ const struct attribute_group___2 **bus_groups;
|
|
+ const struct attribute_group___2 **dev_groups;
|
|
+ const struct attribute_group___2 **drv_groups;
|
|
+ int (*match)(struct device___2 *, struct device_driver___2 *);
|
|
+ int (*uevent)(struct device___2 *, struct kobj_uevent_env *);
|
|
+ int (*probe)(struct device___2 *);
|
|
+ int (*remove)(struct device___2 *);
|
|
+ void (*shutdown)(struct device___2 *);
|
|
+ int (*online)(struct device___2 *);
|
|
+ int (*offline)(struct device___2 *);
|
|
+ int (*suspend)(struct device___2 *, pm_message_t);
|
|
+ int (*resume)(struct device___2 *);
|
|
+ int (*num_vf)(struct device___2 *);
|
|
+ int (*dma_configure)(struct device___2 *);
|
|
+ const struct dev_pm_ops___2 *pm;
|
|
+ const struct iommu_ops *iommu_ops;
|
|
+ struct subsys_private *p;
|
|
+ struct lock_class_key lock_key;
|
|
+ bool need_parent_lock;
|
|
+};
|
|
+
|
|
+struct device_driver___2 {
|
|
+ const char *name;
|
|
+ struct bus_type___2 *bus;
|
|
+ struct module___2 *owner;
|
|
+ const char *mod_name;
|
|
+ bool suppress_bind_attrs;
|
|
+ enum probe_type probe_type;
|
|
+ const struct of_device_id *of_match_table;
|
|
+ const struct acpi_device_id *acpi_match_table;
|
|
+ int (*probe)(struct device___2 *);
|
|
+ int (*remove)(struct device___2 *);
|
|
+ void (*shutdown)(struct device___2 *);
|
|
+ int (*suspend)(struct device___2 *, pm_message_t);
|
|
+ int (*resume)(struct device___2 *);
|
|
+ const struct attribute_group___2 **groups;
|
|
+ const struct dev_pm_ops___2 *pm;
|
|
+ void (*coredump)(struct device___2 *);
|
|
+ struct driver_private *p;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct device_type___2 {
|
|
+ const char *name;
|
|
+ const struct attribute_group___2 **groups;
|
|
+ int (*uevent)(struct device___2 *, struct kobj_uevent_env *);
|
|
+ char * (*devnode)(struct device___2 *, umode_t *, kuid_t *, kgid_t *);
|
|
+ void (*release)(struct device___2 *);
|
|
+ const struct dev_pm_ops___2 *pm;
|
|
+};
|
|
+
|
|
+struct class___2 {
|
|
+ const char *name;
|
|
+ struct module___2 *owner;
|
|
+ const struct attribute_group___2 **class_groups;
|
|
+ const struct attribute_group___2 **dev_groups;
|
|
+ struct kobject___3 *dev_kobj;
|
|
+ int (*dev_uevent)(struct device___2 *, struct kobj_uevent_env *);
|
|
+ char * (*devnode)(struct device___2 *, umode_t *);
|
|
+ void (*class_release)(struct class___2 *);
|
|
+ void (*dev_release)(struct device___2 *);
|
|
+ int (*shutdown_pre)(struct device___2 *);
|
|
+ const struct kobj_ns_type_operations *ns_type;
|
|
+ const void * (*namespace)(struct device___2 *);
|
|
+ void (*get_ownership)(struct device___2 *, kuid_t *, kgid_t *);
|
|
+ const struct dev_pm_ops___2 *pm;
|
|
+ struct subsys_private *p;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct device_attribute___2 {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct device___2 *, struct device_attribute___2 *, char *);
|
|
+ ssize_t (*store)(struct device___2 *, struct device_attribute___2 *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct device_node___2 {
|
|
+ const char *name;
|
|
+ const char *type;
|
|
+ phandle phandle;
|
|
+ const char *full_name;
|
|
+ struct fwnode_handle___2 fwnode;
|
|
+ struct property *properties;
|
|
+ struct property *deadprops;
|
|
+ struct device_node___2 *parent;
|
|
+ struct device_node___2 *child;
|
|
+ struct device_node___2 *sibling;
|
|
+ long unsigned int _flags;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+struct node___3 {
|
|
+ struct device___2 dev;
|
|
+ struct list_head access_list;
|
|
+ struct work_struct node_work;
|
|
+ struct list_head cache_attrs;
|
|
+ struct device___2 *cache_dev;
|
|
+};
|
|
+
|
|
+struct fd___2 {
|
|
+ struct file___2 *file;
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+typedef struct poll_table_struct___2 poll_table___2;
|
|
+
|
|
+typedef int (*key_restrict_link_func_t___2)(struct key___2 *, const struct key_type *, const union key_payload *, struct key___2 *);
|
|
+
|
|
+struct key_restriction___2;
|
|
+
|
|
+struct key___2 {
|
|
+ refcount_t usage;
|
|
+ key_serial_t serial;
|
|
+ union {
|
|
+ struct list_head graveyard_link;
|
|
+ struct rb_node serial_node;
|
|
+ };
|
|
+ struct rw_semaphore___2 sem;
|
|
+ struct key_user *user;
|
|
+ void *security;
|
|
+ union {
|
|
+ time64_t expiry;
|
|
+ time64_t revoked_at;
|
|
+ };
|
|
+ time64_t last_used_at;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ key_perm_t perm;
|
|
+ short unsigned int quotalen;
|
|
+ short unsigned int datalen;
|
|
+ short int state;
|
|
+ long unsigned int flags;
|
|
+ union {
|
|
+ struct keyring_index_key index_key;
|
|
+ struct {
|
|
+ struct key_type *type;
|
|
+ char *description;
|
|
+ };
|
|
+ };
|
|
+ union {
|
|
+ union key_payload payload;
|
|
+ struct {
|
|
+ struct list_head name_link;
|
|
+ struct assoc_array keys;
|
|
+ };
|
|
+ };
|
|
+ struct key_restriction___2 *restrict_link;
|
|
+};
|
|
+
|
|
+struct key_restriction___2 {
|
|
+ key_restrict_link_func_t___2 check;
|
|
+ struct key___2 *key;
|
|
+ struct key_type *keytype;
|
|
+};
|
|
+
|
|
+struct inet_frags___2;
|
|
+
|
|
+struct netns_frags___2 {
|
|
+ long int high_thresh;
|
|
+ long int low_thresh;
|
|
+ int timeout;
|
|
+ int max_dist;
|
|
+ struct inet_frags___2 *f;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct rhashtable rhashtable;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ atomic_long_t mem;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct netns_ipv4___2 {
|
|
+ struct ctl_table_header *forw_hdr;
|
|
+ struct ctl_table_header *frags_hdr;
|
|
+ struct ctl_table_header *ipv4_hdr;
|
|
+ struct ctl_table_header *route_hdr;
|
|
+ struct ctl_table_header *xfrm4_hdr;
|
|
+ struct ipv4_devconf *devconf_all;
|
|
+ struct ipv4_devconf *devconf_dflt;
|
|
+ struct ip_ra_chain *ra_chain;
|
|
+ struct mutex ra_mutex;
|
|
+ struct fib_rules_ops *rules_ops;
|
|
+ bool fib_has_custom_rules;
|
|
+ unsigned int fib_rules_require_fldissect;
|
|
+ struct fib_table *fib_main;
|
|
+ struct fib_table *fib_default;
|
|
+ bool fib_has_custom_local_routes;
|
|
+ int fib_num_tclassid_users;
|
|
+ struct hlist_head *fib_table_hash;
|
|
+ bool fib_offload_disabled;
|
|
+ struct sock *fibnl;
|
|
+ struct sock **icmp_sk;
|
|
+ struct sock *mc_autojoin_sk;
|
|
+ struct inet_peer_base *peers;
|
|
+ struct sock **tcp_sk;
|
|
+ struct netns_frags___2 frags;
|
|
+ struct xt_table *iptable_filter;
|
|
+ struct xt_table *iptable_mangle;
|
|
+ struct xt_table *iptable_raw;
|
|
+ struct xt_table *arptable_filter;
|
|
+ struct xt_table *iptable_security;
|
|
+ struct xt_table *nat_table;
|
|
+ int sysctl_icmp_echo_ignore_all;
|
|
+ int sysctl_icmp_echo_ignore_broadcasts;
|
|
+ int sysctl_icmp_ignore_bogus_error_responses;
|
|
+ int sysctl_icmp_ratelimit;
|
|
+ int sysctl_icmp_ratemask;
|
|
+ int sysctl_icmp_errors_use_inbound_ifaddr;
|
|
+ struct local_ports ip_local_ports;
|
|
+ int sysctl_tcp_ecn;
|
|
+ int sysctl_tcp_ecn_fallback;
|
|
+ int sysctl_ip_default_ttl;
|
|
+ int sysctl_ip_no_pmtu_disc;
|
|
+ int sysctl_ip_fwd_use_pmtu;
|
|
+ int sysctl_ip_fwd_update_priority;
|
|
+ int sysctl_ip_nonlocal_bind;
|
|
+ int sysctl_ip_dynaddr;
|
|
+ int sysctl_ip_early_demux;
|
|
+ int sysctl_raw_l3mdev_accept;
|
|
+ int sysctl_tcp_early_demux;
|
|
+ int sysctl_udp_early_demux;
|
|
+ int sysctl_fwmark_reflect;
|
|
+ int sysctl_tcp_fwmark_accept;
|
|
+ int sysctl_tcp_l3mdev_accept;
|
|
+ int sysctl_tcp_mtu_probing;
|
|
+ int sysctl_tcp_base_mss;
|
|
+ int sysctl_tcp_min_snd_mss;
|
|
+ int sysctl_tcp_probe_threshold;
|
|
+ u32 sysctl_tcp_probe_interval;
|
|
+ int sysctl_tcp_keepalive_time;
|
|
+ int sysctl_tcp_keepalive_probes;
|
|
+ int sysctl_tcp_keepalive_intvl;
|
|
+ int sysctl_tcp_syn_retries;
|
|
+ int sysctl_tcp_synack_retries;
|
|
+ int sysctl_tcp_syncookies;
|
|
+ int sysctl_tcp_reordering;
|
|
+ int sysctl_tcp_retries1;
|
|
+ int sysctl_tcp_retries2;
|
|
+ int sysctl_tcp_orphan_retries;
|
|
+ int sysctl_tcp_fin_timeout;
|
|
+ unsigned int sysctl_tcp_notsent_lowat;
|
|
+ int sysctl_tcp_tw_reuse;
|
|
+ int sysctl_tcp_sack;
|
|
+ int sysctl_tcp_window_scaling;
|
|
+ int sysctl_tcp_timestamps;
|
|
+ int sysctl_tcp_early_retrans;
|
|
+ int sysctl_tcp_recovery;
|
|
+ int sysctl_tcp_thin_linear_timeouts;
|
|
+ int sysctl_tcp_slow_start_after_idle;
|
|
+ int sysctl_tcp_retrans_collapse;
|
|
+ int sysctl_tcp_stdurg;
|
|
+ int sysctl_tcp_rfc1337;
|
|
+ int sysctl_tcp_abort_on_overflow;
|
|
+ int sysctl_tcp_fack;
|
|
+ int sysctl_tcp_max_reordering;
|
|
+ int sysctl_tcp_dsack;
|
|
+ int sysctl_tcp_app_win;
|
|
+ int sysctl_tcp_adv_win_scale;
|
|
+ int sysctl_tcp_frto;
|
|
+ int sysctl_tcp_nometrics_save;
|
|
+ int sysctl_tcp_moderate_rcvbuf;
|
|
+ int sysctl_tcp_tso_win_divisor;
|
|
+ int sysctl_tcp_workaround_signed_windows;
|
|
+ int sysctl_tcp_limit_output_bytes;
|
|
+ int sysctl_tcp_challenge_ack_limit;
|
|
+ int sysctl_tcp_min_tso_segs;
|
|
+ int sysctl_tcp_min_rtt_wlen;
|
|
+ int sysctl_tcp_autocorking;
|
|
+ int sysctl_tcp_invalid_ratelimit;
|
|
+ int sysctl_tcp_pacing_ss_ratio;
|
|
+ int sysctl_tcp_pacing_ca_ratio;
|
|
+ int sysctl_tcp_wmem[3];
|
|
+ int sysctl_tcp_rmem[3];
|
|
+ int sysctl_tcp_comp_sack_nr;
|
|
+ long unsigned int sysctl_tcp_comp_sack_delay_ns;
|
|
+ long: 64;
|
|
+ struct inet_timewait_death_row tcp_death_row;
|
|
+ int sysctl_max_syn_backlog;
|
|
+ int sysctl_tcp_fastopen;
|
|
+ const struct tcp_congestion_ops *tcp_congestion_control;
|
|
+ struct tcp_fastopen_context *tcp_fastopen_ctx;
|
|
+ spinlock_t tcp_fastopen_ctx_lock;
|
|
+ unsigned int sysctl_tcp_fastopen_blackhole_timeout;
|
|
+ atomic_t tfo_active_disable_times;
|
|
+ long unsigned int tfo_active_disable_stamp;
|
|
+ int sysctl_udp_wmem_min;
|
|
+ int sysctl_udp_rmem_min;
|
|
+ int sysctl_udp_l3mdev_accept;
|
|
+ int sysctl_igmp_max_memberships;
|
|
+ int sysctl_igmp_max_msf;
|
|
+ int sysctl_igmp_llm_reports;
|
|
+ int sysctl_igmp_qrv;
|
|
+ struct ping_group_range ping_group_range;
|
|
+ atomic_t dev_addr_genid;
|
|
+ long unsigned int *sysctl_local_reserved_ports;
|
|
+ int sysctl_ip_prot_sock;
|
|
+ struct list_head mr_tables;
|
|
+ struct fib_rules_ops *mr_rules_ops;
|
|
+ int sysctl_fib_multipath_use_neigh;
|
|
+ int sysctl_fib_multipath_hash_policy;
|
|
+ struct fib_notifier_ops *notifier_ops;
|
|
+ unsigned int fib_seq;
|
|
+ struct fib_notifier_ops *ipmr_notifier_ops;
|
|
+ unsigned int ipmr_seq;
|
|
+ atomic_t rt_genid;
|
|
+ siphash_key_t ip_id_key;
|
|
+};
|
|
+
|
|
+struct net_device___2;
|
|
+
|
|
+struct sk_buff___2;
|
|
+
|
|
+struct dst_ops___2 {
|
|
+ short unsigned int family;
|
|
+ unsigned int gc_thresh;
|
|
+ int (*gc)(struct dst_ops___2 *);
|
|
+ struct dst_entry * (*check)(struct dst_entry *, __u32);
|
|
+ unsigned int (*default_advmss)(const struct dst_entry *);
|
|
+ unsigned int (*mtu)(const struct dst_entry *);
|
|
+ u32 * (*cow_metrics)(struct dst_entry *, long unsigned int);
|
|
+ void (*destroy)(struct dst_entry *);
|
|
+ void (*ifdown)(struct dst_entry *, struct net_device___2 *, int);
|
|
+ struct dst_entry * (*negative_advice)(struct dst_entry *);
|
|
+ void (*link_failure)(struct sk_buff___2 *);
|
|
+ void (*update_pmtu)(struct dst_entry *, struct sock *, struct sk_buff___2 *, u32, bool);
|
|
+ void (*redirect)(struct dst_entry *, struct sock *, struct sk_buff___2 *);
|
|
+ int (*local_out)(struct net___2 *, struct sock *, struct sk_buff___2 *);
|
|
+ struct neighbour * (*neigh_lookup)(const struct dst_entry *, struct sk_buff___2 *, const void *);
|
|
+ void (*confirm_neigh)(const struct dst_entry *, const void *);
|
|
+ struct kmem_cache *kmem_cachep;
|
|
+ struct percpu_counter pcpuc_entries;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct netns_ipv6___2 {
|
|
+ struct netns_sysctl_ipv6 sysctl;
|
|
+ struct ipv6_devconf *devconf_all;
|
|
+ struct ipv6_devconf *devconf_dflt;
|
|
+ struct inet_peer_base *peers;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct netns_frags___2 frags;
|
|
+ struct xt_table *ip6table_filter;
|
|
+ struct xt_table *ip6table_mangle;
|
|
+ struct xt_table *ip6table_raw;
|
|
+ struct xt_table *ip6table_security;
|
|
+ struct xt_table *ip6table_nat;
|
|
+ struct fib6_info *fib6_null_entry;
|
|
+ struct rt6_info *ip6_null_entry;
|
|
+ struct rt6_statistics *rt6_stats;
|
|
+ struct timer_list ip6_fib_timer;
|
|
+ struct hlist_head *fib_table_hash;
|
|
+ struct fib6_table *fib6_main_tbl;
|
|
+ struct list_head fib6_walkers;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct dst_ops___2 ip6_dst_ops;
|
|
+ rwlock_t fib6_walker_lock;
|
|
+ spinlock_t fib6_gc_lock;
|
|
+ unsigned int ip6_rt_gc_expire;
|
|
+ long unsigned int ip6_rt_last_gc;
|
|
+ unsigned int fib6_rules_require_fldissect;
|
|
+ bool fib6_has_custom_rules;
|
|
+ struct rt6_info *ip6_prohibit_entry;
|
|
+ struct rt6_info *ip6_blk_hole_entry;
|
|
+ struct fib6_table *fib6_local_tbl;
|
|
+ struct fib_rules_ops *fib6_rules_ops;
|
|
+ struct sock **icmp_sk;
|
|
+ struct sock *ndisc_sk;
|
|
+ struct sock *tcp_sk;
|
|
+ struct sock *igmp_sk;
|
|
+ struct sock *mc_autojoin_sk;
|
|
+ struct list_head mr6_tables;
|
|
+ struct fib_rules_ops *mr6_rules_ops;
|
|
+ atomic_t dev_addr_genid;
|
|
+ atomic_t fib6_sernum;
|
|
+ struct seg6_pernet_data *seg6_data;
|
|
+ struct fib_notifier_ops *notifier_ops;
|
|
+ struct fib_notifier_ops *ip6mr_notifier_ops;
|
|
+ unsigned int ipmr_seq;
|
|
+ struct {
|
|
+ struct hlist_head head;
|
|
+ spinlock_t lock;
|
|
+ u32 seq;
|
|
+ } ip6addrlbl_table;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct netns_ieee802154_lowpan___2 {
|
|
+ struct netns_sysctl_lowpan sysctl;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct netns_frags___2 frags;
|
|
+};
|
|
+
|
|
+struct netns_nf_frag___2 {
|
|
+ struct netns_frags___2 frags;
|
|
+};
|
|
+
|
|
+struct netns_xfrm___2 {
|
|
+ struct list_head state_all;
|
|
+ struct hlist_head *state_bydst;
|
|
+ struct hlist_head *state_bysrc;
|
|
+ struct hlist_head *state_byspi;
|
|
+ unsigned int state_hmask;
|
|
+ unsigned int state_num;
|
|
+ struct work_struct state_hash_work;
|
|
+ struct list_head policy_all;
|
|
+ struct hlist_head *policy_byidx;
|
|
+ unsigned int policy_idx_hmask;
|
|
+ struct hlist_head policy_inexact[3];
|
|
+ struct xfrm_policy_hash policy_bydst[3];
|
|
+ unsigned int policy_count[6];
|
|
+ struct work_struct policy_hash_work;
|
|
+ struct xfrm_policy_hthresh policy_hthresh;
|
|
+ struct sock *nlsk;
|
|
+ struct sock *nlsk_stash;
|
|
+ u32 sysctl_aevent_etime;
|
|
+ u32 sysctl_aevent_rseqth;
|
|
+ int sysctl_larval_drop;
|
|
+ u32 sysctl_acq_expires;
|
|
+ struct ctl_table_header *sysctl_hdr;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct dst_ops___2 xfrm4_dst_ops;
|
|
+ struct dst_ops___2 xfrm6_dst_ops;
|
|
+ spinlock_t xfrm_state_lock;
|
|
+ spinlock_t xfrm_policy_lock;
|
|
+ struct mutex xfrm_cfg_mutex;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct net___2 {
|
|
+ refcount_t passive;
|
|
+ refcount_t count;
|
|
+ spinlock_t rules_mod_lock;
|
|
+ u32 hash_mix;
|
|
+ atomic64_t cookie_gen;
|
|
+ struct list_head list;
|
|
+ struct list_head exit_list;
|
|
+ struct llist_node cleanup_list;
|
|
+ struct user_namespace___2 *user_ns;
|
|
+ struct ucounts___2 *ucounts;
|
|
+ spinlock_t nsid_lock;
|
|
+ struct idr netns_ids;
|
|
+ struct ns_common___2 ns;
|
|
+ struct proc_dir_entry *proc_net;
|
|
+ struct proc_dir_entry *proc_net_stat;
|
|
+ struct ctl_table_set sysctls;
|
|
+ struct sock *rtnl;
|
|
+ struct sock *genl_sock;
|
|
+ struct uevent_sock *uevent_sock;
|
|
+ struct list_head dev_base_head;
|
|
+ struct hlist_head *dev_name_head;
|
|
+ struct hlist_head *dev_index_head;
|
|
+ unsigned int dev_base_seq;
|
|
+ int ifindex;
|
|
+ unsigned int dev_unreg_count;
|
|
+ struct list_head rules_ops;
|
|
+ struct list_head fib_notifier_ops;
|
|
+ struct net_device___2 *loopback_dev;
|
|
+ struct netns_core core;
|
|
+ struct netns_mib mib;
|
|
+ struct netns_packet packet;
|
|
+ struct netns_unix unx;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct netns_ipv4___2 ipv4;
|
|
+ struct netns_ipv6___2 ipv6;
|
|
+ struct netns_ieee802154_lowpan___2 ieee802154_lowpan;
|
|
+ struct netns_sctp sctp;
|
|
+ struct netns_nf nf;
|
|
+ struct netns_xt xt;
|
|
+ struct netns_ct ct;
|
|
+ struct netns_nftables nft;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct netns_nf_frag___2 nf_frag;
|
|
+ struct ctl_table_header *nf_frag_frags_hdr;
|
|
+ struct sock *nfnl;
|
|
+ struct sock *nfnl_stash;
|
|
+ struct list_head nfct_timeout_list;
|
|
+ struct net_generic *gen;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct netns_xfrm___2 xfrm;
|
|
+ struct netns_ipvs *ipvs;
|
|
+ struct netns_mpls mpls;
|
|
+ struct netns_can can;
|
|
+ struct sock *diag_nlsk;
|
|
+ atomic_t fnhe_genid;
|
|
+};
|
|
+
|
|
+struct cgroup_namespace___2 {
|
|
+ refcount_t count;
|
|
+ struct ns_common___2 ns;
|
|
+ struct user_namespace___2 *user_ns;
|
|
+ struct ucounts___2 *ucounts;
|
|
+ struct css_set___2 *root_cset;
|
|
+};
|
|
+
|
|
+struct proc_ns_operations___2 {
|
|
+ const char *name;
|
|
+ const char *real_ns_name;
|
|
+ int type;
|
|
+ struct ns_common___2 * (*get)(struct task_struct___2 *);
|
|
+ void (*put)(struct ns_common___2 *);
|
|
+ int (*install)(struct nsproxy___2 *, struct ns_common___2 *);
|
|
+ struct user_namespace___2 * (*owner)(struct ns_common___2 *);
|
|
+ struct ns_common___2 * (*get_parent)(struct ns_common___2 *);
|
|
+};
|
|
+
|
|
+struct ucounts___2 {
|
|
+ struct hlist_node node;
|
|
+ struct user_namespace___2 *ns;
|
|
+ kuid_t uid;
|
|
+ int count;
|
|
+ atomic_t ucount[9];
|
|
+};
|
|
+
|
|
+enum perf_event_read_format {
|
|
+ PERF_FORMAT_TOTAL_TIME_ENABLED = 1,
|
|
+ PERF_FORMAT_TOTAL_TIME_RUNNING = 2,
|
|
+ PERF_FORMAT_ID = 4,
|
|
+ PERF_FORMAT_GROUP = 8,
|
|
+ PERF_FORMAT_MAX = 16,
|
|
+};
|
|
+
|
|
+enum perf_event_ioc_flags {
|
|
+ PERF_IOC_FLAG_GROUP = 1,
|
|
+};
|
|
+
|
|
+struct perf_ns_link_info {
|
|
+ __u64 dev;
|
|
+ __u64 ino;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NET_NS_INDEX = 0,
|
|
+ UTS_NS_INDEX = 1,
|
|
+ IPC_NS_INDEX = 2,
|
|
+ PID_NS_INDEX = 3,
|
|
+ USER_NS_INDEX = 4,
|
|
+ MNT_NS_INDEX = 5,
|
|
+ CGROUP_NS_INDEX = 6,
|
|
+ NR_NAMESPACES = 7,
|
|
+};
|
|
+
|
|
+enum perf_event_type {
|
|
+ PERF_RECORD_MMAP = 1,
|
|
+ PERF_RECORD_LOST = 2,
|
|
+ PERF_RECORD_COMM = 3,
|
|
+ PERF_RECORD_EXIT = 4,
|
|
+ PERF_RECORD_THROTTLE = 5,
|
|
+ PERF_RECORD_UNTHROTTLE = 6,
|
|
+ PERF_RECORD_FORK = 7,
|
|
+ PERF_RECORD_READ = 8,
|
|
+ PERF_RECORD_SAMPLE = 9,
|
|
+ PERF_RECORD_MMAP2 = 10,
|
|
+ PERF_RECORD_AUX = 11,
|
|
+ PERF_RECORD_ITRACE_START = 12,
|
|
+ PERF_RECORD_LOST_SAMPLES = 13,
|
|
+ PERF_RECORD_SWITCH = 14,
|
|
+ PERF_RECORD_SWITCH_CPU_WIDE = 15,
|
|
+ PERF_RECORD_NAMESPACES = 16,
|
|
+ PERF_RECORD_MAX = 17,
|
|
+};
|
|
+
|
|
+struct perf_cpu_context___2;
|
|
+
|
|
+struct pmu___2 {
|
|
+ struct list_head entry;
|
|
+ struct module___2 *module;
|
|
+ struct device___2 *dev;
|
|
+ const struct attribute_group___2 **attr_groups;
|
|
+ const char *name;
|
|
+ int type;
|
|
+ int capabilities;
|
|
+ int *pmu_disable_count;
|
|
+ struct perf_cpu_context___2 *pmu_cpu_context;
|
|
+ atomic_t exclusive_cnt;
|
|
+ int task_ctx_nr;
|
|
+ int hrtimer_interval_ms;
|
|
+ unsigned int nr_addr_filters;
|
|
+ void (*pmu_enable)(struct pmu___2 *);
|
|
+ void (*pmu_disable)(struct pmu___2 *);
|
|
+ int (*event_init)(struct perf_event___2 *);
|
|
+ void (*event_mapped)(struct perf_event___2 *, struct mm_struct___2 *);
|
|
+ void (*event_unmapped)(struct perf_event___2 *, struct mm_struct___2 *);
|
|
+ int (*add)(struct perf_event___2 *, int);
|
|
+ void (*del)(struct perf_event___2 *, int);
|
|
+ void (*start)(struct perf_event___2 *, int);
|
|
+ void (*stop)(struct perf_event___2 *, int);
|
|
+ void (*read)(struct perf_event___2 *);
|
|
+ void (*start_txn)(struct pmu___2 *, unsigned int);
|
|
+ int (*commit_txn)(struct pmu___2 *);
|
|
+ void (*cancel_txn)(struct pmu___2 *);
|
|
+ int (*event_idx)(struct perf_event___2 *);
|
|
+ void (*sched_task)(struct perf_event_context___2 *, bool);
|
|
+ size_t task_ctx_size;
|
|
+ void * (*setup_aux)(struct perf_event___2 *, void **, int, bool);
|
|
+ void (*free_aux)(void *);
|
|
+ int (*addr_filters_validate)(struct list_head *);
|
|
+ void (*addr_filters_sync)(struct perf_event___2 *);
|
|
+ int (*filter_match)(struct perf_event___2 *);
|
|
+ int (*check_period)(struct perf_event___2 *, u64);
|
|
+};
|
|
+
|
|
+struct kernel_param_ops___2 {
|
|
+ unsigned int flags;
|
|
+ int (*set)(const char *, const struct kernel_param___2 *);
|
|
+ int (*get)(char *, const struct kernel_param___2 *);
|
|
+ void (*free)(void *);
|
|
+};
|
|
+
|
|
+struct kparam_array___2;
|
|
+
|
|
+struct kernel_param___2 {
|
|
+ const char *name;
|
|
+ struct module___2 *mod;
|
|
+ const struct kernel_param_ops___2 *ops;
|
|
+ const u16 perm;
|
|
+ s8 level;
|
|
+ u8 flags;
|
|
+ union {
|
|
+ void *arg;
|
|
+ const struct kparam_string *str;
|
|
+ const struct kparam_array___2 *arr;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct kparam_array___2 {
|
|
+ unsigned int max;
|
|
+ unsigned int elemsize;
|
|
+ unsigned int *num;
|
|
+ const struct kernel_param_ops___2 *ops;
|
|
+ void *elem;
|
|
+};
|
|
+
|
|
+struct module_attribute___2 {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct module_attribute___2 *, struct module_kobject___2 *, char *);
|
|
+ ssize_t (*store)(struct module_attribute___2 *, struct module_kobject___2 *, const char *, size_t);
|
|
+ void (*setup)(struct module___2 *, const char *);
|
|
+ int (*test)(struct module___2 *);
|
|
+ void (*free)(struct module___2 *);
|
|
+};
|
|
+
|
|
+struct trace_event_class___2;
|
|
+
|
|
+struct bpf_prog_array___2;
|
|
+
|
|
+struct trace_event_call___2 {
|
|
+ struct list_head list;
|
|
+ struct trace_event_class___2 *class;
|
|
+ union {
|
|
+ char *name;
|
|
+ struct tracepoint *tp;
|
|
+ };
|
|
+ struct trace_event event;
|
|
+ char *print_fmt;
|
|
+ struct event_filter *filter;
|
|
+ void *mod;
|
|
+ void *data;
|
|
+ int flags;
|
|
+ int perf_refcount;
|
|
+ struct hlist_head *perf_events;
|
|
+ struct bpf_prog_array___2 *prog_array;
|
|
+ int (*perf_perm)(struct trace_event_call___2 *, struct perf_event___2 *);
|
|
+};
|
|
+
|
|
+struct bpf_prog_aux___2;
|
|
+
|
|
+struct bpf_prog___2 {
|
|
+ u16 pages;
|
|
+ u16 jited: 1;
|
|
+ u16 jit_requested: 1;
|
|
+ u16 undo_set_mem: 1;
|
|
+ u16 gpl_compatible: 1;
|
|
+ u16 cb_access: 1;
|
|
+ u16 dst_needed: 1;
|
|
+ u16 blinded: 1;
|
|
+ u16 is_func: 1;
|
|
+ u16 kprobe_override: 1;
|
|
+ u16 has_callchain_buf: 1;
|
|
+ enum bpf_prog_type type;
|
|
+ enum bpf_attach_type expected_attach_type;
|
|
+ u32 len;
|
|
+ u32 jited_len;
|
|
+ u8 tag[8];
|
|
+ struct bpf_prog_aux___2 *aux;
|
|
+ struct sock_fprog_kern *orig_prog;
|
|
+ unsigned int (*bpf_func)(const void *, const struct bpf_insn *);
|
|
+ union {
|
|
+ struct sock_filter insns[0];
|
|
+ struct bpf_insn insnsi[0];
|
|
+ };
|
|
+};
|
|
+
|
|
+struct cgroup_bpf___2 {
|
|
+ struct bpf_prog_array___2 *effective[21];
|
|
+ struct list_head progs[21];
|
|
+ u32 flags[21];
|
|
+ struct bpf_prog_array___2 *inactive;
|
|
+};
|
|
+
|
|
+struct bpf_prog_array_item___2 {
|
|
+ struct bpf_prog___2 *prog;
|
|
+ struct bpf_cgroup_storage *cgroup_storage;
|
|
+};
|
|
+
|
|
+struct bpf_prog_array___2 {
|
|
+ struct callback_head rcu;
|
|
+ struct bpf_prog_array_item___2 items[0];
|
|
+};
|
|
+
|
|
+struct cgroup_file___2 {
|
|
+ struct kernfs_node___2 *kn;
|
|
+ long unsigned int notified_at;
|
|
+ struct timer_list notify_timer;
|
|
+};
|
|
+
|
|
+struct cgroup_subsys___2;
|
|
+
|
|
+struct cgroup_subsys_state___2 {
|
|
+ struct cgroup___2 *cgroup;
|
|
+ struct cgroup_subsys___2 *ss;
|
|
+ struct percpu_ref refcnt;
|
|
+ struct list_head sibling;
|
|
+ struct list_head children;
|
|
+ struct list_head rstat_css_node;
|
|
+ int id;
|
|
+ unsigned int flags;
|
|
+ u64 serial_nr;
|
|
+ atomic_t online_cnt;
|
|
+ struct work_struct destroy_work;
|
|
+ struct rcu_work destroy_rwork;
|
|
+ struct cgroup_subsys_state___2 *parent;
|
|
+};
|
|
+
|
|
+struct cgroup_root___2;
|
|
+
|
|
+struct cgroup_rstat_cpu___2;
|
|
+
|
|
+struct cgroup___2 {
|
|
+ struct cgroup_subsys_state___2 self;
|
|
+ long unsigned int flags;
|
|
+ int id;
|
|
+ int level;
|
|
+ int max_depth;
|
|
+ int nr_descendants;
|
|
+ int nr_dying_descendants;
|
|
+ int max_descendants;
|
|
+ int nr_populated_csets;
|
|
+ int nr_populated_domain_children;
|
|
+ int nr_populated_threaded_children;
|
|
+ int nr_threaded_children;
|
|
+ struct kernfs_node___2 *kn;
|
|
+ struct cgroup_file___2 procs_file;
|
|
+ struct cgroup_file___2 events_file;
|
|
+ u16 subtree_control;
|
|
+ u16 subtree_ss_mask;
|
|
+ u16 old_subtree_control;
|
|
+ u16 old_subtree_ss_mask;
|
|
+ struct cgroup_subsys_state___2 *subsys[14];
|
|
+ struct cgroup_root___2 *root;
|
|
+ struct list_head cset_links;
|
|
+ struct list_head e_csets[14];
|
|
+ struct cgroup___2 *dom_cgrp;
|
|
+ struct cgroup___2 *old_dom_cgrp;
|
|
+ struct cgroup_rstat_cpu___2 *rstat_cpu;
|
|
+ struct list_head rstat_css_list;
|
|
+ struct cgroup_base_stat pending_bstat;
|
|
+ struct cgroup_base_stat bstat;
|
|
+ struct prev_cputime prev_cputime;
|
|
+ struct list_head pidlists;
|
|
+ struct mutex pidlist_mutex;
|
|
+ wait_queue_head_t offline_waitq;
|
|
+ struct work_struct release_agent_work;
|
|
+ struct cgroup_bpf___2 bpf;
|
|
+ atomic_t congestion_count;
|
|
+ int ancestor_ids[0];
|
|
+};
|
|
+
|
|
+struct cftype___2;
|
|
+
|
|
+struct cgroup_subsys___2 {
|
|
+ struct cgroup_subsys_state___2 * (*css_alloc)(struct cgroup_subsys_state___2 *);
|
|
+ int (*css_online)(struct cgroup_subsys_state___2 *);
|
|
+ void (*css_offline)(struct cgroup_subsys_state___2 *);
|
|
+ void (*css_released)(struct cgroup_subsys_state___2 *);
|
|
+ void (*css_free)(struct cgroup_subsys_state___2 *);
|
|
+ void (*css_reset)(struct cgroup_subsys_state___2 *);
|
|
+ void (*css_rstat_flush)(struct cgroup_subsys_state___2 *, int);
|
|
+ int (*css_extra_stat_show)(struct seq_file___2 *, struct cgroup_subsys_state___2 *);
|
|
+ int (*can_attach)(struct cgroup_taskset *);
|
|
+ void (*cancel_attach)(struct cgroup_taskset *);
|
|
+ void (*attach)(struct cgroup_taskset *);
|
|
+ void (*post_attach)();
|
|
+ int (*can_fork)(struct task_struct___2 *);
|
|
+ void (*cancel_fork)(struct task_struct___2 *);
|
|
+ void (*fork)(struct task_struct___2 *);
|
|
+ void (*exit)(struct task_struct___2 *);
|
|
+ void (*release)(struct task_struct___2 *);
|
|
+ void (*bind)(struct cgroup_subsys_state___2 *);
|
|
+ bool early_init: 1;
|
|
+ bool implicit_on_dfl: 1;
|
|
+ bool threaded: 1;
|
|
+ bool broken_hierarchy: 1;
|
|
+ bool warned_broken_hierarchy: 1;
|
|
+ int id;
|
|
+ const char *name;
|
|
+ const char *legacy_name;
|
|
+ struct cgroup_root___2 *root;
|
|
+ struct idr css_idr;
|
|
+ struct list_head cfts;
|
|
+ struct cftype___2 *dfl_cftypes;
|
|
+ struct cftype___2 *legacy_cftypes;
|
|
+ unsigned int depends_on;
|
|
+};
|
|
+
|
|
+struct cgroup_rstat_cpu___2 {
|
|
+ struct u64_stats_sync bsync;
|
|
+ struct cgroup_base_stat bstat;
|
|
+ struct cgroup_base_stat last_bstat;
|
|
+ struct cgroup___2 *updated_children;
|
|
+ struct cgroup___2 *updated_next;
|
|
+};
|
|
+
|
|
+struct cgroup_root___2 {
|
|
+ struct kernfs_root___2 *kf_root;
|
|
+ unsigned int subsys_mask;
|
|
+ int hierarchy_id;
|
|
+ struct cgroup___2 cgrp;
|
|
+ int cgrp_ancestor_id_storage;
|
|
+ atomic_t nr_cgrps;
|
|
+ struct list_head root_list;
|
|
+ unsigned int flags;
|
|
+ struct idr cgroup_idr;
|
|
+ char release_agent_path[4096];
|
|
+ char name[64];
|
|
+};
|
|
+
|
|
+struct cftype___2 {
|
|
+ char name[64];
|
|
+ long unsigned int private;
|
|
+ size_t max_write_len;
|
|
+ unsigned int flags;
|
|
+ unsigned int file_offset;
|
|
+ struct cgroup_subsys___2 *ss;
|
|
+ struct list_head node;
|
|
+ struct kernfs_ops___2 *kf_ops;
|
|
+ int (*open)(struct kernfs_open_file___2 *);
|
|
+ void (*release)(struct kernfs_open_file___2 *);
|
|
+ u64 (*read_u64)(struct cgroup_subsys_state___2 *, struct cftype___2 *);
|
|
+ s64 (*read_s64)(struct cgroup_subsys_state___2 *, struct cftype___2 *);
|
|
+ int (*seq_show)(struct seq_file___2 *, void *);
|
|
+ void * (*seq_start)(struct seq_file___2 *, loff_t *);
|
|
+ void * (*seq_next)(struct seq_file___2 *, void *, loff_t *);
|
|
+ void (*seq_stop)(struct seq_file___2 *, void *);
|
|
+ int (*write_u64)(struct cgroup_subsys_state___2 *, struct cftype___2 *, u64);
|
|
+ int (*write_s64)(struct cgroup_subsys_state___2 *, struct cftype___2 *, s64);
|
|
+ ssize_t (*write)(struct kernfs_open_file___2 *, char *, size_t, loff_t);
|
|
+};
|
|
+
|
|
+struct perf_cpu_context___2 {
|
|
+ struct perf_event_context___2 ctx;
|
|
+ struct perf_event_context___2 *task_ctx;
|
|
+ int active_oncpu;
|
|
+ int exclusive;
|
|
+ raw_spinlock_t hrtimer_lock;
|
|
+ struct hrtimer hrtimer;
|
|
+ ktime_t hrtimer_interval;
|
|
+ unsigned int hrtimer_active;
|
|
+ struct perf_cgroup___2 *cgrp;
|
|
+ struct list_head cgrp_cpuctx_entry;
|
|
+ struct list_head sched_cb_entry;
|
|
+ int sched_cb_usage;
|
|
+ int online;
|
|
+};
|
|
+
|
|
+struct perf_addr_filter___2 {
|
|
+ struct list_head entry;
|
|
+ struct path___2 path;
|
|
+ long unsigned int offset;
|
|
+ long unsigned int size;
|
|
+ enum perf_addr_filter_action_t action;
|
|
+};
|
|
+
|
|
+struct swevent_hlist {
|
|
+ struct hlist_head heads[256];
|
|
+ struct callback_head callback_head;
|
|
+};
|
|
+
|
|
+struct pmu_event_list {
|
|
+ raw_spinlock_t lock;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct ring_buffer___2 {
|
|
+ atomic_t refcount;
|
|
+ struct callback_head callback_head;
|
|
+ int nr_pages;
|
|
+ int overwrite;
|
|
+ int paused;
|
|
+ atomic_t poll;
|
|
+ local_t head;
|
|
+ local_t nest;
|
|
+ local_t events;
|
|
+ local_t wakeup;
|
|
+ local_t lost;
|
|
+ long int watermark;
|
|
+ long int aux_watermark;
|
|
+ spinlock_t event_lock;
|
|
+ struct list_head event_list;
|
|
+ atomic_t mmap_count;
|
|
+ long unsigned int mmap_locked;
|
|
+ struct user_struct___2 *mmap_user;
|
|
+ long int aux_head;
|
|
+ local_t aux_nest;
|
|
+ long int aux_wakeup;
|
|
+ long unsigned int aux_pgoff;
|
|
+ int aux_nr_pages;
|
|
+ int aux_overwrite;
|
|
+ atomic_t aux_mmap_count;
|
|
+ long unsigned int aux_mmap_locked;
|
|
+ void (*free_aux)(void *);
|
|
+ atomic_t aux_refcount;
|
|
+ void **aux_pages;
|
|
+ void *aux_priv;
|
|
+ struct perf_event_mmap_page *user_page;
|
|
+ void *data_pages[0];
|
|
+};
|
|
+
|
|
+struct perf_cgroup___2 {
|
|
+ struct cgroup_subsys_state___2 css;
|
|
+ struct perf_cgroup_info *info;
|
|
+};
|
|
+
|
|
+struct perf_output_handle___2 {
|
|
+ struct perf_event___2 *event;
|
|
+ struct ring_buffer___2 *rb;
|
|
+ long unsigned int wakeup;
|
|
+ long unsigned int size;
|
|
+ u64 aux_flags;
|
|
+ union {
|
|
+ void *addr;
|
|
+ long unsigned int head;
|
|
+ };
|
|
+ int page;
|
|
+};
|
|
+
|
|
+struct bpf_perf_event_data_kern___2 {
|
|
+ bpf_user_pt_regs_t *regs;
|
|
+ struct perf_sample_data *data;
|
|
+ struct perf_event___2 *event;
|
|
+};
|
|
+
|
|
+struct perf_pmu_events_attr___2 {
|
|
+ struct device_attribute___2 attr;
|
|
+ u64 id;
|
|
+ const char *event_str;
|
|
+};
|
|
+
|
|
+struct trace_event_class___2 {
|
|
+ const char *system;
|
|
+ void *probe;
|
|
+ void *perf_probe;
|
|
+ int (*reg)(struct trace_event_call___2 *, enum trace_reg, void *);
|
|
+ int (*define_fields)(struct trace_event_call___2 *);
|
|
+ struct list_head * (*get_fields)(struct trace_event_call___2 *);
|
|
+ struct list_head fields;
|
|
+ int (*raw_init)(struct trace_event_call___2 *);
|
|
+};
|
|
+
|
|
+struct syscall_metadata___2 {
|
|
+ const char *name;
|
|
+ int syscall_nr;
|
|
+ int nb_args;
|
|
+ const char **types;
|
|
+ const char **args;
|
|
+ struct list_head enter_fields;
|
|
+ struct trace_event_call___2 *enter_event;
|
|
+ struct trace_event_call___2 *exit_event;
|
|
+};
|
|
+
|
|
+struct bpf_map___2;
|
|
+
|
|
+struct bpf_map_ops___2 {
|
|
+ int (*map_alloc_check)(union bpf_attr *);
|
|
+ struct bpf_map___2 * (*map_alloc)(union bpf_attr *);
|
|
+ void (*map_release)(struct bpf_map___2 *, struct file___2 *);
|
|
+ void (*map_free)(struct bpf_map___2 *);
|
|
+ int (*map_get_next_key)(struct bpf_map___2 *, void *, void *);
|
|
+ void (*map_release_uref)(struct bpf_map___2 *);
|
|
+ void * (*map_lookup_elem_sys_only)(struct bpf_map___2 *, void *);
|
|
+ void * (*map_lookup_elem)(struct bpf_map___2 *, void *);
|
|
+ int (*map_update_elem)(struct bpf_map___2 *, void *, void *, u64);
|
|
+ int (*map_delete_elem)(struct bpf_map___2 *, void *);
|
|
+ void * (*map_fd_get_ptr)(struct bpf_map___2 *, struct file___2 *, int);
|
|
+ void (*map_fd_put_ptr)(void *);
|
|
+ u32 (*map_gen_lookup)(struct bpf_map___2 *, struct bpf_insn *);
|
|
+ u32 (*map_fd_sys_lookup_elem)(void *);
|
|
+ void (*map_seq_show_elem)(struct bpf_map___2 *, void *, struct seq_file___2 *);
|
|
+ int (*map_check_btf)(const struct bpf_map___2 *, const struct btf_type *, const struct btf_type *);
|
|
+};
|
|
+
|
|
+struct bpf_map___2 {
|
|
+ const struct bpf_map_ops___2 *ops;
|
|
+ struct bpf_map___2 *inner_map_meta;
|
|
+ void *security;
|
|
+ enum bpf_map_type map_type;
|
|
+ u32 key_size;
|
|
+ u32 value_size;
|
|
+ u32 max_entries;
|
|
+ u32 map_flags;
|
|
+ u32 pages;
|
|
+ u32 id;
|
|
+ int numa_node;
|
|
+ u32 btf_key_type_id;
|
|
+ u32 btf_value_type_id;
|
|
+ struct btf *btf;
|
|
+ bool unpriv_array;
|
|
+ long: 56;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct user_struct___2 *user;
|
|
+ atomic_t refcnt;
|
|
+ atomic_t usercnt;
|
|
+ struct work_struct work;
|
|
+ char name[16];
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct bpf_offloaded_map___2;
|
|
+
|
|
+struct bpf_map_dev_ops___2 {
|
|
+ int (*map_get_next_key)(struct bpf_offloaded_map___2 *, void *, void *);
|
|
+ int (*map_lookup_elem)(struct bpf_offloaded_map___2 *, void *, void *);
|
|
+ int (*map_update_elem)(struct bpf_offloaded_map___2 *, void *, void *, u64);
|
|
+ int (*map_delete_elem)(struct bpf_offloaded_map___2 *, void *);
|
|
+};
|
|
+
|
|
+struct bpf_offloaded_map___2 {
|
|
+ struct bpf_map___2 map;
|
|
+ struct net_device___2 *netdev;
|
|
+ const struct bpf_map_dev_ops___2 *dev_ops;
|
|
+ void *dev_priv;
|
|
+ struct list_head offloads;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+typedef rx_handler_result_t rx_handler_func_t___2(struct sk_buff___2 **);
|
|
+
|
|
+typedef struct {
|
|
+ struct net___2 *net;
|
|
+} possible_net_t___2;
|
|
+
|
|
+struct net_device_ops___2;
|
|
+
|
|
+struct ethtool_ops___2;
|
|
+
|
|
+struct xfrmdev_ops___2;
|
|
+
|
|
+struct tlsdev_ops___2;
|
|
+
|
|
+struct header_ops___2;
|
|
+
|
|
+struct netdev_rx_queue___2;
|
|
+
|
|
+struct mini_Qdisc___2;
|
|
+
|
|
+struct netdev_queue___2;
|
|
+
|
|
+struct Qdisc___2;
|
|
+
|
|
+struct rtnl_link_ops___2;
|
|
+
|
|
+struct dcbnl_rtnl_ops___2;
|
|
+
|
|
+struct net_device___2 {
|
|
+ char name[16];
|
|
+ struct hlist_node name_hlist;
|
|
+ struct dev_ifalias *ifalias;
|
|
+ long unsigned int mem_end;
|
|
+ long unsigned int mem_start;
|
|
+ long unsigned int base_addr;
|
|
+ int irq;
|
|
+ long unsigned int state;
|
|
+ struct list_head dev_list;
|
|
+ struct list_head napi_list;
|
|
+ struct list_head unreg_list;
|
|
+ struct list_head close_list;
|
|
+ struct list_head ptype_all;
|
|
+ struct list_head ptype_specific;
|
|
+ struct {
|
|
+ struct list_head upper;
|
|
+ struct list_head lower;
|
|
+ } adj_list;
|
|
+ netdev_features_t features;
|
|
+ netdev_features_t hw_features;
|
|
+ netdev_features_t wanted_features;
|
|
+ netdev_features_t vlan_features;
|
|
+ netdev_features_t hw_enc_features;
|
|
+ netdev_features_t mpls_features;
|
|
+ netdev_features_t gso_partial_features;
|
|
+ int ifindex;
|
|
+ int group;
|
|
+ struct net_device_stats stats;
|
|
+ atomic_long_t rx_dropped;
|
|
+ atomic_long_t tx_dropped;
|
|
+ atomic_long_t rx_nohandler;
|
|
+ atomic_t carrier_up_count;
|
|
+ atomic_t carrier_down_count;
|
|
+ const struct net_device_ops___2 *netdev_ops;
|
|
+ const struct ethtool_ops___2 *ethtool_ops;
|
|
+ const struct switchdev_ops *switchdev_ops;
|
|
+ const struct l3mdev_ops *l3mdev_ops;
|
|
+ const struct ndisc_ops *ndisc_ops;
|
|
+ const struct xfrmdev_ops___2 *xfrmdev_ops;
|
|
+ const struct tlsdev_ops___2 *tlsdev_ops;
|
|
+ const struct header_ops___2 *header_ops;
|
|
+ unsigned int flags;
|
|
+ unsigned int priv_flags;
|
|
+ short unsigned int gflags;
|
|
+ short unsigned int padded;
|
|
+ unsigned char operstate;
|
|
+ unsigned char link_mode;
|
|
+ unsigned char if_port;
|
|
+ unsigned char dma;
|
|
+ unsigned int mtu;
|
|
+ unsigned int min_mtu;
|
|
+ unsigned int max_mtu;
|
|
+ short unsigned int type;
|
|
+ short unsigned int hard_header_len;
|
|
+ unsigned char min_header_len;
|
|
+ short unsigned int needed_headroom;
|
|
+ short unsigned int needed_tailroom;
|
|
+ unsigned char perm_addr[32];
|
|
+ unsigned char addr_assign_type;
|
|
+ unsigned char addr_len;
|
|
+ unsigned char upper_level;
|
|
+ unsigned char lower_level;
|
|
+ short unsigned int neigh_priv_len;
|
|
+ short unsigned int dev_id;
|
|
+ short unsigned int dev_port;
|
|
+ spinlock_t addr_list_lock;
|
|
+ unsigned char name_assign_type;
|
|
+ bool uc_promisc;
|
|
+ struct netdev_hw_addr_list uc;
|
|
+ struct netdev_hw_addr_list mc;
|
|
+ struct netdev_hw_addr_list dev_addrs;
|
|
+ struct kset___2 *queues_kset;
|
|
+ unsigned int promiscuity;
|
|
+ unsigned int allmulti;
|
|
+ struct vlan_info *vlan_info;
|
|
+ struct tipc_bearer *tipc_ptr;
|
|
+ struct in_device *ip_ptr;
|
|
+ struct inet6_dev *ip6_ptr;
|
|
+ struct wireless_dev *ieee80211_ptr;
|
|
+ struct wpan_dev *ieee802154_ptr;
|
|
+ struct mpls_dev *mpls_ptr;
|
|
+ unsigned char *dev_addr;
|
|
+ struct netdev_rx_queue___2 *_rx;
|
|
+ unsigned int num_rx_queues;
|
|
+ unsigned int real_num_rx_queues;
|
|
+ struct bpf_prog___2 *xdp_prog;
|
|
+ long unsigned int gro_flush_timeout;
|
|
+ rx_handler_func_t___2 *rx_handler;
|
|
+ void *rx_handler_data;
|
|
+ struct mini_Qdisc___2 *miniq_ingress;
|
|
+ struct netdev_queue___2 *ingress_queue;
|
|
+ struct nf_hook_entries *nf_hooks_ingress;
|
|
+ unsigned char broadcast[32];
|
|
+ struct cpu_rmap *rx_cpu_rmap;
|
|
+ struct hlist_node index_hlist;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct netdev_queue___2 *_tx;
|
|
+ unsigned int num_tx_queues;
|
|
+ unsigned int real_num_tx_queues;
|
|
+ struct Qdisc___2 *qdisc;
|
|
+ struct hlist_head qdisc_hash[16];
|
|
+ unsigned int tx_queue_len;
|
|
+ spinlock_t tx_global_lock;
|
|
+ int watchdog_timeo;
|
|
+ struct xps_dev_maps *xps_cpus_map;
|
|
+ struct xps_dev_maps *xps_rxqs_map;
|
|
+ struct mini_Qdisc___2 *miniq_egress;
|
|
+ struct timer_list watchdog_timer;
|
|
+ int *pcpu_refcnt;
|
|
+ struct list_head todo_list;
|
|
+ struct list_head link_watch_list;
|
|
+ enum {
|
|
+ NETREG_UNINITIALIZED___2 = 0,
|
|
+ NETREG_REGISTERED___2 = 1,
|
|
+ NETREG_UNREGISTERING___2 = 2,
|
|
+ NETREG_UNREGISTERED___2 = 3,
|
|
+ NETREG_RELEASED___2 = 4,
|
|
+ NETREG_DUMMY___2 = 5,
|
|
+ } reg_state: 8;
|
|
+ bool dismantle;
|
|
+ enum {
|
|
+ RTNL_LINK_INITIALIZED___2 = 0,
|
|
+ RTNL_LINK_INITIALIZING___2 = 1,
|
|
+ } rtnl_link_state: 16;
|
|
+ bool needs_free_netdev;
|
|
+ void (*priv_destructor)(struct net_device___2 *);
|
|
+ struct netpoll_info *npinfo;
|
|
+ possible_net_t___2 nd_net;
|
|
+ union {
|
|
+ void *ml_priv;
|
|
+ struct pcpu_lstats *lstats;
|
|
+ struct pcpu_sw_netstats *tstats;
|
|
+ struct pcpu_dstats *dstats;
|
|
+ struct pcpu_vstats *vstats;
|
|
+ };
|
|
+ struct garp_port *garp_port;
|
|
+ struct mrp_port *mrp_port;
|
|
+ struct device___2 dev;
|
|
+ const struct attribute_group___2 *sysfs_groups[4];
|
|
+ const struct attribute_group___2 *sysfs_rx_queue_group;
|
|
+ const struct rtnl_link_ops___2 *rtnl_link_ops;
|
|
+ unsigned int gso_max_size;
|
|
+ u16 gso_max_segs;
|
|
+ const struct dcbnl_rtnl_ops___2 *dcbnl_ops;
|
|
+ s16 num_tc;
|
|
+ struct netdev_tc_txq tc_to_txq[16];
|
|
+ u8 prio_tc_map[16];
|
|
+ unsigned int fcoe_ddp_xid;
|
|
+ struct netprio_map *priomap;
|
|
+ struct phy_device *phydev;
|
|
+ struct sfp_bus *sfp_bus;
|
|
+ struct lock_class_key *qdisc_tx_busylock;
|
|
+ struct lock_class_key *qdisc_running_key;
|
|
+ bool proto_down;
|
|
+ unsigned int wol_enabled: 1;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long unsigned int kabi_reserved9;
|
|
+ long unsigned int kabi_reserved10;
|
|
+ long unsigned int kabi_reserved11;
|
|
+ long unsigned int kabi_reserved12;
|
|
+ long unsigned int kabi_reserved13;
|
|
+ long unsigned int kabi_reserved14;
|
|
+ long unsigned int kabi_reserved15;
|
|
+ long unsigned int kabi_reserved16;
|
|
+ long unsigned int kabi_reserved17;
|
|
+ long unsigned int kabi_reserved18;
|
|
+ long unsigned int kabi_reserved19;
|
|
+ long unsigned int kabi_reserved20;
|
|
+ long unsigned int kabi_reserved21;
|
|
+ long unsigned int kabi_reserved22;
|
|
+ long unsigned int kabi_reserved23;
|
|
+ long unsigned int kabi_reserved24;
|
|
+ long unsigned int kabi_reserved25;
|
|
+ long unsigned int kabi_reserved26;
|
|
+ long unsigned int kabi_reserved27;
|
|
+ long unsigned int kabi_reserved28;
|
|
+ long unsigned int kabi_reserved29;
|
|
+ long unsigned int kabi_reserved30;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct bpf_prog_ops___2 {
+ int (*test_run)(struct bpf_prog___2 *, const union bpf_attr *, union bpf_attr *);
+};
+
+struct bpf_verifier_ops___2 {
+ const struct bpf_func_proto * (*get_func_proto)(enum bpf_func_id, const struct bpf_prog___2 *);
+ bool (*is_valid_access)(int, int, enum bpf_access_type, const struct bpf_prog___2 *, struct bpf_insn_access_aux *);
+ int (*gen_prologue)(struct bpf_insn *, bool, const struct bpf_prog___2 *);
+ int (*gen_ld_abs)(const struct bpf_insn *, struct bpf_insn *);
+ u32 (*convert_ctx_access)(enum bpf_access_type, const struct bpf_insn *, struct bpf_insn *, struct bpf_prog___2 *, u32 *);
+};
+
+struct bpf_prog_offload___2 {
+ struct bpf_prog___2 *prog;
+ struct net_device___2 *netdev;
+ void *dev_priv;
+ struct list_head offloads;
+ bool dev_state;
+ const struct bpf_prog_offload_ops *dev_ops;
+ void *jited_image;
+ u32 jited_len;
+};
+
+struct bpf_prog_aux___2 {
|
|
+ atomic_t refcnt;
|
|
+ u32 used_map_cnt;
|
|
+ u32 max_ctx_offset;
|
|
+ u32 stack_depth;
|
|
+ u32 id;
|
|
+ u32 func_cnt;
|
|
+ bool offload_requested;
|
|
+ struct bpf_prog___2 **func;
|
|
+ void *jit_data;
|
|
+ struct latch_tree_node ksym_tnode;
|
|
+ struct list_head ksym_lnode;
|
|
+ const struct bpf_prog_ops___2 *ops;
|
|
+ struct bpf_map___2 **used_maps;
|
|
+ struct bpf_prog___2 *prog;
|
|
+ struct user_struct___2 *user;
|
|
+ u64 load_time;
|
|
+ struct bpf_map___2 *cgroup_storage;
|
|
+ char name[16];
|
|
+ void *security;
|
|
+ struct bpf_prog_offload___2 *offload;
|
|
+ union {
|
|
+ struct work_struct work;
|
|
+ struct callback_head rcu;
|
|
+ };
|
|
+ u32 max_tp_access;
|
|
+};
|
|
+
|
|
+struct nf_bridge_info___2;
|
|
+
|
|
+struct sk_buff___2 {
|
|
+ union {
|
|
+ struct {
|
|
+ struct sk_buff___2 *next;
|
|
+ struct sk_buff___2 *prev;
|
|
+ union {
|
|
+ struct net_device___2 *dev;
|
|
+ long unsigned int dev_scratch;
|
|
+ };
|
|
+ };
|
|
+ struct rb_node rbnode;
|
|
+ struct list_head list;
|
|
+ };
|
|
+ union {
|
|
+ struct sock *sk;
|
|
+ int ip_defrag_offset;
|
|
+ };
|
|
+ union {
|
|
+ ktime_t tstamp;
|
|
+ u64 skb_mstamp;
|
|
+ };
|
|
+ char cb[48];
|
|
+ union {
|
|
+ struct {
|
|
+ long unsigned int _skb_refdst;
|
|
+ void (*destructor)(struct sk_buff___2 *);
|
|
+ };
|
|
+ struct list_head tcp_tsorted_anchor;
|
|
+ };
|
|
+ struct sec_path *sp;
|
|
+ long unsigned int _nfct;
|
|
+ struct nf_bridge_info___2 *nf_bridge;
|
|
+ unsigned int len;
|
|
+ unsigned int data_len;
|
|
+ __u16 mac_len;
|
|
+ __u16 hdr_len;
|
|
+ __u16 queue_mapping;
|
|
+ __u8 __cloned_offset[0];
|
|
+ __u8 cloned: 1;
|
|
+ __u8 nohdr: 1;
|
|
+ __u8 fclone: 2;
|
|
+ __u8 peeked: 1;
|
|
+ __u8 head_frag: 1;
|
|
+ __u8 xmit_more: 1;
|
|
+ __u8 pfmemalloc: 1;
|
|
+ __u32 headers_start[0];
|
|
+ __u8 __pkt_type_offset[0];
|
|
+ __u8 pkt_type: 3;
|
|
+ __u8 ignore_df: 1;
|
|
+ __u8 nf_trace: 1;
|
|
+ __u8 ip_summed: 2;
|
|
+ __u8 ooo_okay: 1;
|
|
+ __u8 l4_hash: 1;
|
|
+ __u8 sw_hash: 1;
|
|
+ __u8 wifi_acked_valid: 1;
|
|
+ __u8 wifi_acked: 1;
|
|
+ __u8 no_fcs: 1;
|
|
+ __u8 encapsulation: 1;
|
|
+ __u8 encap_hdr_csum: 1;
|
|
+ __u8 csum_valid: 1;
|
|
+ __u8 csum_complete_sw: 1;
|
|
+ __u8 csum_level: 2;
|
|
+ __u8 csum_not_inet: 1;
|
|
+ __u8 dst_pending_confirm: 1;
|
|
+ __u8 ndisc_nodetype: 2;
|
|
+ __u8 ipvs_property: 1;
|
|
+ __u8 inner_protocol_type: 1;
|
|
+ __u8 remcsum_offload: 1;
|
|
+ __u8 offload_fwd_mark: 1;
|
|
+ __u8 offload_mr_fwd_mark: 1;
|
|
+ __u8 tc_skip_classify: 1;
|
|
+ __u8 tc_at_ingress: 1;
|
|
+ __u8 tc_redirected: 1;
|
|
+ __u8 tc_from_ingress: 1;
|
|
+ __u8 decrypted: 1;
|
|
+ __u16 tc_index;
|
|
+ union {
|
|
+ __wsum csum;
|
|
+ struct {
|
|
+ __u16 csum_start;
|
|
+ __u16 csum_offset;
|
|
+ };
|
|
+ };
|
|
+ __u32 priority;
|
|
+ int skb_iif;
|
|
+ __u32 hash;
|
|
+ __be16 vlan_proto;
|
|
+ __u16 vlan_tci;
|
|
+ union {
|
|
+ unsigned int napi_id;
|
|
+ unsigned int sender_cpu;
|
|
+ };
|
|
+ __u32 secmark;
|
|
+ union {
|
|
+ __u32 mark;
|
|
+ __u32 reserved_tailroom;
|
|
+ };
|
|
+ union {
|
|
+ __be16 inner_protocol;
|
|
+ __u8 inner_ipproto;
|
|
+ };
|
|
+ __u16 inner_transport_header;
|
|
+ __u16 inner_network_header;
|
|
+ __u16 inner_mac_header;
|
|
+ __be16 protocol;
|
|
+ __u16 transport_header;
|
|
+ __u16 network_header;
|
|
+ __u16 mac_header;
|
|
+ __u32 headers_end[0];
|
|
+ sk_buff_data_t tail;
|
|
+ sk_buff_data_t end;
|
|
+ unsigned char *head;
|
|
+ unsigned char *data;
|
|
+ unsigned int truesize;
|
|
+ refcount_t users;
|
|
+};
|
|
+
|
|
+struct pipe_buf_operations___2;
+
+struct pipe_buffer___2 {
+ struct page___2 *page;
+ unsigned int offset;
+ unsigned int len;
+ const struct pipe_buf_operations___2 *ops;
+ unsigned int flags;
+ long unsigned int private;
+};
+
+struct pipe_buf_operations___2 {
+ int can_merge;
+ int (*confirm)(struct pipe_inode_info___2 *, struct pipe_buffer___2 *);
+ void (*release)(struct pipe_inode_info___2 *, struct pipe_buffer___2 *);
+ int (*steal)(struct pipe_inode_info___2 *, struct pipe_buffer___2 *);
+ bool (*get)(struct pipe_inode_info___2 *, struct pipe_buffer___2 *);
+};
+
+struct nf_bridge_info___2 {
|
|
+ refcount_t use;
|
|
+ enum {
|
|
+ BRNF_PROTO_UNCHANGED___2 = 0,
|
|
+ BRNF_PROTO_8021Q___2 = 1,
|
|
+ BRNF_PROTO_PPPOE___2 = 2,
|
|
+ } orig_proto: 8;
|
|
+ u8 pkt_otherhost: 1;
|
|
+ u8 in_prerouting: 1;
|
|
+ u8 bridged_dnat: 1;
|
|
+ __u16 frag_max_size;
|
|
+ struct net_device___2 *physindev;
|
|
+ struct net_device___2 *physoutdev;
|
|
+ union {
|
|
+ __be32 ipv4_daddr;
|
|
+ struct in6_addr ipv6_daddr;
|
|
+ char neigh_header[8];
|
|
+ };
|
|
+};
|
|
+
|
|
+struct sk_buff_head___2 {
+ struct sk_buff___2 *next;
+ struct sk_buff___2 *prev;
+ __u32 qlen;
+ spinlock_t lock;
+};
+
+struct ethtool_ops___2 {
|
|
+ int (*get_settings)(struct net_device___2 *, struct ethtool_cmd *);
|
|
+ int (*set_settings)(struct net_device___2 *, struct ethtool_cmd *);
|
|
+ void (*get_drvinfo)(struct net_device___2 *, struct ethtool_drvinfo *);
|
|
+ int (*get_regs_len)(struct net_device___2 *);
|
|
+ void (*get_regs)(struct net_device___2 *, struct ethtool_regs *, void *);
|
|
+ void (*get_wol)(struct net_device___2 *, struct ethtool_wolinfo *);
|
|
+ int (*set_wol)(struct net_device___2 *, struct ethtool_wolinfo *);
|
|
+ u32 (*get_msglevel)(struct net_device___2 *);
|
|
+ void (*set_msglevel)(struct net_device___2 *, u32);
|
|
+ int (*nway_reset)(struct net_device___2 *);
|
|
+ u32 (*get_link)(struct net_device___2 *);
|
|
+ int (*get_eeprom_len)(struct net_device___2 *);
|
|
+ int (*get_eeprom)(struct net_device___2 *, struct ethtool_eeprom *, u8 *);
|
|
+ int (*set_eeprom)(struct net_device___2 *, struct ethtool_eeprom *, u8 *);
|
|
+ int (*get_coalesce)(struct net_device___2 *, struct ethtool_coalesce *);
|
|
+ int (*set_coalesce)(struct net_device___2 *, struct ethtool_coalesce *);
|
|
+ void (*get_ringparam)(struct net_device___2 *, struct ethtool_ringparam *);
|
|
+ int (*set_ringparam)(struct net_device___2 *, struct ethtool_ringparam *);
|
|
+ void (*get_pauseparam)(struct net_device___2 *, struct ethtool_pauseparam *);
|
|
+ int (*set_pauseparam)(struct net_device___2 *, struct ethtool_pauseparam *);
|
|
+ void (*self_test)(struct net_device___2 *, struct ethtool_test *, u64 *);
|
|
+ void (*get_strings)(struct net_device___2 *, u32, u8 *);
|
|
+ int (*set_phys_id)(struct net_device___2 *, enum ethtool_phys_id_state);
|
|
+ void (*get_ethtool_stats)(struct net_device___2 *, struct ethtool_stats *, u64 *);
|
|
+ int (*begin)(struct net_device___2 *);
|
|
+ void (*complete)(struct net_device___2 *);
|
|
+ u32 (*get_priv_flags)(struct net_device___2 *);
|
|
+ int (*set_priv_flags)(struct net_device___2 *, u32);
|
|
+ int (*get_sset_count)(struct net_device___2 *, int);
|
|
+ int (*get_rxnfc)(struct net_device___2 *, struct ethtool_rxnfc *, u32 *);
|
|
+ int (*set_rxnfc)(struct net_device___2 *, struct ethtool_rxnfc *);
|
|
+ int (*flash_device)(struct net_device___2 *, struct ethtool_flash *);
|
|
+ int (*reset)(struct net_device___2 *, u32 *);
|
|
+ u32 (*get_rxfh_key_size)(struct net_device___2 *);
|
|
+ u32 (*get_rxfh_indir_size)(struct net_device___2 *);
|
|
+ int (*get_rxfh)(struct net_device___2 *, u32 *, u8 *, u8 *);
|
|
+ int (*set_rxfh)(struct net_device___2 *, const u32 *, const u8 *, const u8);
|
|
+ int (*get_rxfh_context)(struct net_device___2 *, u32 *, u8 *, u8 *, u32);
|
|
+ int (*set_rxfh_context)(struct net_device___2 *, const u32 *, const u8 *, const u8, u32 *, bool);
|
|
+ void (*get_channels)(struct net_device___2 *, struct ethtool_channels *);
|
|
+ int (*set_channels)(struct net_device___2 *, struct ethtool_channels *);
|
|
+ int (*get_dump_flag)(struct net_device___2 *, struct ethtool_dump *);
|
|
+ int (*get_dump_data)(struct net_device___2 *, struct ethtool_dump *, void *);
|
|
+ int (*set_dump)(struct net_device___2 *, struct ethtool_dump *);
|
|
+ int (*get_ts_info)(struct net_device___2 *, struct ethtool_ts_info *);
|
|
+ int (*get_module_info)(struct net_device___2 *, struct ethtool_modinfo *);
|
|
+ int (*get_module_eeprom)(struct net_device___2 *, struct ethtool_eeprom *, u8 *);
|
|
+ int (*get_eee)(struct net_device___2 *, struct ethtool_eee *);
|
|
+ int (*set_eee)(struct net_device___2 *, struct ethtool_eee *);
|
|
+ int (*get_tunable)(struct net_device___2 *, const struct ethtool_tunable *, void *);
|
|
+ int (*set_tunable)(struct net_device___2 *, const struct ethtool_tunable *, const void *);
|
|
+ int (*get_per_queue_coalesce)(struct net_device___2 *, u32, struct ethtool_coalesce *);
|
|
+ int (*set_per_queue_coalesce)(struct net_device___2 *, u32, struct ethtool_coalesce *);
|
|
+ int (*get_link_ksettings)(struct net_device___2 *, struct ethtool_link_ksettings *);
|
|
+ int (*set_link_ksettings)(struct net_device___2 *, const struct ethtool_link_ksettings *);
|
|
+ int (*get_fecparam)(struct net_device___2 *, struct ethtool_fecparam *);
|
|
+ int (*set_fecparam)(struct net_device___2 *, struct ethtool_fecparam *);
|
|
+ void (*get_ethtool_phy_stats)(struct net_device___2 *, struct ethtool_stats *, u64 *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long unsigned int kabi_reserved9;
|
|
+ long unsigned int kabi_reserved10;
|
|
+ long unsigned int kabi_reserved11;
|
|
+ long unsigned int kabi_reserved12;
|
|
+ long unsigned int kabi_reserved13;
|
|
+ long unsigned int kabi_reserved14;
|
|
+ long unsigned int kabi_reserved15;
|
|
+ long unsigned int kabi_reserved16;
|
|
+ long unsigned int kabi_reserved17;
|
|
+ long unsigned int kabi_reserved18;
|
|
+ long unsigned int kabi_reserved19;
|
|
+ long unsigned int kabi_reserved20;
|
|
+ long unsigned int kabi_reserved21;
|
|
+ long unsigned int kabi_reserved22;
|
|
+ long unsigned int kabi_reserved23;
|
|
+ long unsigned int kabi_reserved24;
|
|
+ long unsigned int kabi_reserved25;
|
|
+ long unsigned int kabi_reserved26;
|
|
+ long unsigned int kabi_reserved27;
|
|
+ long unsigned int kabi_reserved28;
|
|
+ long unsigned int kabi_reserved29;
|
|
+ long unsigned int kabi_reserved30;
|
|
+ long unsigned int kabi_reserved31;
|
|
+ long unsigned int kabi_reserved32;
|
|
+};
|
|
+
|
|
+struct inet_frag_queue___2;
|
|
+
|
|
+struct inet_frags___2 {
+ unsigned int qsize;
+ void (*constructor)(struct inet_frag_queue___2 *, const void *);
+ void (*destructor)(struct inet_frag_queue___2 *);
+ void (*frag_expire)(struct timer_list *);
+ struct kmem_cache *frags_cachep;
+ const char *frags_cache_name;
+ struct rhashtable_params rhash_params;
+};
+
+struct inet_frag_queue___2 {
|
|
+ struct rhash_head node;
|
|
+ union {
|
|
+ struct frag_v4_compare_key v4;
|
|
+ struct frag_v6_compare_key v6;
|
|
+ } key;
|
|
+ struct timer_list timer;
|
|
+ spinlock_t lock;
|
|
+ refcount_t refcnt;
|
|
+ struct sk_buff___2 *fragments;
|
|
+ struct rb_root rb_fragments;
|
|
+ struct sk_buff___2 *fragments_tail;
|
|
+ struct sk_buff___2 *last_run_head;
|
|
+ ktime_t stamp;
|
|
+ int len;
|
|
+ int meat;
|
|
+ __u8 flags;
|
|
+ u16 max_size;
|
|
+ struct netns_frags___2 *net;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct pernet_operations___2 {
+ struct list_head list;
+ int (*init)(struct net___2 *);
+ void (*exit)(struct net___2 *);
+ void (*exit_batch)(struct list_head *);
+ unsigned int *id;
+ size_t size;
+};
+
+struct dcbnl_rtnl_ops___2 {
|
|
+ int (*ieee_getets)(struct net_device___2 *, struct ieee_ets *);
|
|
+ int (*ieee_setets)(struct net_device___2 *, struct ieee_ets *);
|
|
+ int (*ieee_getmaxrate)(struct net_device___2 *, struct ieee_maxrate *);
|
|
+ int (*ieee_setmaxrate)(struct net_device___2 *, struct ieee_maxrate *);
|
|
+ int (*ieee_getqcn)(struct net_device___2 *, struct ieee_qcn *);
|
|
+ int (*ieee_setqcn)(struct net_device___2 *, struct ieee_qcn *);
|
|
+ int (*ieee_getqcnstats)(struct net_device___2 *, struct ieee_qcn_stats *);
|
|
+ int (*ieee_getpfc)(struct net_device___2 *, struct ieee_pfc *);
|
|
+ int (*ieee_setpfc)(struct net_device___2 *, struct ieee_pfc *);
|
|
+ int (*ieee_getapp)(struct net_device___2 *, struct dcb_app *);
|
|
+ int (*ieee_setapp)(struct net_device___2 *, struct dcb_app *);
|
|
+ int (*ieee_delapp)(struct net_device___2 *, struct dcb_app *);
|
|
+ int (*ieee_peer_getets)(struct net_device___2 *, struct ieee_ets *);
|
|
+ int (*ieee_peer_getpfc)(struct net_device___2 *, struct ieee_pfc *);
|
|
+ u8 (*getstate)(struct net_device___2 *);
|
|
+ u8 (*setstate)(struct net_device___2 *, u8);
|
|
+ void (*getpermhwaddr)(struct net_device___2 *, u8 *);
|
|
+ void (*setpgtccfgtx)(struct net_device___2 *, int, u8, u8, u8, u8);
|
|
+ void (*setpgbwgcfgtx)(struct net_device___2 *, int, u8);
|
|
+ void (*setpgtccfgrx)(struct net_device___2 *, int, u8, u8, u8, u8);
|
|
+ void (*setpgbwgcfgrx)(struct net_device___2 *, int, u8);
|
|
+ void (*getpgtccfgtx)(struct net_device___2 *, int, u8 *, u8 *, u8 *, u8 *);
|
|
+ void (*getpgbwgcfgtx)(struct net_device___2 *, int, u8 *);
|
|
+ void (*getpgtccfgrx)(struct net_device___2 *, int, u8 *, u8 *, u8 *, u8 *);
|
|
+ void (*getpgbwgcfgrx)(struct net_device___2 *, int, u8 *);
|
|
+ void (*setpfccfg)(struct net_device___2 *, int, u8);
|
|
+ void (*getpfccfg)(struct net_device___2 *, int, u8 *);
|
|
+ u8 (*setall)(struct net_device___2 *);
|
|
+ u8 (*getcap)(struct net_device___2 *, int, u8 *);
|
|
+ int (*getnumtcs)(struct net_device___2 *, int, u8 *);
|
|
+ int (*setnumtcs)(struct net_device___2 *, int, u8);
|
|
+ u8 (*getpfcstate)(struct net_device___2 *);
|
|
+ void (*setpfcstate)(struct net_device___2 *, u8);
|
|
+ void (*getbcncfg)(struct net_device___2 *, int, u32 *);
|
|
+ void (*setbcncfg)(struct net_device___2 *, int, u32);
|
|
+ void (*getbcnrp)(struct net_device___2 *, int, u8 *);
|
|
+ void (*setbcnrp)(struct net_device___2 *, int, u8);
|
|
+ int (*setapp)(struct net_device___2 *, u8, u16, u8);
|
|
+ int (*getapp)(struct net_device___2 *, u8, u16);
|
|
+ u8 (*getfeatcfg)(struct net_device___2 *, int, u8 *);
|
|
+ u8 (*setfeatcfg)(struct net_device___2 *, int, u8);
|
|
+ u8 (*getdcbx)(struct net_device___2 *);
|
|
+ u8 (*setdcbx)(struct net_device___2 *, u8);
|
|
+ int (*peer_getappinfo)(struct net_device___2 *, struct dcb_peer_app_info *, u16 *);
|
|
+ int (*peer_getapptable)(struct net_device___2 *, struct dcb_app *);
|
|
+ int (*cee_peer_getpg)(struct net_device___2 *, struct cee_pg *);
|
|
+ int (*cee_peer_getpfc)(struct net_device___2 *, struct cee_pfc *);
|
|
+ int (*dcbnl_getbuffer)(struct net_device___2 *, struct dcbnl_buffer *);
|
|
+ int (*dcbnl_setbuffer)(struct net_device___2 *, struct dcbnl_buffer *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long unsigned int kabi_reserved9;
|
|
+ long unsigned int kabi_reserved10;
|
|
+ long unsigned int kabi_reserved11;
|
|
+ long unsigned int kabi_reserved12;
|
|
+ long unsigned int kabi_reserved13;
|
|
+ long unsigned int kabi_reserved14;
|
|
+ long unsigned int kabi_reserved15;
|
|
+};
|
|
+
|
|
+struct xdp_rxq_info___2 {
|
|
+ struct net_device___2 *dev;
|
|
+ u32 queue_index;
|
|
+ u32 reg_state;
|
|
+ struct xdp_mem_info mem;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct xdp_frame___2 {
|
|
+ void *data;
|
|
+ u16 len;
|
|
+ u16 headroom;
|
|
+ u16 metasize;
|
|
+ struct xdp_mem_info mem;
|
|
+ struct net_device___2 *dev_rx;
|
|
+};
|
|
+
|
|
+struct netlink_callback___2 {
|
|
+ struct sk_buff___2 *skb;
|
|
+ const struct nlmsghdr *nlh;
|
|
+ int (*dump)(struct sk_buff___2 *, struct netlink_callback___2 *);
|
|
+ int (*done)(struct netlink_callback___2 *);
|
|
+ void *data;
|
|
+ struct module___2 *module;
|
|
+ u16 family;
|
|
+ u16 min_dump_alloc;
|
|
+ unsigned int prev_seq;
|
|
+ unsigned int seq;
|
|
+ long int args[6];
|
|
+};
|
|
+
|
|
+struct header_ops___2 {
|
|
+ int (*create)(struct sk_buff___2 *, struct net_device___2 *, short unsigned int, const void *, const void *, unsigned int);
|
|
+ int (*parse)(const struct sk_buff___2 *, unsigned char *);
|
|
+ int (*cache)(const struct neighbour *, struct hh_cache *, __be16);
|
|
+ void (*cache_update)(struct hh_cache *, const struct net_device___2 *, const unsigned char *);
|
|
+ bool (*validate)(const char *, unsigned int);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+};
|
|
+
|
|
+struct napi_struct___2 {
|
|
+ struct list_head poll_list;
|
|
+ long unsigned int state;
|
|
+ int weight;
|
|
+ long unsigned int gro_bitmask;
|
|
+ int (*poll)(struct napi_struct___2 *, int);
|
|
+ int poll_owner;
|
|
+ struct net_device___2 *dev;
|
|
+ struct gro_list gro_hash[8];
|
|
+ struct sk_buff___2 *skb;
|
|
+ struct hrtimer timer;
|
|
+ struct list_head dev_list;
|
|
+ struct hlist_node napi_hash_node;
|
|
+ unsigned int napi_id;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+};
|
|
+
|
|
+struct netdev_queue___2 {
|
|
+ struct net_device___2 *dev;
|
|
+ struct Qdisc___2 *qdisc;
|
|
+ struct Qdisc___2 *qdisc_sleeping;
|
|
+ struct kobject___3 kobj;
|
|
+ int numa_node;
|
|
+ long unsigned int tx_maxrate;
|
|
+ long unsigned int trans_timeout;
|
|
+ struct net_device___2 *sb_dev;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ spinlock_t _xmit_lock;
|
|
+ int xmit_lock_owner;
|
|
+ long unsigned int trans_start;
|
|
+ long unsigned int state;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct dql dql;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+};
|
|
+
|
|
+struct qdisc_skb_head___2 {
|
|
+ struct sk_buff___2 *head;
|
|
+ struct sk_buff___2 *tail;
|
|
+ union {
|
|
+ u32 qlen;
|
|
+ atomic_t atomic_qlen;
|
|
+ };
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct Qdisc_ops___2;
|
|
+
|
|
+struct Qdisc___2 {
|
|
+ int (*enqueue)(struct sk_buff___2 *, struct Qdisc___2 *, struct sk_buff___2 **);
|
|
+ struct sk_buff___2 * (*dequeue)(struct Qdisc___2 *);
|
|
+ unsigned int flags;
|
|
+ u32 limit;
|
|
+ const struct Qdisc_ops___2 *ops;
|
|
+ struct qdisc_size_table *stab;
|
|
+ struct hlist_node hash;
|
|
+ u32 handle;
|
|
+ u32 parent;
|
|
+ struct netdev_queue___2 *dev_queue;
|
|
+ struct net_rate_estimator *rate_est;
|
|
+ struct gnet_stats_basic_cpu *cpu_bstats;
|
|
+ struct gnet_stats_queue *cpu_qstats;
|
|
+ int padded;
|
|
+ refcount_t refcnt;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct sk_buff_head___2 gso_skb;
|
|
+ struct qdisc_skb_head___2 q;
|
|
+ struct gnet_stats_basic_packed bstats;
|
|
+ seqcount_t running;
|
|
+ struct gnet_stats_queue qstats;
|
|
+ long unsigned int state;
|
|
+ struct Qdisc___2 *next_sched;
|
|
+ struct sk_buff_head___2 skb_bad_txq;
|
|
+ spinlock_t busylock;
|
|
+ spinlock_t seqlock;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct netdev_rx_queue___2 {
|
|
+ struct rps_map *rps_map;
|
|
+ struct rps_dev_flow_table *rps_flow_table;
|
|
+ struct kobject___3 kobj;
|
|
+ struct net_device___2 *dev;
|
|
+ long: 64;
|
|
+ struct xdp_rxq_info___2 xdp_rxq;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+};
|
|
+
|
|
+typedef u16 (*select_queue_fallback_t___2)(struct net_device___2 *, struct sk_buff___2 *, struct net_device___2 *);
|
|
+
|
|
+struct netdev_bpf___2 {
|
|
+ enum bpf_netdev_command command;
|
|
+ union {
|
|
+ struct {
|
|
+ u32 flags;
|
|
+ struct bpf_prog___2 *prog;
|
|
+ struct netlink_ext_ack *extack;
|
|
+ };
|
|
+ struct {
|
|
+ u32 prog_id;
|
|
+ u32 prog_flags;
|
|
+ };
|
|
+ struct {
|
|
+ struct bpf_prog___2 *prog;
|
|
+ const struct bpf_prog_offload_ops *ops;
|
|
+ } verifier;
|
|
+ struct {
|
|
+ struct bpf_prog___2 *prog;
|
|
+ } offload;
|
|
+ struct {
|
|
+ struct bpf_offloaded_map___2 *offmap;
|
|
+ };
|
|
+ struct {
|
|
+ struct xdp_umem *umem;
|
|
+ u16 queue_id;
|
|
+ } xsk;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct xfrmdev_ops___2 {
|
|
+ int (*xdo_dev_state_add)(struct xfrm_state *);
|
|
+ void (*xdo_dev_state_delete)(struct xfrm_state *);
|
|
+ void (*xdo_dev_state_free)(struct xfrm_state *);
|
|
+ bool (*xdo_dev_offload_ok)(struct sk_buff___2 *, struct xfrm_state *);
|
|
+ void (*xdo_dev_state_advance_esn)(struct xfrm_state *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+};
|
|
+
|
|
+struct tlsdev_ops___2 {
|
|
+ int (*tls_dev_add)(struct net_device___2 *, struct sock *, enum tls_offload_ctx_dir, struct tls_crypto_info *, u32);
|
|
+ void (*tls_dev_del)(struct net_device___2 *, struct tls_context *, enum tls_offload_ctx_dir);
|
|
+ void (*tls_dev_resync_rx)(struct net_device___2 *, struct sock *, u32, u64);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+};
|
|
+
|
|
+struct net_device_ops___2 {
|
|
+ int (*ndo_init)(struct net_device___2 *);
|
|
+ void (*ndo_uninit)(struct net_device___2 *);
|
|
+ int (*ndo_open)(struct net_device___2 *);
|
|
+ int (*ndo_stop)(struct net_device___2 *);
|
|
+ netdev_tx_t (*ndo_start_xmit)(struct sk_buff___2 *, struct net_device___2 *);
|
|
+ netdev_features_t (*ndo_features_check)(struct sk_buff___2 *, struct net_device___2 *, netdev_features_t);
|
|
+ u16 (*ndo_select_queue)(struct net_device___2 *, struct sk_buff___2 *, struct net_device___2 *, select_queue_fallback_t___2);
|
|
+ void (*ndo_change_rx_flags)(struct net_device___2 *, int);
|
|
+ void (*ndo_set_rx_mode)(struct net_device___2 *);
|
|
+ int (*ndo_set_mac_address)(struct net_device___2 *, void *);
|
|
+ int (*ndo_validate_addr)(struct net_device___2 *);
|
|
+ int (*ndo_do_ioctl)(struct net_device___2 *, struct ifreq *, int);
|
|
+ int (*ndo_set_config)(struct net_device___2 *, struct ifmap *);
|
|
+ int (*ndo_change_mtu)(struct net_device___2 *, int);
|
|
+ int (*ndo_neigh_setup)(struct net_device___2 *, struct neigh_parms *);
|
|
+ void (*ndo_tx_timeout)(struct net_device___2 *);
|
|
+ void (*ndo_get_stats64)(struct net_device___2 *, struct rtnl_link_stats64 *);
|
|
+ bool (*ndo_has_offload_stats)(const struct net_device___2 *, int);
|
|
+ int (*ndo_get_offload_stats)(int, const struct net_device___2 *, void *);
|
|
+ struct net_device_stats * (*ndo_get_stats)(struct net_device___2 *);
|
|
+ int (*ndo_vlan_rx_add_vid)(struct net_device___2 *, __be16, u16);
|
|
+ int (*ndo_vlan_rx_kill_vid)(struct net_device___2 *, __be16, u16);
|
|
+ void (*ndo_poll_controller)(struct net_device___2 *);
|
|
+ int (*ndo_netpoll_setup)(struct net_device___2 *, struct netpoll_info *);
|
|
+ void (*ndo_netpoll_cleanup)(struct net_device___2 *);
|
|
+ int (*ndo_set_vf_mac)(struct net_device___2 *, int, u8 *);
|
|
+ int (*ndo_set_vf_vlan)(struct net_device___2 *, int, u16, u8, __be16);
|
|
+ int (*ndo_set_vf_rate)(struct net_device___2 *, int, int, int);
|
|
+ int (*ndo_set_vf_spoofchk)(struct net_device___2 *, int, bool);
|
|
+ int (*ndo_set_vf_trust)(struct net_device___2 *, int, bool);
|
|
+ int (*ndo_get_vf_config)(struct net_device___2 *, int, struct ifla_vf_info *);
|
|
+ int (*ndo_set_vf_link_state)(struct net_device___2 *, int, int);
|
|
+ int (*ndo_get_vf_stats)(struct net_device___2 *, int, struct ifla_vf_stats *);
|
|
+ int (*ndo_set_vf_port)(struct net_device___2 *, int, struct nlattr **);
|
|
+ int (*ndo_get_vf_port)(struct net_device___2 *, int, struct sk_buff___2 *);
|
|
+ int (*ndo_set_vf_guid)(struct net_device___2 *, int, u64, int);
|
|
+ int (*ndo_set_vf_rss_query_en)(struct net_device___2 *, int, bool);
|
|
+ int (*ndo_setup_tc)(struct net_device___2 *, enum tc_setup_type, void *);
|
|
+ int (*ndo_fcoe_enable)(struct net_device___2 *);
|
|
+ int (*ndo_fcoe_disable)(struct net_device___2 *);
|
|
+ int (*ndo_fcoe_ddp_setup)(struct net_device___2 *, u16, struct scatterlist *, unsigned int);
|
|
+ int (*ndo_fcoe_ddp_done)(struct net_device___2 *, u16);
|
|
+ int (*ndo_fcoe_ddp_target)(struct net_device___2 *, u16, struct scatterlist *, unsigned int);
|
|
+ int (*ndo_fcoe_get_hbainfo)(struct net_device___2 *, struct netdev_fcoe_hbainfo *);
|
|
+ int (*ndo_fcoe_get_wwn)(struct net_device___2 *, u64 *, int);
|
|
+ int (*ndo_rx_flow_steer)(struct net_device___2 *, const struct sk_buff___2 *, u16, u32);
|
|
+ int (*ndo_add_slave)(struct net_device___2 *, struct net_device___2 *, struct netlink_ext_ack *);
|
|
+ int (*ndo_del_slave)(struct net_device___2 *, struct net_device___2 *);
|
|
+ netdev_features_t (*ndo_fix_features)(struct net_device___2 *, netdev_features_t);
|
|
+ int (*ndo_set_features)(struct net_device___2 *, netdev_features_t);
|
|
+ int (*ndo_neigh_construct)(struct net_device___2 *, struct neighbour *);
|
|
+ void (*ndo_neigh_destroy)(struct net_device___2 *, struct neighbour *);
|
|
+ int (*ndo_fdb_add)(struct ndmsg *, struct nlattr **, struct net_device___2 *, const unsigned char *, u16, u16);
|
|
+ int (*ndo_fdb_del)(struct ndmsg *, struct nlattr **, struct net_device___2 *, const unsigned char *, u16);
|
|
+ int (*ndo_fdb_dump)(struct sk_buff___2 *, struct netlink_callback___2 *, struct net_device___2 *, struct net_device___2 *, int *);
|
|
+ int (*ndo_bridge_setlink)(struct net_device___2 *, struct nlmsghdr *, u16);
|
|
+ int (*ndo_bridge_getlink)(struct sk_buff___2 *, u32, u32, struct net_device___2 *, u32, int);
|
|
+ int (*ndo_bridge_dellink)(struct net_device___2 *, struct nlmsghdr *, u16);
|
|
+ int (*ndo_change_carrier)(struct net_device___2 *, bool);
|
|
+ int (*ndo_get_phys_port_id)(struct net_device___2 *, struct netdev_phys_item_id *);
|
|
+ int (*ndo_get_phys_port_name)(struct net_device___2 *, char *, size_t);
|
|
+ void (*ndo_udp_tunnel_add)(struct net_device___2 *, struct udp_tunnel_info *);
|
|
+ void (*ndo_udp_tunnel_del)(struct net_device___2 *, struct udp_tunnel_info *);
|
|
+ void * (*ndo_dfwd_add_station)(struct net_device___2 *, struct net_device___2 *);
|
|
+ void (*ndo_dfwd_del_station)(struct net_device___2 *, void *);
|
|
+ int (*ndo_get_lock_subclass)(struct net_device___2 *);
|
|
+ int (*ndo_set_tx_maxrate)(struct net_device___2 *, int, u32);
|
|
+ int (*ndo_get_iflink)(const struct net_device___2 *);
|
|
+ int (*ndo_change_proto_down)(struct net_device___2 *, bool);
|
|
+ int (*ndo_fill_metadata_dst)(struct net_device___2 *, struct sk_buff___2 *);
|
|
+ void (*ndo_set_rx_headroom)(struct net_device___2 *, int);
|
|
+ int (*ndo_bpf)(struct net_device___2 *, struct netdev_bpf___2 *);
|
|
+ int (*ndo_xdp_xmit)(struct net_device___2 *, int, struct xdp_frame___2 **, u32);
|
|
+ int (*ndo_xsk_async_xmit)(struct net_device___2 *, u32);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long unsigned int kabi_reserved9;
|
|
+ long unsigned int kabi_reserved10;
|
|
+ long unsigned int kabi_reserved11;
|
|
+ long unsigned int kabi_reserved12;
|
|
+ long unsigned int kabi_reserved13;
|
|
+ long unsigned int kabi_reserved14;
|
|
+ long unsigned int kabi_reserved15;
|
|
+ long unsigned int kabi_reserved16;
|
|
+ long unsigned int kabi_reserved17;
|
|
+ long unsigned int kabi_reserved18;
|
|
+ long unsigned int kabi_reserved19;
|
|
+ long unsigned int kabi_reserved20;
|
|
+ long unsigned int kabi_reserved21;
|
|
+ long unsigned int kabi_reserved22;
|
|
+ long unsigned int kabi_reserved23;
|
|
+ long unsigned int kabi_reserved24;
|
|
+ long unsigned int kabi_reserved25;
|
|
+ long unsigned int kabi_reserved26;
|
|
+ long unsigned int kabi_reserved27;
|
|
+ long unsigned int kabi_reserved28;
|
|
+ long unsigned int kabi_reserved29;
|
|
+ long unsigned int kabi_reserved30;
|
|
+ long unsigned int kabi_reserved31;
|
|
+ long unsigned int kabi_reserved32;
|
|
+ long unsigned int kabi_reserved33;
|
|
+ long unsigned int kabi_reserved34;
|
|
+ long unsigned int kabi_reserved35;
|
|
+ long unsigned int kabi_reserved36;
|
|
+ long unsigned int kabi_reserved37;
|
|
+ long unsigned int kabi_reserved38;
|
|
+ long unsigned int kabi_reserved39;
|
|
+ long unsigned int kabi_reserved40;
|
|
+ long unsigned int kabi_reserved41;
|
|
+ long unsigned int kabi_reserved42;
|
|
+ long unsigned int kabi_reserved43;
|
|
+ long unsigned int kabi_reserved44;
|
|
+ long unsigned int kabi_reserved45;
|
|
+ long unsigned int kabi_reserved46;
|
|
+ long unsigned int kabi_reserved47;
|
|
+};
|
|
+
|
|
+struct tcf_proto___2;
|
|
+
|
|
+struct mini_Qdisc___2 {
|
|
+ struct tcf_proto___2 *filter_list;
|
|
+ struct gnet_stats_basic_cpu *cpu_bstats;
|
|
+ struct gnet_stats_queue *cpu_qstats;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct rtnl_link_ops___2 {
|
|
+ struct list_head list;
|
|
+ const char *kind;
|
|
+ size_t priv_size;
|
|
+ void (*setup)(struct net_device___2 *);
|
|
+ unsigned int maxtype;
|
|
+ const struct nla_policy *policy;
|
|
+ int (*validate)(struct nlattr **, struct nlattr **, struct netlink_ext_ack *);
|
|
+ int (*newlink)(struct net___2 *, struct net_device___2 *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *);
|
|
+ int (*changelink)(struct net_device___2 *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *);
|
|
+ void (*dellink)(struct net_device___2 *, struct list_head *);
|
|
+ size_t (*get_size)(const struct net_device___2 *);
|
|
+ int (*fill_info)(struct sk_buff___2 *, const struct net_device___2 *);
|
|
+ size_t (*get_xstats_size)(const struct net_device___2 *);
|
|
+ int (*fill_xstats)(struct sk_buff___2 *, const struct net_device___2 *);
|
|
+ unsigned int (*get_num_tx_queues)();
|
|
+ unsigned int (*get_num_rx_queues)();
|
|
+ unsigned int slave_maxtype;
|
|
+ const struct nla_policy *slave_policy;
|
|
+ int (*slave_changelink)(struct net_device___2 *, struct net_device___2 *, struct nlattr **, struct nlattr **, struct netlink_ext_ack *);
|
|
+ size_t (*get_slave_size)(const struct net_device___2 *, const struct net_device___2 *);
|
|
+ int (*fill_slave_info)(struct sk_buff___2 *, const struct net_device___2 *, const struct net_device___2 *);
|
|
+ struct net___2 * (*get_link_net)(const struct net_device___2 *);
|
|
+ size_t (*get_linkxstats_size)(const struct net_device___2 *, int);
|
|
+ int (*fill_linkxstats)(struct sk_buff___2 *, const struct net_device___2 *, int *, int);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+ long unsigned int kabi_reserved9;
|
|
+ long unsigned int kabi_reserved10;
|
|
+ long unsigned int kabi_reserved11;
|
|
+ long unsigned int kabi_reserved12;
|
|
+ long unsigned int kabi_reserved13;
|
|
+ long unsigned int kabi_reserved14;
|
|
+ long unsigned int kabi_reserved15;
|
|
+};
|
|
+
|
|
+struct softnet_data___2 {
|
|
+ struct list_head poll_list;
|
|
+ struct sk_buff_head___2 process_queue;
|
|
+ unsigned int processed;
|
|
+ unsigned int time_squeeze;
|
|
+ unsigned int received_rps;
|
|
+ struct softnet_data___2 *rps_ipi_list;
|
|
+ struct sd_flow_limit *flow_limit;
|
|
+ struct Qdisc___2 *output_queue;
|
|
+ struct Qdisc___2 **output_queue_tailp;
|
|
+ struct sk_buff___2 *completion_queue;
|
|
+ struct sk_buff_head___2 xfrm_backlog;
|
|
+ struct {
|
|
+ u16 recursion;
|
|
+ u8 more;
|
|
+ } xmit;
|
|
+ int: 32;
|
|
+ unsigned int input_queue_head;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ call_single_data_t csd;
|
|
+ struct softnet_data___2 *rps_ipi_next;
|
|
+ unsigned int cpu;
|
|
+ unsigned int input_queue_tail;
|
|
+ unsigned int dropped;
|
|
+ struct sk_buff_head___2 input_pkt_queue;
|
|
+ struct napi_struct___2 backlog;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct gnet_dump___2 {
|
|
+ spinlock_t *lock;
|
|
+ struct sk_buff___2 *skb;
|
|
+ struct nlattr *tail;
|
|
+ int compat_tc_stats;
|
|
+ int compat_xstats;
|
|
+ int padattr;
|
|
+ void *xstats;
|
|
+ int xstats_len;
|
|
+ struct tc_stats tc_stats;
|
|
+};
|
|
+
|
|
+struct Qdisc_class_ops___2;
|
|
+
|
|
+struct Qdisc_ops___2 {
|
|
+ struct Qdisc_ops___2 *next;
|
|
+ const struct Qdisc_class_ops___2 *cl_ops;
|
|
+ char id[16];
|
|
+ int priv_size;
|
|
+ unsigned int static_flags;
|
|
+ int (*enqueue)(struct sk_buff___2 *, struct Qdisc___2 *, struct sk_buff___2 **);
|
|
+ struct sk_buff___2 * (*dequeue)(struct Qdisc___2 *);
|
|
+ struct sk_buff___2 * (*peek)(struct Qdisc___2 *);
|
|
+ int (*init)(struct Qdisc___2 *, struct nlattr *, struct netlink_ext_ack *);
|
|
+ void (*reset)(struct Qdisc___2 *);
|
|
+ void (*destroy)(struct Qdisc___2 *);
|
|
+ int (*change)(struct Qdisc___2 *, struct nlattr *, struct netlink_ext_ack *);
|
|
+ void (*attach)(struct Qdisc___2 *);
|
|
+ int (*change_tx_queue_len)(struct Qdisc___2 *, unsigned int);
|
|
+ int (*dump)(struct Qdisc___2 *, struct sk_buff___2 *);
|
|
+ int (*dump_stats)(struct Qdisc___2 *, struct gnet_dump___2 *);
|
|
+ void (*ingress_block_set)(struct Qdisc___2 *, u32);
|
|
+ void (*egress_block_set)(struct Qdisc___2 *, u32);
|
|
+ u32 (*ingress_block_get)(struct Qdisc___2 *);
|
|
+ u32 (*egress_block_get)(struct Qdisc___2 *);
|
|
+ struct module___2 *owner;
|
|
+};
|
|
+
|
|
+struct tcf_block___2;
|
|
+
|
|
+struct Qdisc_class_ops___2 {
|
|
+ struct netdev_queue___2 * (*select_queue)(struct Qdisc___2 *, struct tcmsg *);
|
|
+ int (*graft)(struct Qdisc___2 *, long unsigned int, struct Qdisc___2 *, struct Qdisc___2 **, struct netlink_ext_ack *);
|
|
+ struct Qdisc___2 * (*leaf)(struct Qdisc___2 *, long unsigned int);
|
|
+ void (*qlen_notify)(struct Qdisc___2 *, long unsigned int);
|
|
+ long unsigned int (*find)(struct Qdisc___2 *, u32);
|
|
+ int (*change)(struct Qdisc___2 *, u32, u32, struct nlattr **, long unsigned int *, struct netlink_ext_ack *);
|
|
+ int (*delete)(struct Qdisc___2 *, long unsigned int);
|
|
+ void (*walk)(struct Qdisc___2 *, struct qdisc_walker *);
|
|
+ struct tcf_block___2 * (*tcf_block)(struct Qdisc___2 *, long unsigned int, struct netlink_ext_ack *);
|
|
+ long unsigned int (*bind_tcf)(struct Qdisc___2 *, long unsigned int, u32);
|
|
+ void (*unbind_tcf)(struct Qdisc___2 *, long unsigned int);
|
|
+ int (*dump)(struct Qdisc___2 *, long unsigned int, struct sk_buff___2 *, struct tcmsg *);
|
|
+ int (*dump_stats)(struct Qdisc___2 *, long unsigned int, struct gnet_dump___2 *);
|
|
+};
|
|
+
|
|
+struct tcf_chain___2;
|
|
+
|
|
+struct tcf_block___2 {
|
|
+ struct list_head chain_list;
|
|
+ u32 index;
|
|
+ unsigned int refcnt;
|
|
+ struct net___2 *net;
|
|
+ struct Qdisc___2 *q;
|
|
+ struct list_head cb_list;
|
|
+ struct list_head owner_list;
|
|
+ bool keep_dst;
|
|
+ unsigned int offloadcnt;
|
|
+ unsigned int nooffloaddevcnt;
|
|
+ struct {
|
|
+ struct tcf_chain___2 *chain;
|
|
+ struct list_head filter_chain_list;
|
|
+ } chain0;
|
|
+};
|
|
+
|
|
+struct tcf_result___2;
|
|
+
|
|
+struct tcf_proto_ops___2;
|
|
+
|
|
+struct tcf_proto___2 {
|
|
+ struct tcf_proto___2 *next;
|
|
+ void *root;
|
|
+ int (*classify)(struct sk_buff___2 *, const struct tcf_proto___2 *, struct tcf_result___2 *);
|
|
+ __be16 protocol;
|
|
+ u32 prio;
|
|
+ void *data;
|
|
+ const struct tcf_proto_ops___2 *ops;
|
|
+ struct tcf_chain___2 *chain;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct tcf_result___2 {
|
|
+ union {
|
|
+ struct {
|
|
+ long unsigned int class;
|
|
+ u32 classid;
|
|
+ };
|
|
+ const struct tcf_proto___2 *goto_tp;
|
|
+ struct {
|
|
+ bool ingress;
|
|
+ struct gnet_stats_queue *qstats;
|
|
+ };
|
|
+ };
|
|
+};
|
|
+
|
|
+struct tcf_proto_ops___2 {
|
|
+ struct list_head head;
|
|
+ char kind[16];
|
|
+ int (*classify)(struct sk_buff___2 *, const struct tcf_proto___2 *, struct tcf_result___2 *);
|
|
+ int (*init)(struct tcf_proto___2 *);
|
|
+ void (*destroy)(struct tcf_proto___2 *, struct netlink_ext_ack *);
|
|
+ void * (*get)(struct tcf_proto___2 *, u32);
|
|
+ int (*change)(struct net___2 *, struct sk_buff___2 *, struct tcf_proto___2 *, long unsigned int, u32, struct nlattr **, void **, bool, struct netlink_ext_ack *);
|
|
+ int (*delete)(struct tcf_proto___2 *, void *, bool *, struct netlink_ext_ack *);
|
|
+ void (*walk)(struct tcf_proto___2 *, struct tcf_walker *);
|
|
+ int (*reoffload)(struct tcf_proto___2 *, bool, tc_setup_cb_t *, void *, struct netlink_ext_ack *);
|
|
+ void (*bind_class)(void *, u32, long unsigned int);
|
|
+ void * (*tmplt_create)(struct net___2 *, struct tcf_chain___2 *, struct nlattr **, struct netlink_ext_ack *);
|
|
+ void (*tmplt_destroy)(void *);
|
|
+ int (*dump)(struct net___2 *, struct tcf_proto___2 *, void *, struct sk_buff___2 *, struct tcmsg *);
|
|
+ int (*tmplt_dump)(struct sk_buff___2 *, struct net___2 *, void *);
|
|
+ struct module___2 *owner;
|
|
+};
|
|
+
|
|
+struct tcf_chain___2 {
|
|
+ struct tcf_proto___2 *filter_chain;
|
|
+ struct list_head list;
|
|
+ struct tcf_block___2 *block;
|
|
+ u32 index;
|
|
+ unsigned int refcnt;
|
|
+ unsigned int action_refcnt;
|
|
+ bool explicitly_created;
|
|
+ const struct tcf_proto_ops___2 *tmplt_ops;
|
|
+ void *tmplt_priv;
|
|
+};
|
|
+
|
|
+struct bpf_redirect_info___2 {
+ u32 ifindex;
+ u32 flags;
+ struct bpf_map___2 *map;
+ struct bpf_map___2 *map_to_flush;
+ u32 kern_flags;
+};
+
+typedef int (*remote_function_f)(void *);
+
+struct remote_function_call {
|
|
+ struct task_struct___2 *p;
|
|
+ remote_function_f func;
|
|
+ void *info;
|
|
+ int ret;
|
|
+};
|
|
+
|
|
+typedef void (*event_f)(struct perf_event___2 *, struct perf_cpu_context___2 *, struct perf_event_context___2 *, void *);
|
|
+
|
|
+struct event_function_struct {
|
|
+ struct perf_event___2 *event;
|
|
+ event_f func;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+enum event_type_t {
|
|
+ EVENT_FLEXIBLE = 1,
|
|
+ EVENT_PINNED = 2,
|
|
+ EVENT_TIME = 4,
|
|
+ EVENT_CPU = 8,
|
|
+ EVENT_ALL = 3,
|
|
+};
|
|
+
|
|
+struct stop_event_data {
|
|
+ struct perf_event___2 *event;
|
|
+ unsigned int restart;
|
|
+};
|
|
+
|
|
+struct sched_in_data {
|
|
+ struct perf_event_context___2 *ctx;
|
|
+ struct perf_cpu_context___2 *cpuctx;
|
|
+ int can_add_hw;
|
|
+};
|
|
+
|
|
+struct perf_read_data {
|
|
+ struct perf_event___2 *event;
|
|
+ bool group;
|
|
+ int ret;
|
|
+};
|
|
+
|
|
+struct perf_read_event {
|
|
+ struct perf_event_header header;
|
|
+ u32 pid;
|
|
+ u32 tid;
|
|
+};
|
|
+
|
|
+typedef void perf_iterate_f(struct perf_event___2 *, void *);
|
|
+
|
|
+struct remote_output {
|
|
+ struct ring_buffer___2 *rb;
|
|
+ int err;
|
|
+};
|
|
+
|
|
+struct perf_task_event {
|
|
+ struct task_struct___2 *task;
|
|
+ struct perf_event_context___2 *task_ctx;
|
|
+ struct {
|
|
+ struct perf_event_header header;
|
|
+ u32 pid;
|
|
+ u32 ppid;
|
|
+ u32 tid;
|
|
+ u32 ptid;
|
|
+ u64 time;
|
|
+ } event_id;
|
|
+};
|
|
+
|
|
+struct perf_comm_event {
|
|
+ struct task_struct___2 *task;
|
|
+ char *comm;
|
|
+ int comm_size;
|
|
+ struct {
|
|
+ struct perf_event_header header;
|
|
+ u32 pid;
|
|
+ u32 tid;
|
|
+ } event_id;
|
|
+};
|
|
+
|
|
+struct perf_namespaces_event {
|
|
+ struct task_struct___2 *task;
|
|
+ struct {
|
|
+ struct perf_event_header header;
|
|
+ u32 pid;
|
|
+ u32 tid;
|
|
+ u64 nr_namespaces;
|
|
+ struct perf_ns_link_info link_info[7];
|
|
+ } event_id;
|
|
+};
|
|
+
|
|
+struct perf_mmap_event {
|
|
+ struct vm_area_struct___2 *vma;
|
|
+ const char *file_name;
|
|
+ int file_size;
|
|
+ int maj;
|
|
+ int min;
|
|
+ u64 ino;
|
|
+ u64 ino_generation;
|
|
+ u32 prot;
|
|
+ u32 flags;
|
|
+ struct {
|
|
+ struct perf_event_header header;
|
|
+ u32 pid;
|
|
+ u32 tid;
|
|
+ u64 start;
|
|
+ u64 len;
|
|
+ u64 pgoff;
|
|
+ } event_id;
|
|
+};
|
|
+
|
|
+struct perf_switch_event {
|
|
+ struct task_struct___2 *task;
|
|
+ struct task_struct___2 *next_prev;
|
|
+ struct {
|
|
+ struct perf_event_header header;
|
|
+ u32 next_prev_pid;
|
|
+ u32 next_prev_tid;
|
|
+ } event_id;
|
|
+};
|
|
+
|
|
+struct swevent_htable {
|
|
+ struct swevent_hlist *swevent_hlist;
|
|
+ struct mutex hlist_mutex;
|
|
+ int hlist_refcount;
|
|
+ int recursion[4];
|
|
+};
|
|
+
|
|
+enum perf_probe_config {
|
|
+ PERF_PROBE_CONFIG_IS_RETPROBE = 1,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IF_ACT_NONE = -1,
|
|
+ IF_ACT_FILTER = 0,
|
|
+ IF_ACT_START = 1,
|
|
+ IF_ACT_STOP = 2,
|
|
+ IF_SRC_FILE = 3,
|
|
+ IF_SRC_KERNEL = 4,
|
|
+ IF_SRC_FILEADDR = 5,
|
|
+ IF_SRC_KERNELADDR = 6,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IF_STATE_ACTION = 0,
|
|
+ IF_STATE_SOURCE = 1,
|
|
+ IF_STATE_END = 2,
|
|
+};
|
|
+
|
|
+struct perf_aux_event {
|
|
+ struct perf_event_header header;
|
|
+ u32 pid;
|
|
+ u32 tid;
|
|
+};
|
|
+
|
|
+struct perf_aux_event___2 {
|
|
+ struct perf_event_header header;
|
|
+ u64 offset;
|
|
+ u64 size;
|
|
+ u64 flags;
|
|
+};
|
|
+
|
|
+enum perf_callchain_context {
|
|
+ PERF_CONTEXT_HV = -32,
|
|
+ PERF_CONTEXT_KERNEL = -128,
|
|
+ PERF_CONTEXT_USER = -512,
|
|
+ PERF_CONTEXT_GUEST = -2048,
|
|
+ PERF_CONTEXT_GUEST_KERNEL = -2176,
|
|
+ PERF_CONTEXT_GUEST_USER = -2560,
|
|
+ PERF_CONTEXT_MAX = -4095,
|
|
+};
|
|
+
|
|
+struct callchain_cpus_entries {
|
|
+ struct callback_head callback_head;
|
|
+ struct perf_callchain_entry *cpu_entries[0];
|
|
+};
|
|
+
|
|
+enum bp_type_idx {
|
|
+ TYPE_INST = 0,
|
|
+ TYPE_DATA = 0,
|
|
+ TYPE_MAX = 1,
|
|
+};
|
|
+
|
|
+struct bp_cpuinfo {
|
|
+ unsigned int cpu_pinned;
|
|
+ unsigned int *tsk_pinned;
|
|
+ unsigned int flexible;
|
|
+};
|
|
+
|
|
+struct bp_busy_slots {
|
|
+ unsigned int pinned;
|
|
+ unsigned int flexible;
|
|
+};
|
|
+
|
|
+typedef u8 uprobe_opcode_t;
|
|
+
|
|
+struct uprobe {
|
|
+ struct rb_node rb_node;
|
|
+ atomic_t ref;
|
|
+ struct rw_semaphore register_rwsem;
|
|
+ struct rw_semaphore consumer_rwsem;
|
|
+ struct list_head pending_list;
|
|
+ struct uprobe_consumer *consumers;
|
|
+ struct inode___2 *inode;
|
|
+ loff_t offset;
|
|
+ long unsigned int flags;
|
|
+ struct arch_uprobe arch;
|
|
+};
|
|
+
|
|
+struct xol_area {
|
|
+ wait_queue_head_t wq;
|
|
+ atomic_t slot_count;
|
|
+ long unsigned int *bitmap;
|
|
+ struct vm_special_mapping xol_mapping;
|
|
+ struct page *pages[2];
|
|
+ long unsigned int vaddr;
|
|
+};
|
|
+
|
|
+typedef long unsigned int vm_flags_t;
|
|
+
|
|
+typedef int filler_t(void *, struct page *);
|
|
+
|
|
+struct page_vma_mapped_walk {
|
|
+ struct page *page;
|
|
+ struct vm_area_struct *vma;
|
|
+ long unsigned int address;
|
|
+ pmd_t *pmd;
|
|
+ pte_t *pte;
|
|
+ spinlock_t *ptl;
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+struct map_info {
|
|
+ struct map_info *next;
|
|
+ struct mm_struct *mm;
|
|
+ long unsigned int vaddr;
|
|
+};
|
|
+
|
|
+struct user_return_notifier {
|
|
+ void (*on_user_return)(struct user_return_notifier *);
|
|
+ struct hlist_node link;
|
|
+};
|
|
+
|
|
+struct parallel_data;
|
|
+
|
|
+struct padata_priv {
|
|
+ struct list_head list;
|
|
+ struct parallel_data *pd;
|
|
+ int cb_cpu;
|
|
+ int cpu;
|
|
+ int info;
|
|
+ void (*parallel)(struct padata_priv *);
|
|
+ void (*serial)(struct padata_priv *);
|
|
+};
|
|
+
|
|
+struct padata_cpumask {
|
|
+ cpumask_var_t pcpu;
|
|
+ cpumask_var_t cbcpu;
|
|
+};
|
|
+
|
|
+struct padata_instance;
|
|
+
|
|
+struct padata_parallel_queue;
|
|
+
|
|
+struct padata_serial_queue;
|
|
+
|
|
+struct parallel_data {
|
|
+ struct padata_instance *pinst;
|
|
+ struct padata_parallel_queue *pqueue;
|
|
+ struct padata_serial_queue *squeue;
|
|
+ atomic_t reorder_objects;
|
|
+ atomic_t refcnt;
|
|
+ atomic_t seq_nr;
|
|
+ int cpu;
|
|
+ struct padata_cpumask cpumask;
|
|
+ struct work_struct reorder_work;
|
|
+ long: 64;
|
|
+ spinlock_t lock;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct padata_list {
|
|
+ struct list_head list;
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct padata_serial_queue {
|
|
+ struct padata_list serial;
|
|
+ struct work_struct work;
|
|
+ struct parallel_data *pd;
|
|
+};
|
|
+
|
|
+struct padata_parallel_queue {
|
|
+ struct padata_list parallel;
|
|
+ struct padata_list reorder;
|
|
+ struct work_struct work;
|
|
+ atomic_t num_obj;
|
|
+ int cpu_index;
|
|
+};
|
|
+
|
|
+struct padata_instance {
|
|
+ struct hlist_node node;
|
|
+ struct workqueue_struct *wq;
|
|
+ struct parallel_data *pd;
|
|
+ struct padata_cpumask cpumask;
|
|
+ struct blocking_notifier_head___2 cpumask_change_notifier;
|
|
+ struct kobject___3 kobj;
|
|
+ struct mutex lock;
|
|
+ u8 flags;
|
|
+};
|
|
+
|
|
+struct padata_sysfs_entry {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
|
|
+ ssize_t (*store)(struct padata_instance *, struct attribute *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct static_key_mod {
|
|
+ struct static_key_mod *next;
|
|
+ struct jump_entry *entries;
|
|
+ struct module___2 *mod;
|
|
+};
|
|
+
|
|
+struct static_key_deferred {
|
|
+ struct static_key key;
|
|
+ long unsigned int timeout;
|
|
+ struct delayed_work work;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_context_tracking_user {
|
|
+ struct trace_entry ent;
|
|
+ int dummy;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_context_tracking_user {};
|
|
+
|
|
+typedef enum ctx_state pto_T_____26;
|
|
+
|
|
+enum rseq_cpu_id_state {
|
|
+ RSEQ_CPU_ID_UNINITIALIZED = -1,
|
|
+ RSEQ_CPU_ID_REGISTRATION_FAILED = -2,
|
|
+};
|
|
+
|
|
+enum rseq_flags {
|
|
+ RSEQ_FLAG_UNREGISTER = 1,
|
|
+};
|
|
+
|
|
+enum rseq_cs_flags {
|
|
+ RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT = 1,
|
|
+ RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL = 2,
|
|
+ RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE = 4,
|
|
+};
|
|
+
|
|
+struct rseq_cs {
|
|
+ __u32 version;
|
|
+ __u32 flags;
|
|
+ __u64 start_ip;
|
|
+ __u64 post_commit_offset;
|
|
+ __u64 abort_ip;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_rseq_update {
|
|
+ struct trace_entry ent;
|
|
+ s32 cpu_id;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_rseq_ip_fixup {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int regs_ip;
|
|
+ long unsigned int start_ip;
|
|
+ long unsigned int post_commit_offset;
|
|
+ long unsigned int abort_ip;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_rseq_update {};
|
|
+
|
|
+struct trace_event_data_offsets_rseq_ip_fixup {};
|
|
+
|
|
+struct __key_reference_with_attributes;
|
|
+
|
|
+typedef struct __key_reference_with_attributes *key_ref_t;
|
|
+
|
|
+typedef int (*request_key_actor_t___2)(struct key___2 *, void *);
|
|
+
|
|
+struct pkcs7_message;
|
|
+
|
|
+struct radix_tree_iter {
|
|
+ long unsigned int index;
|
|
+ long unsigned int next_index;
|
|
+ long unsigned int tags;
|
|
+ struct radix_tree_node *node;
|
|
+ unsigned int shift;
|
|
+};
|
|
+
|
|
+typedef void (*radix_tree_update_node_t)(struct radix_tree_node *);
|
|
+
|
|
+enum {
|
|
+ RADIX_TREE_ITER_TAG_MASK = 15,
|
|
+ RADIX_TREE_ITER_TAGGED = 16,
|
|
+ RADIX_TREE_ITER_CONTIG = 32,
|
|
+};
|
|
+
|
|
+enum positive_aop_returns {
|
|
+ AOP_WRITEPAGE_ACTIVATE = 524288,
|
|
+ AOP_TRUNCATED_PAGE = 524289,
|
|
+};
|
|
+
|
|
+enum mapping_flags {
|
|
+ AS_EIO = 0,
|
|
+ AS_ENOSPC = 1,
|
|
+ AS_MM_ALL_LOCKS = 2,
|
|
+ AS_UNEVICTABLE = 3,
|
|
+ AS_EXITING = 4,
|
|
+ AS_NO_WRITEBACK_TAGS = 5,
|
|
+ AS_PERCPU_REF = 6,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ ITER_IOVEC = 0,
|
|
+ ITER_KVEC = 2,
|
|
+ ITER_BVEC = 4,
|
|
+ ITER_PIPE = 8,
|
|
+};
|
|
+
|
|
+struct pagevec {
+ unsigned char nr;
+ bool percpu_pvec_drained;
+ struct page *pages[15];
+};
+
+struct fid {
|
|
+ union {
|
|
+ struct {
|
|
+ u32 ino;
|
|
+ u32 gen;
|
|
+ u32 parent_ino;
|
|
+ u32 parent_gen;
|
|
+ } i32;
|
|
+ struct {
|
|
+ u32 block;
|
|
+ u16 partref;
|
|
+ u16 parent_partref;
|
|
+ u32 generation;
|
|
+ u32 parent_block;
|
|
+ u32 parent_generation;
|
|
+ } udf;
|
|
+ __u32 raw[0];
|
|
+ };
|
|
+};
|
|
+
|
|
+typedef void (*poll_queue_proc___3)(struct file___2 *, wait_queue_head_t *, struct poll_table_struct *);
|
|
+
|
|
+struct trace_event_raw_mm_filemap_op_page_cache {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int pfn;
|
|
+ long unsigned int i_ino;
|
|
+ long unsigned int index;
|
|
+ dev_t s_dev;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_filemap_set_wb_err {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int i_ino;
|
|
+ dev_t s_dev;
|
|
+ errseq_t errseq;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_file_check_and_advance_wb_err {
|
|
+ struct trace_entry ent;
|
|
+ struct file___2 *file;
|
|
+ long unsigned int i_ino;
|
|
+ dev_t s_dev;
|
|
+ errseq_t old;
|
|
+ errseq_t new;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_mm_filemap_op_page_cache {};
|
|
+
|
|
+struct trace_event_data_offsets_filemap_set_wb_err {};
|
|
+
|
|
+struct trace_event_data_offsets_file_check_and_advance_wb_err {};
|
|
+
|
|
+struct percpu_page {
|
|
+ struct percpu_ref ref;
|
|
+ struct page *page;
|
|
+};
|
|
+
|
|
+struct wait_page_key {
|
|
+ struct page *page;
|
|
+ int bit_nr;
|
|
+ int page_match;
|
|
+};
|
|
+
|
|
+struct wait_page_queue {
|
|
+ struct page *page;
|
|
+ int bit_nr;
|
|
+ wait_queue_entry_t wait;
|
|
+};
|
|
+
|
|
+struct kmem_cache_order_objects {
|
|
+ unsigned int x;
|
|
+};
|
|
+
|
|
+struct memcg_cache_array;
|
|
+
|
|
+struct memcg_cache_params {
|
|
+ struct kmem_cache *root_cache;
|
|
+ union {
|
|
+ struct {
|
|
+ struct memcg_cache_array *memcg_caches;
|
|
+ struct list_head __root_caches_node;
|
|
+ struct list_head children;
|
|
+ bool dying;
|
|
+ };
|
|
+ struct {
|
|
+ struct mem_cgroup *memcg;
|
|
+ struct list_head children_node;
|
|
+ struct list_head kmem_caches_node;
|
|
+ void (*deact_fn)(struct kmem_cache *);
|
|
+ union {
|
|
+ struct callback_head deact_rcu_head;
|
|
+ struct work_struct deact_work;
|
|
+ };
|
|
+ };
|
|
+ };
|
|
+};
|
|
+
|
|
+struct kmem_cache_cpu;
|
|
+
|
|
+struct kmem_cache_node;
|
|
+
|
|
+struct kmem_cache {
|
|
+ struct kmem_cache_cpu *cpu_slab;
|
|
+ slab_flags_t flags;
|
|
+ long unsigned int min_partial;
|
|
+ unsigned int size;
|
|
+ unsigned int object_size;
|
|
+ unsigned int offset;
|
|
+ unsigned int cpu_partial;
|
|
+ struct kmem_cache_order_objects oo;
|
|
+ struct kmem_cache_order_objects max;
|
|
+ struct kmem_cache_order_objects min;
|
|
+ gfp_t allocflags;
|
|
+ int refcount;
|
|
+ void (*ctor)(void *);
|
|
+ unsigned int inuse;
|
|
+ unsigned int align;
|
|
+ unsigned int red_left_pad;
|
|
+ const char *name;
|
|
+ struct list_head list;
|
|
+ struct kobject kobj;
|
|
+ struct work_struct kobj_remove_work;
|
|
+ struct memcg_cache_params memcg_params;
|
|
+ unsigned int max_attr_size;
|
|
+ struct kset *memcg_kset;
|
|
+ unsigned int remote_node_defrag_ratio;
|
|
+ unsigned int *random_seq;
|
|
+ unsigned int useroffset;
|
|
+ unsigned int usersize;
|
|
+ struct kmem_cache_node *node[1024];
|
|
+};
|
|
+
|
|
+struct memcg_cache_array {
|
|
+ struct callback_head rcu;
|
|
+ struct kmem_cache *entries[0];
|
|
+};
|
|
+
|
|
+struct kmem_cache_cpu {
|
|
+ void **freelist;
|
|
+ long unsigned int tid;
|
|
+ struct page *page;
|
|
+ struct page *partial;
|
|
+};
|
|
+
|
|
+struct kmem_cache_node {
|
|
+ spinlock_t list_lock;
|
|
+ long unsigned int nr_partial;
|
|
+ struct list_head partial;
|
|
+ atomic_long_t nr_slabs;
|
|
+ atomic_long_t total_objects;
|
|
+ struct list_head full;
|
|
+};
|
|
+
|
|
+enum slab_state {
|
|
+ DOWN = 0,
|
|
+ PARTIAL = 1,
|
|
+ PARTIAL_NODE = 2,
|
|
+ UP = 3,
|
|
+ FULL = 4,
|
|
+};
|
|
+
|
|
+struct kmalloc_info_struct {
|
|
+ const char *name;
|
|
+ unsigned int size;
|
|
+};
|
|
+
|
|
+enum oom_constraint {
|
|
+ CONSTRAINT_NONE = 0,
|
|
+ CONSTRAINT_CPUSET = 1,
|
|
+ CONSTRAINT_MEMORY_POLICY = 2,
|
|
+ CONSTRAINT_MEMCG = 3,
|
|
+};
|
|
+
|
|
+struct oom_control {
|
|
+ struct zonelist *zonelist;
|
|
+ nodemask_t *nodemask;
|
|
+ struct mem_cgroup *memcg;
|
|
+ const gfp_t gfp_mask;
|
|
+ const int order;
|
|
+ long unsigned int totalpages;
|
|
+ struct task_struct *chosen;
|
|
+ long unsigned int chosen_points;
|
|
+ enum oom_constraint constraint;
|
|
+};
|
|
+
|
|
+struct mem_cgroup_extension {
|
|
+ int memcg_priority;
|
|
+ struct mem_cgroup_stat_cpu *vmstats_local;
|
|
+ spinlock_t split_queue_lock;
|
|
+ struct list_head split_queue;
|
|
+ long unsigned int split_queue_len;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct mem_cgroup memcg;
|
|
+};
|
|
+
|
|
+struct sp_proc_stat {
|
|
+ atomic_t use_count;
|
|
+ int tgid;
|
|
+ struct mm_struct *mm;
|
|
+ struct mutex lock;
|
|
+ struct hlist_head hash[16];
|
|
+ char comm[16];
|
|
+ atomic64_t alloc_size;
|
|
+ atomic64_t k2u_size;
|
|
+};
|
|
+
|
|
+enum compact_priority {
|
|
+ COMPACT_PRIO_SYNC_FULL = 0,
|
|
+ MIN_COMPACT_PRIORITY = 0,
|
|
+ COMPACT_PRIO_SYNC_LIGHT = 1,
|
|
+ MIN_COMPACT_COSTLY_PRIORITY = 1,
|
|
+ DEF_COMPACT_PRIORITY = 1,
|
|
+ COMPACT_PRIO_ASYNC = 2,
|
|
+ INIT_COMPACT_PRIORITY = 2,
|
|
+};
|
|
+
|
|
+enum compact_result {
|
|
+ COMPACT_NOT_SUITABLE_ZONE = 0,
|
|
+ COMPACT_SKIPPED = 1,
|
|
+ COMPACT_DEFERRED = 2,
|
|
+ COMPACT_INACTIVE = 2,
|
|
+ COMPACT_NO_SUITABLE_PAGE = 3,
|
|
+ COMPACT_CONTINUE = 4,
|
|
+ COMPACT_COMPLETE = 5,
|
|
+ COMPACT_PARTIAL_SKIPPED = 6,
|
|
+ COMPACT_CONTENDED = 7,
|
|
+ COMPACT_SUCCESS = 8,
|
|
+};
|
|
+
|
|
+struct trace_event_raw_oom_score_adj_update {
|
|
+ struct trace_entry ent;
|
|
+ pid_t pid;
|
|
+ char comm[16];
|
|
+ short int oom_score_adj;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_reclaim_retry_zone {
|
|
+ struct trace_entry ent;
|
|
+ int node;
|
|
+ int zone_idx;
|
|
+ int order;
|
|
+ long unsigned int reclaimable;
|
|
+ long unsigned int available;
|
|
+ long unsigned int min_wmark;
|
|
+ int no_progress_loops;
|
|
+ bool wmark_check;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mark_victim {
|
|
+ struct trace_entry ent;
|
|
+ int pid;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_wake_reaper {
|
|
+ struct trace_entry ent;
|
|
+ int pid;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_start_task_reaping {
|
|
+ struct trace_entry ent;
|
|
+ int pid;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_finish_task_reaping {
|
|
+ struct trace_entry ent;
|
|
+ int pid;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_skip_task_reaping {
|
|
+ struct trace_entry ent;
|
|
+ int pid;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_compact_retry {
|
|
+ struct trace_entry ent;
|
|
+ int order;
|
|
+ int priority;
|
|
+ int result;
|
|
+ int retries;
|
|
+ int max_retries;
|
|
+ bool ret;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_oom_score_adj_update {};
|
|
+
|
|
+struct trace_event_data_offsets_reclaim_retry_zone {};
|
|
+
|
|
+struct trace_event_data_offsets_mark_victim {};
|
|
+
|
|
+struct trace_event_data_offsets_wake_reaper {};
|
|
+
|
|
+struct trace_event_data_offsets_start_task_reaping {};
|
|
+
|
|
+struct trace_event_data_offsets_finish_task_reaping {};
|
|
+
|
|
+struct trace_event_data_offsets_skip_task_reaping {};
|
|
+
|
|
+struct trace_event_data_offsets_compact_retry {};
|
|
+
|
|
+enum wb_congested_state {
|
|
+ WB_async_congested = 0,
|
|
+ WB_sync_congested = 1,
|
|
+};
|
|
+
|
|
+enum pageblock_bits {
|
|
+ PB_migrate = 0,
|
|
+ PB_migrate_end = 2,
|
|
+ PB_migrate_skip = 3,
|
|
+ NR_PAGEBLOCK_BITS = 4,
|
|
+};
|
|
+
|
|
+struct mminit_pfnnid_cache {
|
|
+ long unsigned int last_start;
|
|
+ long unsigned int last_end;
|
|
+ int last_nid;
|
|
+};
|
|
+
|
|
+struct page_frag_cache {
|
|
+ void *va;
|
|
+ __u16 offset;
|
|
+ __u16 size;
|
|
+ unsigned int pagecnt_bias;
|
|
+ bool pfmemalloc;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ BLK_RW_ASYNC = 0,
|
|
+ BLK_RW_SYNC = 1,
|
|
+};
|
|
+
|
|
+struct ktask_node {
|
|
+ void *kn_start;
|
|
+ size_t kn_task_size;
|
|
+ int kn_nid;
|
|
+ void *kn_position;
|
|
+ size_t kn_remaining_size;
|
|
+ struct list_head kn_failed_works;
|
|
+};
|
|
+
|
|
+typedef int (*ktask_thread_func)(void *, void *, void *);
|
|
+
|
|
+typedef void (*ktask_undo_func)(void *, void *, void *);
|
|
+
|
|
+typedef void * (*ktask_iter_func)(void *, size_t);
|
|
+
|
|
+struct ktask_ctl {
|
|
+ ktask_thread_func kc_thread_func;
|
|
+ ktask_undo_func kc_undo_func;
|
|
+ void *kc_func_arg;
|
|
+ size_t kc_min_chunk_size;
|
|
+ ktask_iter_func kc_iter_func;
|
|
+ size_t kc_max_threads;
|
|
+};
|
|
+
|
|
+struct alloc_context {
|
|
+ struct zonelist *zonelist;
|
|
+ nodemask_t *nodemask;
|
|
+ struct zoneref *preferred_zoneref;
|
|
+ int migratetype;
|
|
+ enum zone_type high_zoneidx;
|
|
+ bool spread_dirty_pages;
|
|
+};
|
|
+
|
|
+struct compact_control {
|
|
+ struct list_head freepages;
|
|
+ struct list_head migratepages;
|
|
+ struct zone *zone;
|
|
+ long unsigned int nr_freepages;
|
|
+ long unsigned int nr_migratepages;
|
|
+ long unsigned int total_migrate_scanned;
|
|
+ long unsigned int total_free_scanned;
|
|
+ long unsigned int free_pfn;
|
|
+ long unsigned int migrate_pfn;
|
|
+ long unsigned int last_migrated_pfn;
|
|
+ const gfp_t gfp_mask;
|
|
+ int order;
|
|
+ int migratetype;
|
|
+ const unsigned int alloc_flags;
|
|
+ const int classzone_idx;
|
|
+ enum migrate_mode mode;
|
|
+ bool ignore_skip_hint;
|
|
+ bool no_set_skip_hint;
|
|
+ bool ignore_block_suitable;
|
|
+ bool direct_compaction;
|
|
+ bool whole_zone;
|
|
+ bool contended;
|
|
+ bool finishing_block;
|
|
+};
|
|
+
|
|
+enum mminit_level {
|
|
+ MMINIT_WARNING = 0,
|
|
+ MMINIT_VERIFY = 1,
|
|
+ MMINIT_TRACE = 2,
|
|
+};
|
|
+
|
|
+typedef int fpi_t;
|
|
+
|
|
+struct deferred_args {
|
|
+ int nid;
|
|
+ int zid;
|
|
+ atomic64_t nr_pages;
|
|
+};
|
|
+
|
|
+enum wb_state {
|
|
+ WB_registered = 0,
|
|
+ WB_writeback_running = 1,
|
|
+ WB_has_dirty_io = 2,
|
|
+ WB_start_all = 3,
|
|
+};
|
|
+
|
|
+struct wb_lock_cookie {
|
|
+ bool locked;
|
|
+ long unsigned int flags;
|
|
+};
|
|
+
|
|
+typedef int (*writepage_t)(struct page *, struct writeback_control *, void *);
|
|
+
|
|
+struct dirty_throttle_control {
|
|
+ struct wb_domain *dom;
|
|
+ struct dirty_throttle_control *gdtc;
|
|
+ struct bdi_writeback *wb;
|
|
+ struct fprop_local_percpu *wb_completions;
|
|
+ long unsigned int avail;
|
|
+ long unsigned int dirty;
|
|
+ long unsigned int thresh;
|
|
+ long unsigned int bg_thresh;
|
|
+ long unsigned int wb_dirty;
|
|
+ long unsigned int wb_thresh;
|
|
+ long unsigned int wb_bg_thresh;
|
|
+ long unsigned int pos_ratio;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_lru_insertion {
|
|
+ struct trace_entry ent;
|
|
+ struct page *page;
|
|
+ long unsigned int pfn;
|
|
+ int lru;
|
|
+ long unsigned int flags;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_lru_activate {
|
|
+ struct trace_entry ent;
|
|
+ struct page *page;
|
|
+ long unsigned int pfn;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_mm_lru_insertion {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_lru_activate {};
|
|
+
|
|
+enum pgdat_flags {
|
|
+ PGDAT_CONGESTED = 0,
|
|
+ PGDAT_DIRTY = 1,
|
|
+ PGDAT_WRITEBACK = 2,
|
|
+ PGDAT_RECLAIM_LOCKED = 3,
|
|
+};
|
|
+
|
|
+struct reclaim_stat {
|
|
+ unsigned int nr_dirty;
|
|
+ unsigned int nr_unqueued_dirty;
|
|
+ unsigned int nr_congested;
|
|
+ unsigned int nr_writeback;
|
|
+ unsigned int nr_immediate;
|
|
+ unsigned int nr_activate;
|
|
+ unsigned int nr_ref_keep;
|
|
+ unsigned int nr_unmap_fail;
|
|
+ unsigned int nr_lazyfree_fail;
|
|
+};
|
|
+
|
|
+enum mem_cgroup_protection {
|
|
+ MEMCG_PROT_NONE = 0,
|
|
+ MEMCG_PROT_LOW = 1,
|
|
+ MEMCG_PROT_MIN = 2,
|
|
+};
|
|
+
|
|
+struct mem_cgroup_reclaim_cookie {
|
|
+ pg_data_t *pgdat;
|
|
+ int priority;
|
|
+ unsigned int generation;
|
|
+};
|
|
+
|
|
+struct mem_cgroup_per_node_extension {
|
|
+ struct mem_cgroup_per_node pn;
|
|
+ struct lruvec_stat *lruvec_stat_local;
|
|
+};
|
|
+
|
|
+enum ttu_flags {
|
|
+ TTU_MIGRATION = 1,
|
|
+ TTU_MUNLOCK = 2,
|
|
+ TTU_SPLIT_HUGE_PMD = 4,
|
|
+ TTU_IGNORE_MLOCK = 8,
|
|
+ TTU_IGNORE_ACCESS = 16,
|
|
+ TTU_IGNORE_HWPOISON = 32,
|
|
+ TTU_BATCH_FLUSH = 64,
|
|
+ TTU_RMAP_LOCKED = 128,
|
|
+ TTU_SPLIT_FREEZE = 256,
|
|
+ TTU_SYNC = 512,
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_vmscan_kswapd_sleep {
|
|
+ struct trace_entry ent;
|
|
+ int nid;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_vmscan_kswapd_wake {
|
|
+ struct trace_entry ent;
|
|
+ int nid;
|
|
+ int zid;
|
|
+ int order;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_vmscan_wakeup_kswapd {
|
|
+ struct trace_entry ent;
|
|
+ int nid;
|
|
+ int zid;
|
|
+ int order;
|
|
+ gfp_t gfp_flags;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_vmscan_direct_reclaim_begin_template {
|
|
+ struct trace_entry ent;
|
|
+ int order;
|
|
+ int may_writepage;
|
|
+ gfp_t gfp_flags;
|
|
+ int classzone_idx;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_vmscan_direct_reclaim_end_template {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int nr_reclaimed;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_shrink_slab_start {
|
|
+ struct trace_entry ent;
|
|
+ struct shrinker *shr;
|
|
+ void *shrink;
|
|
+ int nid;
|
|
+ long int nr_objects_to_shrink;
|
|
+ gfp_t gfp_flags;
|
|
+ long unsigned int cache_items;
|
|
+ long long unsigned int delta;
|
|
+ long unsigned int total_scan;
|
|
+ int priority;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_shrink_slab_end {
|
|
+ struct trace_entry ent;
|
|
+ struct shrinker *shr;
|
|
+ int nid;
|
|
+ void *shrink;
|
|
+ long int unused_scan;
|
|
+ long int new_scan;
|
|
+ int retval;
|
|
+ long int total_scan;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_vmscan_lru_isolate {
|
|
+ struct trace_entry ent;
|
|
+ int classzone_idx;
|
|
+ int order;
|
|
+ long unsigned int nr_requested;
|
|
+ long unsigned int nr_scanned;
|
|
+ long unsigned int nr_skipped;
|
|
+ long unsigned int nr_taken;
|
|
+ isolate_mode_t isolate_mode;
|
|
+ int lru;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_vmscan_writepage {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int pfn;
|
|
+ int reclaim_flags;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_vmscan_lru_shrink_inactive {
|
|
+ struct trace_entry ent;
|
|
+ int nid;
|
|
+ long unsigned int nr_scanned;
|
|
+ long unsigned int nr_reclaimed;
|
|
+ long unsigned int nr_dirty;
|
|
+ long unsigned int nr_writeback;
|
|
+ long unsigned int nr_congested;
|
|
+ long unsigned int nr_immediate;
|
|
+ long unsigned int nr_activate;
|
|
+ long unsigned int nr_ref_keep;
|
|
+ long unsigned int nr_unmap_fail;
|
|
+ int priority;
|
|
+ int reclaim_flags;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_vmscan_lru_shrink_active {
|
|
+ struct trace_entry ent;
|
|
+ int nid;
|
|
+ long unsigned int nr_taken;
|
|
+ long unsigned int nr_active;
|
|
+ long unsigned int nr_deactivated;
|
|
+ long unsigned int nr_referenced;
|
|
+ int priority;
|
|
+ int reclaim_flags;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_vmscan_inactive_list_is_low {
|
|
+ struct trace_entry ent;
|
|
+ int nid;
|
|
+ int reclaim_idx;
|
|
+ long unsigned int total_inactive;
|
|
+ long unsigned int inactive;
|
|
+ long unsigned int total_active;
|
|
+ long unsigned int active;
|
|
+ long unsigned int ratio;
|
|
+ int reclaim_flags;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_mm_vmscan_kswapd_sleep {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_vmscan_kswapd_wake {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_vmscan_wakeup_kswapd {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_vmscan_direct_reclaim_begin_template {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_vmscan_direct_reclaim_end_template {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_shrink_slab_start {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_shrink_slab_end {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_vmscan_lru_isolate {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_vmscan_writepage {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_vmscan_lru_shrink_inactive {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_vmscan_lru_shrink_active {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_vmscan_inactive_list_is_low {};
|
|
+
|
|
+struct scan_control {
|
|
+ long unsigned int nr_to_reclaim;
|
|
+ nodemask_t *nodemask;
|
|
+ struct mem_cgroup *target_mem_cgroup;
|
|
+ unsigned int may_writepage: 1;
|
|
+ unsigned int may_unmap: 1;
|
|
+ unsigned int may_swap: 1;
|
|
+ unsigned int memcg_low_reclaim: 1;
|
|
+ unsigned int memcg_low_skipped: 1;
|
|
+ unsigned int hibernation_mode: 1;
|
|
+ unsigned int compaction_ready: 1;
|
|
+ s8 order;
|
|
+ s8 priority;
|
|
+ s8 reclaim_idx;
|
|
+ gfp_t gfp_mask;
|
|
+ long unsigned int nr_scanned;
|
|
+ long unsigned int nr_reclaimed;
|
|
+ struct {
|
|
+ unsigned int dirty;
|
|
+ unsigned int unqueued_dirty;
|
|
+ unsigned int congested;
|
|
+ unsigned int writeback;
|
|
+ unsigned int immediate;
|
|
+ unsigned int file_taken;
|
|
+ unsigned int taken;
|
|
+ } nr;
|
|
+};
|
|
+
|
|
+typedef enum {
|
|
+ PAGE_KEEP = 0,
|
|
+ PAGE_ACTIVATE = 1,
|
|
+ PAGE_SUCCESS = 2,
|
|
+ PAGE_CLEAN = 3,
|
|
+} pageout_t;
|
|
+
|
|
+enum page_references {
|
|
+ PAGEREF_RECLAIM = 0,
|
|
+ PAGEREF_RECLAIM_CLEAN = 1,
|
|
+ PAGEREF_KEEP = 2,
|
|
+ PAGEREF_ACTIVATE = 3,
|
|
+};
|
|
+
|
|
+enum scan_balance {
|
|
+ SCAN_EQUAL = 0,
|
|
+ SCAN_FRACT = 1,
|
|
+ SCAN_ANON = 2,
|
|
+ SCAN_FILE = 3,
|
|
+};
|
|
+
|
|
+typedef __kernel_ulong_t __kernel_ino_t;
|
|
+
|
|
+typedef __kernel_ino_t ino_t;
|
|
+
|
|
+enum transparent_hugepage_flag {
|
|
+ TRANSPARENT_HUGEPAGE_FLAG = 0,
|
|
+ TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG = 1,
|
|
+ TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG = 2,
|
|
+ TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG = 3,
|
|
+ TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG = 4,
|
|
+ TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG = 5,
|
|
+ TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG = 6,
|
|
+ TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG = 7,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ MPOL_DEFAULT = 0,
|
|
+ MPOL_PREFERRED = 1,
|
|
+ MPOL_BIND = 2,
|
|
+ MPOL_INTERLEAVE = 3,
|
|
+ MPOL_LOCAL = 4,
|
|
+ MPOL_MAX = 5,
|
|
+};
|
|
+
|
|
+struct shared_policy {
|
|
+ struct rb_root root;
|
|
+ rwlock_t lock;
|
|
+};
|
|
+
|
|
+struct xattr {
|
|
+ const char *name;
|
|
+ void *value;
|
|
+ size_t value_len;
|
|
+};
|
|
+
|
|
+struct simple_xattrs {
|
|
+ struct list_head head;
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct simple_xattr {
|
|
+ struct list_head list;
|
|
+ char *name;
|
|
+ size_t size;
|
|
+ char value[0];
|
|
+};
|
|
+
|
|
+enum fid_type {
|
|
+ FILEID_ROOT = 0,
|
|
+ FILEID_INO32_GEN = 1,
|
|
+ FILEID_INO32_GEN_PARENT = 2,
|
|
+ FILEID_BTRFS_WITHOUT_PARENT = 77,
|
|
+ FILEID_BTRFS_WITH_PARENT = 78,
|
|
+ FILEID_BTRFS_WITH_PARENT_ROOT = 79,
|
|
+ FILEID_UDF_WITHOUT_PARENT = 81,
|
|
+ FILEID_UDF_WITH_PARENT = 82,
|
|
+ FILEID_NILFS_WITHOUT_PARENT = 97,
|
|
+ FILEID_NILFS_WITH_PARENT = 98,
|
|
+ FILEID_FAT_WITHOUT_PARENT = 113,
|
|
+ FILEID_FAT_WITH_PARENT = 114,
|
|
+ FILEID_LUSTRE = 151,
|
|
+ FILEID_INVALID = 255,
|
|
+};
|
|
+
|
|
+struct shmem_inode_info {
|
|
+ spinlock_t lock;
|
|
+ unsigned int seals;
|
|
+ long unsigned int flags;
|
|
+ long unsigned int alloced;
|
|
+ long unsigned int swapped;
|
|
+ struct list_head shrinklist;
|
|
+ struct list_head swaplist;
|
|
+ struct shared_policy policy;
|
|
+ struct simple_xattrs xattrs;
|
|
+ struct inode___2 vfs_inode;
|
|
+};
|
|
+
|
|
+struct shmem_sb_info {
|
|
+ long unsigned int max_blocks;
|
|
+ struct percpu_counter used_blocks;
|
|
+ long unsigned int max_inodes;
|
|
+ long unsigned int free_inodes;
|
|
+ spinlock_t stat_lock;
|
|
+ umode_t mode;
|
|
+ unsigned char huge;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ bool full_inums;
|
|
+ ino_t next_ino;
|
|
+ ino_t *ino_batch;
|
|
+ struct mempolicy *mpol;
|
|
+ spinlock_t shrinklist_lock;
|
|
+ struct list_head shrinklist;
|
|
+ long unsigned int shrinklist_len;
|
|
+};
|
|
+
|
|
+enum sgp_type {
|
|
+ SGP_READ = 0,
|
|
+ SGP_CACHE = 1,
|
|
+ SGP_NOHUGE = 2,
|
|
+ SGP_HUGE = 3,
|
|
+ SGP_WRITE = 4,
|
|
+ SGP_FALLOC = 5,
|
|
+};
|
|
+
|
|
+struct shmem_falloc {
|
|
+ wait_queue_head_t *waitq;
|
|
+ long unsigned int start;
|
|
+ long unsigned int next;
|
|
+ long unsigned int nr_falloced;
|
|
+ long unsigned int nr_unswapped;
|
|
+};
|
|
+
|
|
+struct contig_page_info {
|
|
+ long unsigned int free_pages;
|
|
+ long unsigned int free_blocks_total;
|
|
+ long unsigned int free_blocks_suitable;
|
|
+};
|
|
+
|
|
+enum writeback_stat_item {
|
|
+ NR_DIRTY_THRESHOLD = 0,
|
|
+ NR_DIRTY_BG_THRESHOLD = 1,
|
|
+ NR_VM_WRITEBACK_STAT_ITEMS = 2,
|
|
+};
|
|
+
|
|
+typedef s8 pto_T_____27;
|
|
+
|
|
+struct pcpu_group_info {
|
|
+ int nr_units;
|
|
+ long unsigned int base_offset;
|
|
+ unsigned int *cpu_map;
|
|
+};
|
|
+
|
|
+struct pcpu_alloc_info {
|
|
+ size_t static_size;
|
|
+ size_t reserved_size;
|
|
+ size_t dyn_size;
|
|
+ size_t unit_size;
|
|
+ size_t atom_size;
|
|
+ size_t alloc_size;
|
|
+ size_t __ai_size;
|
|
+ int nr_groups;
|
|
+ struct pcpu_group_info groups[0];
|
|
+};
|
|
+
|
|
+typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int, size_t, size_t);
|
|
+
|
|
+typedef void (*pcpu_fc_free_fn_t)(void *, size_t);
|
|
+
|
|
+typedef void (*pcpu_fc_populate_pte_fn_t)(long unsigned int);
|
|
+
|
|
+typedef int pcpu_fc_cpu_distance_fn_t(unsigned int, unsigned int);
|
|
+
|
|
+struct trace_event_raw_percpu_alloc_percpu {
|
|
+ struct trace_entry ent;
|
|
+ bool reserved;
|
|
+ bool is_atomic;
|
|
+ size_t size;
|
|
+ size_t align;
|
|
+ void *base_addr;
|
|
+ int off;
|
|
+ void *ptr;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_percpu_free_percpu {
|
|
+ struct trace_entry ent;
|
|
+ void *base_addr;
|
|
+ int off;
|
|
+ void *ptr;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_percpu_alloc_percpu_fail {
|
|
+ struct trace_entry ent;
|
|
+ bool reserved;
|
|
+ bool is_atomic;
|
|
+ size_t size;
|
|
+ size_t align;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_percpu_create_chunk {
|
|
+ struct trace_entry ent;
|
|
+ void *base_addr;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_percpu_destroy_chunk {
|
|
+ struct trace_entry ent;
|
|
+ void *base_addr;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_percpu_alloc_percpu {};
|
|
+
|
|
+struct trace_event_data_offsets_percpu_free_percpu {};
|
|
+
|
|
+struct trace_event_data_offsets_percpu_alloc_percpu_fail {};
|
|
+
|
|
+struct trace_event_data_offsets_percpu_create_chunk {};
|
|
+
|
|
+struct trace_event_data_offsets_percpu_destroy_chunk {};
|
|
+
|
|
+struct pcpu_block_md {
|
|
+ int contig_hint;
|
|
+ int contig_hint_start;
|
|
+ int left_free;
|
|
+ int right_free;
|
|
+ int first_free;
|
|
+};
|
|
+
|
|
+struct pcpu_chunk {
|
|
+ struct list_head list;
|
|
+ int free_bytes;
|
|
+ int contig_bits;
|
|
+ int contig_bits_start;
|
|
+ void *base_addr;
|
|
+ long unsigned int *alloc_map;
|
|
+ long unsigned int *bound_map;
|
|
+ struct pcpu_block_md *md_blocks;
|
|
+ void *data;
|
|
+ int first_bit;
|
|
+ bool immutable;
|
|
+ int start_offset;
|
|
+ int end_offset;
|
|
+ int nr_pages;
|
|
+ int nr_populated;
|
|
+ int nr_empty_pop_pages;
|
|
+ long unsigned int populated[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_kmem_alloc {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int call_site;
|
|
+ const void *ptr;
|
|
+ size_t bytes_req;
|
|
+ size_t bytes_alloc;
|
|
+ gfp_t gfp_flags;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_kmem_alloc_node {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int call_site;
|
|
+ const void *ptr;
|
|
+ size_t bytes_req;
|
|
+ size_t bytes_alloc;
|
|
+ gfp_t gfp_flags;
|
|
+ int node;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_kmem_free {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int call_site;
|
|
+ const void *ptr;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_page_free {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int pfn;
|
|
+ unsigned int order;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_page_free_batched {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int pfn;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_page_alloc {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int pfn;
|
|
+ unsigned int order;
|
|
+ gfp_t gfp_flags;
|
|
+ int migratetype;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_page {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int pfn;
|
|
+ unsigned int order;
|
|
+ int migratetype;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_page_pcpu_drain {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int pfn;
|
|
+ unsigned int order;
|
|
+ int migratetype;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_page_alloc_extfrag {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int pfn;
|
|
+ int alloc_order;
|
|
+ int fallback_order;
|
|
+ int alloc_migratetype;
|
|
+ int fallback_migratetype;
|
|
+ int change_ownership;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_kmem_alloc {};
|
|
+
|
|
+struct trace_event_data_offsets_kmem_alloc_node {};
|
|
+
|
|
+struct trace_event_data_offsets_kmem_free {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_page_free {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_page_free_batched {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_page_alloc {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_page {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_page_pcpu_drain {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_page_alloc_extfrag {};
|
|
+
|
|
+struct slabinfo {
|
|
+ long unsigned int active_objs;
|
|
+ long unsigned int num_objs;
|
|
+ long unsigned int active_slabs;
|
|
+ long unsigned int num_slabs;
|
|
+ long unsigned int shared_avail;
|
|
+ unsigned int limit;
|
|
+ unsigned int batchcount;
|
|
+ unsigned int shared;
|
|
+ unsigned int objects_per_slab;
|
|
+ unsigned int cache_order;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_compaction_isolate_template {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int start_pfn;
|
|
+ long unsigned int end_pfn;
|
|
+ long unsigned int nr_scanned;
|
|
+ long unsigned int nr_taken;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_compaction_migratepages {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int nr_migrated;
|
|
+ long unsigned int nr_failed;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_compaction_begin {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int zone_start;
|
|
+ long unsigned int migrate_pfn;
|
|
+ long unsigned int free_pfn;
|
|
+ long unsigned int zone_end;
|
|
+ bool sync;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_compaction_end {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int zone_start;
|
|
+ long unsigned int migrate_pfn;
|
|
+ long unsigned int free_pfn;
|
|
+ long unsigned int zone_end;
|
|
+ bool sync;
|
|
+ int status;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_compaction_try_to_compact_pages {
|
|
+ struct trace_entry ent;
|
|
+ int order;
|
|
+ gfp_t gfp_mask;
|
|
+ int prio;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_compaction_suitable_template {
|
|
+ struct trace_entry ent;
|
|
+ int nid;
|
|
+ enum zone_type idx;
|
|
+ int order;
|
|
+ int ret;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_compaction_defer_template {
|
|
+ struct trace_entry ent;
|
|
+ int nid;
|
|
+ enum zone_type idx;
|
|
+ int order;
|
|
+ unsigned int considered;
|
|
+ unsigned int defer_shift;
|
|
+ int order_failed;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_compaction_kcompactd_sleep {
|
|
+ struct trace_entry ent;
|
|
+ int nid;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_kcompactd_wake_template {
|
|
+ struct trace_entry ent;
|
|
+ int nid;
|
|
+ int order;
|
|
+ enum zone_type classzone_idx;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_mm_compaction_isolate_template {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_compaction_migratepages {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_compaction_begin {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_compaction_end {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_compaction_try_to_compact_pages {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_compaction_suitable_template {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_compaction_defer_template {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_compaction_kcompactd_sleep {};
|
|
+
|
|
+struct trace_event_data_offsets_kcompactd_wake_template {};
|
|
+
|
|
+typedef enum {
|
|
+ ISOLATE_ABORT = 0,
|
|
+ ISOLATE_NONE = 1,
|
|
+ ISOLATE_SUCCESS = 2,
|
|
+} isolate_migrate_t;
|
|
+
|
|
+struct anon_vma_chain {
|
|
+ struct vm_area_struct *vma;
|
|
+ struct anon_vma *anon_vma;
|
|
+ struct list_head same_vma;
|
|
+ struct rb_node rb;
|
|
+ long unsigned int rb_subtree_last;
|
|
+};
|
|
+
|
|
+enum lru_status {
|
|
+ LRU_REMOVED = 0,
|
|
+ LRU_REMOVED_RETRY = 1,
|
|
+ LRU_ROTATE = 2,
|
|
+ LRU_SKIP = 3,
|
|
+ LRU_RETRY = 4,
|
|
+};
|
|
+
|
|
+typedef enum lru_status (*list_lru_walk_cb)(struct list_head *, struct list_lru_one *, spinlock_t *, void *);
|
|
+
|
|
+typedef struct {
|
|
+ long unsigned int pd;
|
|
+} hugepd_t;
|
|
+
|
|
+struct zap_details {
|
|
+ struct address_space *check_mapping;
|
|
+ long unsigned int first_index;
|
|
+ long unsigned int last_index;
|
|
+ struct page *single_page;
|
|
+};
|
|
+
|
|
+typedef int (*pte_fn_t)(pte_t *, pgtable_t, long unsigned int, void *);
|
|
+
|
|
+enum {
|
|
+ SWP_USED = 1,
|
|
+ SWP_WRITEOK = 2,
|
|
+ SWP_DISCARDABLE = 4,
|
|
+ SWP_DISCARDING = 8,
|
|
+ SWP_SOLIDSTATE = 16,
|
|
+ SWP_CONTINUED = 32,
|
|
+ SWP_BLKDEV = 64,
|
|
+ SWP_ACTIVATED = 128,
|
|
+ SWP_FS = 256,
|
|
+ SWP_AREA_DISCARD = 512,
|
|
+ SWP_PAGE_DISCARD = 1024,
|
|
+ SWP_STABLE_WRITES = 2048,
|
|
+ SWP_SYNCHRONOUS_IO = 4096,
|
|
+ SWP_VALID = 8192,
|
|
+ SWP_SCANNING = 16384,
|
|
+};
|
|
+
|
|
+struct cgp_args {
|
|
+ struct page *base_page;
|
|
+ long unsigned int addr;
|
|
+};
|
|
+
|
|
+struct copy_subpage_arg {
|
|
+ struct page *dst;
|
|
+ struct page *src;
|
|
+ struct vm_area_struct *vma;
|
|
+};
|
|
+
|
|
+struct mm_walk {
|
|
+ int (*pud_entry)(pud_t *, long unsigned int, long unsigned int, struct mm_walk *);
|
|
+ int (*pmd_entry)(pmd_t *, long unsigned int, long unsigned int, struct mm_walk *);
|
|
+ int (*pte_entry)(pte_t *, long unsigned int, long unsigned int, struct mm_walk *);
|
|
+ int (*pte_hole)(long unsigned int, long unsigned int, struct mm_walk *);
|
|
+ int (*hugetlb_entry)(pte_t *, long unsigned int, long unsigned int, long unsigned int, struct mm_walk *);
|
|
+ int (*test_walk)(long unsigned int, long unsigned int, struct mm_walk *);
|
|
+ struct mm_struct *mm;
|
|
+ struct vm_area_struct *vma;
|
|
+ void *private;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ HUGETLB_SHMFS_INODE = 1,
|
|
+ HUGETLB_ANONHUGE_INODE = 2,
|
|
+};
|
|
+
|
|
+struct attribute_group___3;
|
|
+
|
|
+struct rmap_walk_control {
|
|
+ void *arg;
|
|
+ bool (*rmap_one)(struct page *, struct vm_area_struct *, long unsigned int, void *);
|
|
+ int (*done)(struct page *);
|
|
+ struct anon_vma * (*anon_lock)(struct page *);
|
|
+ bool (*invalid_vma)(struct vm_area_struct *, void *);
|
|
+};
|
|
+
|
|
+struct page_referenced_arg {
|
|
+ int mapcount;
|
|
+ int referenced;
|
|
+ long unsigned int vm_flags;
|
|
+ struct mem_cgroup *memcg;
|
|
+};
|
|
+
|
|
+struct vmap_area {
|
|
+ long unsigned int va_start;
|
|
+ long unsigned int va_end;
|
|
+ long unsigned int subtree_max_size;
|
|
+ long unsigned int flags;
|
|
+ struct rb_node rb_node;
|
|
+ struct list_head list;
|
|
+ struct llist_node purge_list;
|
|
+ struct vm_struct___2 *vm;
|
|
+};
|
|
+
|
|
+struct vfree_deferred {
|
|
+ struct llist_head list;
|
|
+ struct work_struct wq;
|
|
+};
|
|
+
|
|
+enum fit_type {
|
|
+ NOTHING_FIT = 0,
|
|
+ FL_FIT_TYPE = 1,
|
|
+ LE_FIT_TYPE = 2,
|
|
+ RE_FIT_TYPE = 3,
|
|
+ NE_FIT_TYPE = 4,
|
|
+};
|
|
+
|
|
+struct vmap_block_queue {
|
|
+ spinlock_t lock;
|
|
+ struct list_head free;
|
|
+};
|
|
+
|
|
+struct vmap_block {
|
|
+ spinlock_t lock;
|
|
+ struct vmap_area *va;
|
|
+ long unsigned int free;
|
|
+ long unsigned int dirty;
|
|
+ long unsigned int dirty_min;
|
|
+ long unsigned int dirty_max;
|
|
+ struct list_head free_list;
|
|
+ struct callback_head callback_head;
|
|
+ struct list_head purge;
|
|
+};
|
|
+
|
|
+struct vma_swap_readahead {
|
|
+ short unsigned int win;
|
|
+ short unsigned int offset;
|
|
+ short unsigned int nr_pte;
|
|
+ pte_t *ptes;
|
|
+};
|
|
+
|
|
+union swap_header {
|
|
+ struct {
|
|
+ char reserved[4086];
|
|
+ char magic[10];
|
|
+ } magic;
|
|
+ struct {
|
|
+ char bootbits[1024];
|
|
+ __u32 version;
|
|
+ __u32 last_page;
|
|
+ __u32 nr_badpages;
|
|
+ unsigned char sws_uuid[16];
|
|
+ unsigned char sws_volume[16];
|
|
+ __u32 padding[117];
|
|
+ __u32 badpages[1];
|
|
+ } info;
|
|
+};
|
|
+
|
|
+struct swap_slots_cache {
|
|
+ bool lock_initialized;
|
|
+ struct mutex alloc_lock;
|
|
+ swp_entry_t *slots;
|
|
+ int nr;
|
|
+ int cur;
|
|
+ spinlock_t free_lock;
|
|
+ swp_entry_t *slots_ret;
|
|
+ int n_ret;
|
|
+};
|
|
+
|
|
+struct frontswap_ops {
|
|
+ void (*init)(unsigned int);
|
|
+ int (*store)(unsigned int, long unsigned int, struct page *);
|
|
+ int (*load)(unsigned int, long unsigned int, struct page *);
|
|
+ void (*invalidate_page)(unsigned int, long unsigned int);
|
|
+ void (*invalidate_area)(unsigned int);
|
|
+ struct frontswap_ops *next;
|
|
+};
|
|
+
|
|
+struct crypto_comp {
|
|
+ struct crypto_tfm base;
|
|
+};
|
|
+
|
|
+struct zpool;
|
|
+
|
|
+struct zpool_ops {
|
|
+ int (*evict)(struct zpool *, long unsigned int);
|
|
+};
|
|
+
|
|
+enum zpool_mapmode {
|
|
+ ZPOOL_MM_RW = 0,
|
|
+ ZPOOL_MM_RO = 1,
|
|
+ ZPOOL_MM_WO = 2,
|
|
+ ZPOOL_MM_DEFAULT = 0,
|
|
+};
|
|
+
|
|
+struct zswap_pool {
|
|
+ struct zpool *zpool;
|
|
+ struct crypto_comp **tfm;
|
|
+ struct kref kref;
|
|
+ struct list_head list;
|
|
+ struct work_struct work;
|
|
+ struct hlist_node node;
|
|
+ char tfm_name[128];
|
|
+};
|
|
+
|
|
+struct zswap_entry {
|
|
+ struct rb_node rbnode;
|
|
+ long unsigned int offset;
|
|
+ int refcount;
|
|
+ unsigned int length;
|
|
+ struct zswap_pool *pool;
|
|
+ union {
|
|
+ long unsigned int handle;
|
|
+ long unsigned int value;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct zswap_header {
|
|
+ swp_entry_t swpentry;
|
|
+};
|
|
+
|
|
+struct zswap_tree {
|
|
+ struct rb_root rbroot;
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+enum zswap_get_swap_ret {
|
|
+ ZSWAP_SWAPCACHE_NEW = 0,
|
|
+ ZSWAP_SWAPCACHE_EXIST = 1,
|
|
+ ZSWAP_SWAPCACHE_FAIL = 2,
|
|
+};
|
|
+
|
|
+typedef void (*dr_release_t___2)(struct device___2 *, void *);
|
|
+
|
|
+struct dma_pool {
|
|
+ struct list_head page_list;
|
|
+ spinlock_t lock;
|
|
+ size_t size;
|
|
+ struct device___2 *dev;
|
|
+ size_t allocation;
|
|
+ size_t boundary;
|
|
+ char name[32];
|
|
+ struct list_head pools;
|
|
+};
|
|
+
|
|
+struct dma_page {
|
|
+ struct list_head page_list;
|
|
+ void *vaddr;
|
|
+ dma_addr_t dma;
|
|
+ unsigned int in_use;
|
|
+ unsigned int offset;
|
|
+};
|
|
+
|
|
+enum string_size_units {
|
|
+ STRING_UNITS_10 = 0,
|
|
+ STRING_UNITS_2 = 1,
|
|
+};
|
|
+
|
|
+struct resv_map {
|
|
+ struct kref refs;
|
|
+ spinlock_t lock;
|
|
+ struct list_head regions;
|
|
+ long int adds_in_progress;
|
|
+ struct list_head region_cache;
|
|
+ long int region_cache_count;
|
|
+};
|
|
+
|
|
+struct huge_bootmem_page {
|
|
+ struct list_head list;
|
|
+ struct hstate *hstate;
|
|
+};
|
|
+
|
|
+struct file_region {
|
|
+ struct list_head link;
|
|
+ long int from;
|
|
+ long int to;
|
|
+};
|
|
+
|
|
+enum vma_resv_mode {
|
|
+ VMA_NEEDS_RESV = 0,
|
|
+ VMA_COMMIT_RESV = 1,
|
|
+ VMA_END_RESV = 2,
|
|
+ VMA_ADD_RESV = 3,
|
|
+};
|
|
+
|
|
+struct node_hstate {
|
|
+ struct kobject *hugepages_kobj;
|
|
+ struct kobject *hstate_kobjs[2];
|
|
+};
|
|
+
|
|
+struct hugetlb_cgroup;
|
|
+
|
|
+struct nodemask_scratch {
|
|
+ nodemask_t mask1;
|
|
+ nodemask_t mask2;
|
|
+};
|
|
+
|
|
+struct sp_node {
|
|
+ struct rb_node nd;
|
|
+ long unsigned int start;
|
|
+ long unsigned int end;
|
|
+ struct mempolicy *policy;
|
|
+};
|
|
+
|
|
+struct mempolicy_operations {
|
|
+ int (*create)(struct mempolicy *, const nodemask_t *);
|
|
+ void (*rebind)(struct mempolicy *, const nodemask_t *);
|
|
+};
|
|
+
|
|
+struct queue_pages {
|
|
+ struct list_head *pagelist;
|
|
+ long unsigned int flags;
|
|
+ nodemask_t *nmask;
|
|
+ long unsigned int start;
|
|
+ long unsigned int end;
|
|
+ struct vm_area_struct *first;
|
|
+};
|
|
+
|
|
+struct mmu_notifier;
|
|
+
|
|
+struct mmu_notifier_ops {
|
|
+ int flags;
|
|
+ void (*release)(struct mmu_notifier *, struct mm_struct___2 *);
|
|
+ int (*clear_flush_young)(struct mmu_notifier *, struct mm_struct___2 *, long unsigned int, long unsigned int);
|
|
+ int (*clear_young)(struct mmu_notifier *, struct mm_struct___2 *, long unsigned int, long unsigned int);
|
|
+ int (*test_young)(struct mmu_notifier *, struct mm_struct___2 *, long unsigned int);
|
|
+ void (*change_pte)(struct mmu_notifier *, struct mm_struct___2 *, long unsigned int, pte_t);
|
|
+ int (*invalidate_range_start)(struct mmu_notifier *, struct mm_struct___2 *, long unsigned int, long unsigned int, bool);
|
|
+ void (*invalidate_range_end)(struct mmu_notifier *, struct mm_struct___2 *, long unsigned int, long unsigned int);
|
|
+ void (*invalidate_range)(struct mmu_notifier *, struct mm_struct___2 *, long unsigned int, long unsigned int);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct mmu_notifier {
|
|
+ struct hlist_node hlist;
|
|
+ const struct mmu_notifier_ops *ops;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct memory_notify {
|
|
+ long unsigned int start_pfn;
|
|
+ long unsigned int nr_pages;
|
|
+ int status_change_nid_normal;
|
|
+ int status_change_nid_high;
|
|
+ int status_change_nid;
|
|
+};
|
|
+
|
|
+struct rmap_item;
|
|
+
|
|
+struct mm_slot {
|
|
+ struct hlist_node link;
|
|
+ struct list_head mm_list;
|
|
+ struct rmap_item *rmap_list;
|
|
+ struct mm_struct *mm;
|
|
+};
|
|
+
|
|
+struct stable_node;
|
|
+
|
|
+struct rmap_item {
|
|
+ struct rmap_item *rmap_list;
|
|
+ union {
|
|
+ struct anon_vma *anon_vma;
|
|
+ int nid;
|
|
+ };
|
|
+ struct mm_struct *mm;
|
|
+ long unsigned int address;
|
|
+ unsigned int oldchecksum;
|
|
+ union {
|
|
+ struct rb_node node;
|
|
+ struct {
|
|
+ struct stable_node *head;
|
|
+ struct hlist_node hlist;
|
|
+ };
|
|
+ };
|
|
+};
|
|
+
|
|
+struct ksm_scan {
|
|
+ struct mm_slot *mm_slot;
|
|
+ long unsigned int address;
|
|
+ struct rmap_item **rmap_list;
|
|
+ long unsigned int seqnr;
|
|
+};
|
|
+
|
|
+struct stable_node {
|
|
+ union {
|
|
+ struct rb_node node;
|
|
+ struct {
|
|
+ struct list_head *head;
|
|
+ struct {
|
|
+ struct hlist_node hlist_dup;
|
|
+ struct list_head list;
|
|
+ };
|
|
+ };
|
|
+ };
|
|
+ struct hlist_head hlist;
|
|
+ union {
|
|
+ long unsigned int kpfn;
|
|
+ long unsigned int chain_prune_time;
|
|
+ };
|
|
+ int rmap_hlist_len;
|
|
+ int nid;
|
|
+};
|
|
+
|
|
+enum get_ksm_page_flags {
|
|
+ GET_KSM_PAGE_NOLOCK = 0,
|
|
+ GET_KSM_PAGE_LOCK = 1,
|
|
+ GET_KSM_PAGE_TRYLOCK = 2,
|
|
+};
|
|
+
|
|
+enum stat_item {
|
|
+ ALLOC_FASTPATH = 0,
|
|
+ ALLOC_SLOWPATH = 1,
|
|
+ FREE_FASTPATH = 2,
|
|
+ FREE_SLOWPATH = 3,
|
|
+ FREE_FROZEN = 4,
|
|
+ FREE_ADD_PARTIAL = 5,
|
|
+ FREE_REMOVE_PARTIAL = 6,
|
|
+ ALLOC_FROM_PARTIAL = 7,
|
|
+ ALLOC_SLAB = 8,
|
|
+ ALLOC_REFILL = 9,
|
|
+ ALLOC_NODE_MISMATCH = 10,
|
|
+ FREE_SLAB = 11,
|
|
+ CPUSLAB_FLUSH = 12,
|
|
+ DEACTIVATE_FULL = 13,
|
|
+ DEACTIVATE_EMPTY = 14,
|
|
+ DEACTIVATE_TO_HEAD = 15,
|
|
+ DEACTIVATE_TO_TAIL = 16,
|
|
+ DEACTIVATE_REMOTE_FREES = 17,
|
|
+ DEACTIVATE_BYPASS = 18,
|
|
+ ORDER_FALLBACK = 19,
|
|
+ CMPXCHG_DOUBLE_CPU_FAIL = 20,
|
|
+ CMPXCHG_DOUBLE_FAIL = 21,
|
|
+ CPU_PARTIAL_ALLOC = 22,
|
|
+ CPU_PARTIAL_FREE = 23,
|
|
+ CPU_PARTIAL_NODE = 24,
|
|
+ CPU_PARTIAL_DRAIN = 25,
|
|
+ NR_SLUB_STAT_ITEMS = 26,
|
|
+};
|
|
+
|
|
+struct track {
|
|
+ long unsigned int addr;
|
|
+ long unsigned int addrs[16];
|
|
+ int cpu;
|
|
+ int pid;
|
|
+ long unsigned int when;
|
|
+};
|
|
+
|
|
+enum track_item {
|
|
+ TRACK_ALLOC = 0,
|
|
+ TRACK_FREE = 1,
|
|
+};
|
|
+
|
|
+struct detached_freelist {
|
|
+ struct page *page;
|
|
+ void *tail;
|
|
+ void *freelist;
|
|
+ int cnt;
|
|
+ struct kmem_cache *s;
|
|
+};
|
|
+
|
|
+struct location {
|
|
+ long unsigned int count;
|
|
+ long unsigned int addr;
|
|
+ long long int sum_time;
|
|
+ long int min_time;
|
|
+ long int max_time;
|
|
+ long int min_pid;
|
|
+ long int max_pid;
|
|
+ long unsigned int cpus[128];
|
|
+ nodemask_t nodes;
|
|
+};
|
|
+
|
|
+struct loc_track {
|
|
+ long unsigned int max;
|
|
+ long unsigned int count;
|
|
+ struct location *loc;
|
|
+};
|
|
+
|
|
+enum slab_stat_type {
|
|
+ SL_ALL = 0,
|
|
+ SL_PARTIAL = 1,
|
|
+ SL_CPU = 2,
|
|
+ SL_OBJECTS = 3,
|
|
+ SL_TOTAL = 4,
|
|
+};
|
|
+
|
|
+struct slab_attribute {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct kmem_cache *, char *);
|
|
+ ssize_t (*store)(struct kmem_cache *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct saved_alias {
|
|
+ struct kmem_cache *s;
|
|
+ const char *name;
|
|
+ struct saved_alias *next;
|
|
+};
|
|
+
|
|
+enum slab_modes {
|
|
+ M_NONE = 0,
|
|
+ M_PARTIAL = 1,
|
|
+ M_FULL = 2,
|
|
+ M_FREE = 3,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ MMOP_OFFLINE = -1,
|
|
+ MMOP_ONLINE_KEEP = 0,
|
|
+ MMOP_ONLINE_KERNEL = 1,
|
|
+ MMOP_ONLINE_MOVABLE = 2,
|
|
+};
|
|
+
|
|
+typedef void (*online_page_callback_t)(struct page *, unsigned int);
|
|
+
|
|
+struct memory_block {
|
|
+ long unsigned int start_section_nr;
|
|
+ long unsigned int end_section_nr;
|
|
+ long unsigned int state;
|
|
+ int section_count;
|
|
+ int online_type;
|
|
+ int phys_device;
|
|
+ void *hw;
|
|
+ int (*phys_callback)(struct memory_block *);
|
|
+ struct device dev;
|
|
+ int nid;
|
|
+};
|
|
+
|
|
+struct buffer_head;
|
|
+
|
|
+typedef void bh_end_io_t(struct buffer_head *, int);
|
|
+
|
|
+struct buffer_head {
|
|
+ long unsigned int b_state;
|
|
+ struct buffer_head *b_this_page;
|
|
+ struct page *b_page;
|
|
+ sector_t b_blocknr;
|
|
+ size_t b_size;
|
|
+ char *b_data;
|
|
+ struct block_device *b_bdev;
|
|
+ bh_end_io_t *b_end_io;
|
|
+ void *b_private;
|
|
+ struct list_head b_assoc_buffers;
|
|
+ struct address_space *b_assoc_map;
|
|
+ atomic_t b_count;
|
|
+};
|
|
+
|
|
+typedef struct page *new_page_t(struct page *, long unsigned int);
|
|
+
|
|
+typedef void free_page_t(struct page *, long unsigned int);
|
|
+
|
|
+struct migrate_vma_ops {
|
|
+ void (*alloc_and_copy)(struct vm_area_struct *, const long unsigned int *, long unsigned int *, long unsigned int, long unsigned int, void *);
|
|
+ void (*finalize_and_map)(struct vm_area_struct *, const long unsigned int *, const long unsigned int *, long unsigned int, long unsigned int, void *);
|
|
+};
|
|
+
|
|
+enum bh_state_bits {
|
|
+ BH_Uptodate = 0,
|
|
+ BH_Dirty = 1,
|
|
+ BH_Lock = 2,
|
|
+ BH_Req = 3,
|
|
+ BH_Uptodate_Lock = 4,
|
|
+ BH_Mapped = 5,
|
|
+ BH_New = 6,
|
|
+ BH_Async_Read = 7,
|
|
+ BH_Async_Write = 8,
|
|
+ BH_Delay = 9,
|
|
+ BH_Boundary = 10,
|
|
+ BH_Write_EIO = 11,
|
|
+ BH_Unwritten = 12,
|
|
+ BH_Quiet = 13,
|
|
+ BH_Meta = 14,
|
|
+ BH_Prio = 15,
|
|
+ BH_Defer_Completion = 16,
|
|
+ BH_PrivateStart = 17,
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_migrate_pages {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int succeeded;
|
|
+ long unsigned int failed;
|
|
+ enum migrate_mode mode;
|
|
+ int reason;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_mm_migrate_pages {};
|
|
+
|
|
+struct migrate_vma {
|
|
+ struct vm_area_struct *vma;
|
|
+ long unsigned int *dst;
|
|
+ long unsigned int *src;
|
|
+ long unsigned int cpages;
|
|
+ long unsigned int npages;
|
|
+ long unsigned int start;
|
|
+ long unsigned int end;
|
|
+};
|
|
+
|
|
+struct deferred_split {
|
|
+ spinlock_t *split_queue_lock;
|
|
+ struct list_head *split_queue;
|
|
+ long unsigned int *split_queue_len;
|
|
+};
|
|
+
|
|
+enum scan_result {
|
|
+ SCAN_FAIL = 0,
|
|
+ SCAN_SUCCEED = 1,
|
|
+ SCAN_PMD_NULL = 2,
|
|
+ SCAN_EXCEED_NONE_PTE = 3,
|
|
+ SCAN_PTE_NON_PRESENT = 4,
|
|
+ SCAN_PAGE_RO = 5,
|
|
+ SCAN_LACK_REFERENCED_PAGE = 6,
|
|
+ SCAN_PAGE_NULL = 7,
|
|
+ SCAN_SCAN_ABORT = 8,
|
|
+ SCAN_PAGE_COUNT = 9,
|
|
+ SCAN_PAGE_LRU = 10,
|
|
+ SCAN_PAGE_LOCK = 11,
|
|
+ SCAN_PAGE_ANON = 12,
|
|
+ SCAN_PAGE_COMPOUND = 13,
|
|
+ SCAN_ANY_PROCESS = 14,
|
|
+ SCAN_VMA_NULL = 15,
|
|
+ SCAN_VMA_CHECK = 16,
|
|
+ SCAN_ADDRESS_RANGE = 17,
|
|
+ SCAN_SWAP_CACHE_PAGE = 18,
|
|
+ SCAN_DEL_PAGE_LRU = 19,
|
|
+ SCAN_ALLOC_HUGE_PAGE_FAIL = 20,
|
|
+ SCAN_CGROUP_CHARGE_FAIL = 21,
|
|
+ SCAN_EXCEED_SWAP_PTE = 22,
|
|
+ SCAN_TRUNCATED = 23,
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_khugepaged_scan_pmd {
|
|
+ struct trace_entry ent;
|
|
+ struct mm_struct *mm;
|
|
+ long unsigned int pfn;
|
|
+ bool writable;
|
|
+ int referenced;
|
|
+ int none_or_zero;
|
|
+ int status;
|
|
+ int unmapped;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_collapse_huge_page {
|
|
+ struct trace_entry ent;
|
|
+ struct mm_struct *mm;
|
|
+ int isolated;
|
|
+ int status;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_collapse_huge_page_isolate {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int pfn;
|
|
+ int none_or_zero;
|
|
+ int referenced;
|
|
+ bool writable;
|
|
+ int status;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mm_collapse_huge_page_swapin {
|
|
+ struct trace_entry ent;
|
|
+ struct mm_struct *mm;
|
|
+ int swapped_in;
|
|
+ int referenced;
|
|
+ int ret;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_mm_khugepaged_scan_pmd {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_collapse_huge_page {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_collapse_huge_page_isolate {};
|
|
+
|
|
+struct trace_event_data_offsets_mm_collapse_huge_page_swapin {};
|
|
+
|
|
+struct mm_slot___2 {
|
|
+ struct hlist_node hash;
|
|
+ struct list_head mm_node;
|
|
+ struct mm_struct *mm;
|
|
+};
|
|
+
|
|
+struct khugepaged_scan {
|
|
+ struct list_head mm_head;
|
|
+ struct mm_slot___2 *mm_slot;
|
|
+ long unsigned int address;
|
|
+};
|
|
+
|
|
+struct mem_cgroup_tree_per_node {
|
|
+ struct rb_root rb_root;
|
|
+ struct rb_node *rb_rightmost;
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct mem_cgroup_tree {
|
|
+ struct mem_cgroup_tree_per_node *rb_tree_per_node[1024];
|
|
+};
|
|
+
|
|
+struct mem_cgroup_eventfd_list {
|
|
+ struct list_head list;
|
|
+ struct eventfd_ctx *eventfd;
|
|
+};
|
|
+
|
|
+struct mem_cgroup_event {
|
|
+ struct mem_cgroup *memcg;
|
|
+ struct eventfd_ctx *eventfd;
|
|
+ struct list_head list;
|
|
+ int (*register_event)(struct mem_cgroup *, struct eventfd_ctx *, const char *);
|
|
+ void (*unregister_event)(struct mem_cgroup *, struct eventfd_ctx *);
|
|
+ poll_table pt;
|
|
+ wait_queue_head_t *wqh;
|
|
+ wait_queue_entry_t wait;
|
|
+ struct work_struct remove;
|
|
+};
|
|
+
|
|
+struct move_charge_struct {
|
|
+ spinlock_t lock;
|
|
+ struct mm_struct *mm;
|
|
+ struct mem_cgroup *from;
|
|
+ struct mem_cgroup *to;
|
|
+ long unsigned int flags;
|
|
+ long unsigned int precharge;
|
|
+ long unsigned int moved_charge;
|
|
+ long unsigned int moved_swap;
|
|
+ struct task_struct *moving_task;
|
|
+ wait_queue_head_t waitq;
|
|
+};
|
|
+
|
|
+enum res_type {
|
|
+ _MEM = 0,
|
|
+ _MEMSWAP = 1,
|
|
+ _OOM_TYPE = 2,
|
|
+ _KMEM = 3,
|
|
+ _TCP = 4,
|
|
+};
|
|
+
|
|
+struct oom_wait_info {
|
|
+ struct mem_cgroup *memcg;
|
|
+ wait_queue_entry_t wait;
|
|
+};
|
|
+
|
|
+enum oom_status {
|
|
+ OOM_SUCCESS = 0,
|
|
+ OOM_FAILED = 1,
|
|
+ OOM_ASYNC = 2,
|
|
+ OOM_SKIPPED = 3,
|
|
+};
|
|
+
|
|
+struct memcg_stock_pcp {
|
|
+ struct mem_cgroup *cached;
|
|
+ unsigned int nr_pages;
|
|
+ struct work_struct work;
|
|
+ long unsigned int flags;
|
|
+};
|
|
+
|
|
+struct memcg_kmem_cache_create_work {
|
|
+ struct mem_cgroup *memcg;
|
|
+ struct kmem_cache *cachep;
|
|
+ struct work_struct work;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ RES_USAGE = 0,
|
|
+ RES_LIMIT = 1,
|
|
+ RES_MAX_USAGE = 2,
|
|
+ RES_FAILCNT = 3,
|
|
+ RES_SOFT_LIMIT = 4,
|
|
+};
|
|
+
|
|
+union mc_target {
|
|
+ struct page *page;
|
|
+ swp_entry_t ent;
|
|
+};
|
|
+
|
|
+enum mc_target_type {
|
|
+ MC_TARGET_NONE = 0,
|
|
+ MC_TARGET_PAGE = 1,
|
|
+ MC_TARGET_SWAP = 2,
|
|
+ MC_TARGET_DEVICE = 3,
|
|
+};
|
|
+
|
|
+struct uncharge_gather {
|
|
+ struct mem_cgroup *memcg;
|
|
+ long unsigned int pgpgout;
|
|
+ long unsigned int nr_anon;
|
|
+ long unsigned int nr_file;
|
|
+ long unsigned int nr_kmem;
|
|
+ long unsigned int nr_huge;
|
|
+ long unsigned int nr_shmem;
|
|
+ struct page *dummy_page;
|
|
+};
|
|
+
|
|
+struct numa_stat {
|
|
+ const char *name;
|
|
+ unsigned int lru_mask;
|
|
+};
|
|
+
|
|
+typedef long int pao_T_____6;
|
|
+
|
|
+typedef long int pto_T_____28;
|
|
+
|
|
+enum vmpressure_levels {
|
|
+ VMPRESSURE_LOW = 0,
|
|
+ VMPRESSURE_MEDIUM = 1,
|
|
+ VMPRESSURE_CRITICAL = 2,
|
|
+ VMPRESSURE_NUM_LEVELS = 3,
|
|
+};
|
|
+
|
|
+enum vmpressure_modes {
|
|
+ VMPRESSURE_NO_PASSTHROUGH = 0,
|
|
+ VMPRESSURE_HIERARCHY = 1,
|
|
+ VMPRESSURE_LOCAL = 2,
|
|
+ VMPRESSURE_NUM_MODES = 3,
|
|
+};
|
|
+
|
|
+struct vmpressure_event {
|
|
+ struct eventfd_ctx *efd;
|
|
+ enum vmpressure_levels level;
|
|
+ enum vmpressure_modes mode;
|
|
+ struct list_head node;
|
|
+};
|
|
+
|
|
+struct swap_cgroup_ctrl {
|
|
+ struct page **map;
|
|
+ long unsigned int length;
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct swap_cgroup {
|
|
+ short unsigned int id;
|
|
+};
|
|
+
|
|
+struct hugetlb_cgroup___2 {
|
|
+ struct cgroup_subsys_state___2 css;
|
|
+ struct page_counter hugepage[2];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ RES_USAGE___2 = 0,
|
|
+ RES_LIMIT___2 = 1,
|
|
+ RES_MAX_USAGE___2 = 2,
|
|
+ RES_FAILCNT___2 = 3,
|
|
+};
|
|
+
|
|
+enum mf_result {
|
|
+ MF_IGNORED = 0,
|
|
+ MF_FAILED = 1,
|
|
+ MF_DELAYED = 2,
|
|
+ MF_RECOVERED = 3,
|
|
+};
|
|
+
|
|
+enum mf_action_page_type {
|
|
+ MF_MSG_KERNEL = 0,
|
|
+ MF_MSG_KERNEL_HIGH_ORDER = 1,
|
|
+ MF_MSG_SLAB = 2,
|
|
+ MF_MSG_DIFFERENT_COMPOUND = 3,
|
|
+ MF_MSG_POISONED_HUGE = 4,
|
|
+ MF_MSG_HUGE = 5,
|
|
+ MF_MSG_FREE_HUGE = 6,
|
|
+ MF_MSG_NON_PMD_HUGE = 7,
|
|
+ MF_MSG_UNMAP_FAILED = 8,
|
|
+ MF_MSG_DIRTY_SWAPCACHE = 9,
|
|
+ MF_MSG_CLEAN_SWAPCACHE = 10,
|
|
+ MF_MSG_DIRTY_MLOCKED_LRU = 11,
|
|
+ MF_MSG_CLEAN_MLOCKED_LRU = 12,
|
|
+ MF_MSG_DIRTY_UNEVICTABLE_LRU = 13,
|
|
+ MF_MSG_CLEAN_UNEVICTABLE_LRU = 14,
|
|
+ MF_MSG_DIRTY_LRU = 15,
|
|
+ MF_MSG_CLEAN_LRU = 16,
|
|
+ MF_MSG_TRUNCATED_LRU = 17,
|
|
+ MF_MSG_BUDDY = 18,
|
|
+ MF_MSG_BUDDY_2ND = 19,
|
|
+ MF_MSG_DAX = 20,
|
|
+ MF_MSG_UNKNOWN = 21,
|
|
+};
|
|
+
|
|
+struct __kfifo {
|
|
+ unsigned int in;
|
|
+ unsigned int out;
|
|
+ unsigned int mask;
|
|
+ unsigned int esize;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+struct to_kill {
|
|
+ struct list_head nd;
|
|
+ struct task_struct *tsk;
|
|
+ long unsigned int addr;
|
|
+ short int size_shift;
|
|
+};
|
|
+
|
|
+struct page_state {
|
|
+ long unsigned int mask;
|
|
+ long unsigned int res;
|
|
+ enum mf_action_page_type type;
|
|
+ int (*action)(struct page *, long unsigned int);
|
|
+};
|
|
+
|
|
+struct memory_failure_entry {
|
|
+ long unsigned int pfn;
|
|
+ int flags;
|
|
+};
|
|
+
|
|
+struct memory_failure_cpu {
|
|
+ struct {
|
|
+ union {
|
|
+ struct __kfifo kfifo;
|
|
+ struct memory_failure_entry *type;
|
|
+ const struct memory_failure_entry *const_type;
|
|
+ char (*rectype)[0];
|
|
+ struct memory_failure_entry *ptr;
|
|
+ const struct memory_failure_entry *ptr_const;
|
|
+ };
|
|
+ struct memory_failure_entry buf[16];
|
|
+ } fifo;
|
|
+ spinlock_t lock;
|
|
+ struct work_struct work;
|
|
+};
|
|
+
|
|
+struct cleancache_filekey {
|
|
+ union {
|
|
+ ino_t ino;
|
|
+ __u32 fh[6];
|
|
+ u32 key[6];
|
|
+ } u;
|
|
+};
|
|
+
|
|
+struct cleancache_ops {
|
|
+ int (*init_fs)(size_t);
|
|
+ int (*init_shared_fs)(uuid_t *, size_t);
|
|
+ int (*get_page)(int, struct cleancache_filekey, long unsigned int, struct page___2 *);
|
|
+ void (*put_page)(int, struct cleancache_filekey, long unsigned int, struct page___2 *);
|
|
+ void (*invalidate_page)(int, struct cleancache_filekey, long unsigned int);
|
|
+ void (*invalidate_inode)(int, struct cleancache_filekey);
|
|
+ void (*invalidate_fs)(int);
|
|
+};
|
|
+
|
|
+struct memory_isolate_notify {
|
|
+ long unsigned int start_pfn;
|
|
+ unsigned int nr_pages;
|
|
+ unsigned int pages_found;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_test_pages_isolated {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int start_pfn;
|
|
+ long unsigned int end_pfn;
|
|
+ long unsigned int fin_pfn;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_test_pages_isolated {};
|
|
+
|
|
+struct zpool_driver;
|
|
+
|
|
+struct zpool {
|
|
+ struct zpool_driver *driver;
|
|
+ void *pool;
|
|
+ const struct zpool_ops *ops;
|
|
+ bool evictable;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct zpool_driver {
|
|
+ char *type;
|
|
+ struct module___2 *owner;
|
|
+ atomic_t refcount;
|
|
+ struct list_head list;
|
|
+ void * (*create)(const char *, gfp_t, const struct zpool_ops *, struct zpool *);
|
|
+ void (*destroy)(void *);
|
|
+ int (*malloc)(void *, size_t, gfp_t, long unsigned int *);
|
|
+ void (*free)(void *, long unsigned int);
|
|
+ int (*shrink)(void *, unsigned int, unsigned int *);
|
|
+ void * (*map)(void *, long unsigned int, enum zpool_mapmode);
|
|
+ void (*unmap)(void *, long unsigned int);
|
|
+ u64 (*total_size)(void *);
|
|
+};
|
|
+
|
|
+struct zbud_pool;
|
|
+
|
|
+struct zbud_ops {
|
|
+ int (*evict)(struct zbud_pool *, long unsigned int);
|
|
+};
|
|
+
|
|
+struct zbud_pool {
|
|
+ spinlock_t lock;
|
|
+ struct list_head unbuddied[63];
|
|
+ struct list_head buddied;
|
|
+ struct list_head lru;
|
|
+ u64 pages_nr;
|
|
+ const struct zbud_ops *ops;
|
|
+ struct zpool *zpool;
|
|
+ const struct zpool_ops *zpool_ops;
|
|
+};
|
|
+
|
|
+struct zbud_header {
|
|
+ struct list_head buddy;
|
|
+ struct list_head lru;
|
|
+ unsigned int first_chunks;
|
|
+ unsigned int last_chunks;
|
|
+ bool under_reclaim;
|
|
+};
|
|
+
|
|
+enum buddy {
|
|
+ FIRST = 0,
|
|
+ LAST = 1,
|
|
+};
|
|
+
|
|
+enum zs_mapmode {
|
|
+ ZS_MM_RW = 0,
|
|
+ ZS_MM_RO = 1,
|
|
+ ZS_MM_WO = 2,
|
|
+};
|
|
+
|
|
+struct zs_pool_stats {
|
|
+ long unsigned int pages_compacted;
|
|
+};
|
|
+
|
|
+enum fullness_group {
|
|
+ ZS_EMPTY = 0,
|
|
+ ZS_ALMOST_EMPTY = 1,
|
|
+ ZS_ALMOST_FULL = 2,
|
|
+ ZS_FULL = 3,
|
|
+ NR_ZS_FULLNESS = 4,
|
|
+};
|
|
+
|
|
+enum zs_stat_type {
|
|
+ CLASS_EMPTY = 0,
|
|
+ CLASS_ALMOST_EMPTY = 1,
|
|
+ CLASS_ALMOST_FULL = 2,
|
|
+ CLASS_FULL = 3,
|
|
+ OBJ_ALLOCATED = 4,
|
|
+ OBJ_USED = 5,
|
|
+ NR_ZS_STAT_TYPE = 6,
|
|
+};
|
|
+
|
|
+struct zs_size_stat {
|
|
+ long unsigned int objs[6];
|
|
+};
|
|
+
|
|
+struct size_class {
|
|
+ spinlock_t lock;
|
|
+ struct list_head fullness_list[4];
|
|
+ int size;
|
|
+ int objs_per_zspage;
|
|
+ int pages_per_zspage;
|
|
+ unsigned int index;
|
|
+ struct zs_size_stat stats;
|
|
+};
|
|
+
|
|
+struct link_free {
|
|
+ union {
|
|
+ long unsigned int next;
|
|
+ long unsigned int handle;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct zs_pool {
|
|
+ const char *name;
|
|
+ struct size_class *size_class[255];
|
|
+ struct kmem_cache *handle_cachep;
|
|
+ struct kmem_cache *zspage_cachep;
|
|
+ atomic_long_t pages_allocated;
|
|
+ struct zs_pool_stats stats;
|
|
+ struct shrinker shrinker;
|
|
+ struct dentry___2 *stat_dentry;
|
|
+ struct inode___2 *inode;
|
|
+ struct work_struct free_work;
|
|
+ struct wait_queue_head migration_wait;
|
|
+ atomic_long_t isolated_pages;
|
|
+ bool destroying;
|
|
+};
|
|
+
|
|
+struct zspage {
|
|
+ struct {
|
|
+ unsigned int fullness: 2;
|
|
+ unsigned int class: 9;
|
|
+ unsigned int isolated: 3;
|
|
+ unsigned int magic: 8;
|
|
+ };
|
|
+ unsigned int inuse;
|
|
+ unsigned int freeobj;
|
|
+ struct page___2 *first_page;
|
|
+ struct list_head list;
|
|
+ rwlock_t lock;
|
|
+};
|
|
+
|
|
+struct mapping_area {
|
|
+ char *vm_buf;
|
|
+ char *vm_addr;
|
|
+ enum zs_mapmode vm_mm;
|
|
+};
|
|
+
|
|
+struct zs_compact_control {
|
|
+ struct page___2 *s_page;
|
|
+ struct page___2 *d_page;
|
|
+ int obj_idx;
|
|
+};
|
|
+
|
|
+struct balloon_dev_info {
|
|
+ long unsigned int isolated_pages;
|
|
+ spinlock_t pages_lock;
|
|
+ struct list_head pages;
|
|
+ int (*migratepage)(struct balloon_dev_info *, struct page___2 *, struct page___2 *, enum migrate_mode);
|
|
+ struct inode___2 *inode;
|
|
+};
|
|
+
|
|
+struct frame_vector {
|
|
+ unsigned int nr_allocated;
|
|
+ unsigned int nr_frames;
|
|
+ bool got_ref;
|
|
+ bool is_pfns;
|
|
+ void *ptrs[0];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ BAD_STACK = -1,
|
|
+ NOT_STACK = 0,
|
|
+ GOOD_FRAME = 1,
|
|
+ GOOD_STACK = 2,
|
|
+};
|
|
+
|
|
+struct hmm {
|
|
+ struct mm_struct *mm;
|
|
+ spinlock_t lock;
|
|
+ atomic_t sequence;
|
|
+ struct list_head ranges;
|
|
+ struct list_head mirrors;
|
|
+ struct mmu_notifier mmu_notifier;
|
|
+ struct rw_semaphore mirrors_sem;
|
|
+};
|
|
+
|
|
+enum hmm_pfn_flag_e {
|
|
+ HMM_PFN_VALID = 0,
|
|
+ HMM_PFN_WRITE = 1,
|
|
+ HMM_PFN_DEVICE_PRIVATE = 2,
|
|
+ HMM_PFN_FLAG_MAX = 3,
|
|
+};
|
|
+
|
|
+enum hmm_pfn_value_e {
|
|
+ HMM_PFN_ERROR = 0,
|
|
+ HMM_PFN_NONE = 1,
|
|
+ HMM_PFN_SPECIAL = 2,
|
|
+ HMM_PFN_VALUE_MAX = 3,
|
|
+};
|
|
+
|
|
+struct hmm_range {
|
|
+ struct vm_area_struct *vma;
|
|
+ struct list_head list;
|
|
+ long unsigned int start;
|
|
+ long unsigned int end;
|
|
+ uint64_t *pfns;
|
|
+ const uint64_t *flags;
|
|
+ const uint64_t *values;
|
|
+ uint8_t pfn_shift;
|
|
+ bool valid;
|
|
+};
|
|
+
|
|
+enum hmm_update_type {
|
|
+ HMM_UPDATE_INVALIDATE = 0,
|
|
+};
|
|
+
|
|
+struct hmm_mirror;
|
|
+
|
|
+struct hmm_mirror_ops {
|
|
+ void (*release)(struct hmm_mirror *);
|
|
+ void (*sync_cpu_device_pagetables)(struct hmm_mirror *, enum hmm_update_type, long unsigned int, long unsigned int);
|
|
+};
|
|
+
|
|
+struct hmm_mirror {
|
|
+ struct hmm *hmm;
|
|
+ const struct hmm_mirror_ops *ops;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct hmm_devmem;
|
|
+
|
|
+struct hmm_devmem_ops {
|
|
+ void (*free)(struct hmm_devmem *, struct page *);
|
|
+ int (*fault)(struct hmm_devmem *, struct vm_area_struct *, long unsigned int, const struct page *, unsigned int, pmd_t *);
|
|
+};
|
|
+
|
|
+struct hmm_devmem {
|
|
+ struct completion completion;
|
|
+ long unsigned int pfn_first;
|
|
+ long unsigned int pfn_last;
|
|
+ struct resource *resource;
|
|
+ struct device *device;
|
|
+ struct dev_pagemap pagemap;
|
|
+ const struct hmm_devmem_ops *ops;
|
|
+ struct percpu_ref ref;
|
|
+};
|
|
+
|
|
+struct hmm_device {
|
|
+ struct device device;
|
|
+ unsigned int minor;
|
|
+};
|
|
+
|
|
+struct hmm_vma_walk {
|
|
+ struct hmm_range *range;
|
|
+ long unsigned int last;
|
|
+ bool fault;
|
|
+ bool block;
|
|
+};
|
|
+
|
|
+struct hugetlbfs_inode_info {
|
|
+ struct shared_policy policy;
|
|
+ struct inode___2 vfs_inode;
|
|
+ unsigned int seals;
|
|
+};
|
|
+
|
|
+typedef s32 compat_off_t;
|
|
+
|
|
+struct open_flags {
|
|
+ int open_flag;
|
|
+ umode_t mode;
|
|
+ int acc_mode;
|
|
+ int intent;
|
|
+ int lookup_flags;
|
|
+};
|
|
+
|
|
+typedef __kernel_long_t __kernel_off_t;
|
|
+
|
|
+typedef __kernel_off_t off_t;
|
|
+
|
|
+struct file_dedupe_range_info {
|
|
+ __s64 dest_fd;
|
|
+ __u64 dest_offset;
|
|
+ __u64 bytes_deduped;
|
|
+ __s32 status;
|
|
+ __u32 reserved;
|
|
+};
|
|
+
|
|
+struct file_dedupe_range {
|
|
+ __u64 src_offset;
|
|
+ __u64 src_length;
|
|
+ __u16 dest_count;
|
|
+ __u16 reserved1;
|
|
+ __u32 reserved2;
|
|
+ struct file_dedupe_range_info info[0];
|
|
+};
|
|
+
|
|
+typedef int __kernel_rwf_t;
|
|
+
|
|
+typedef __kernel_rwf_t rwf_t;
|
|
+
|
|
+struct fs_file_read_ctx {
|
|
+ const unsigned char *name;
|
|
+ unsigned int f_mode;
|
|
+ unsigned int rsvd;
|
|
+ unsigned int clr_f_mode;
|
|
+ unsigned int set_f_mode;
|
|
+ long unsigned int key;
|
|
+ long long int i_size;
|
|
+ long long int prev_index;
|
|
+ long long int index;
|
|
+};
|
|
+
|
|
+typedef s32 compat_ssize_t;
|
|
+
|
|
+typedef int filler_t___2(void *, struct page___2 *);
|
|
+
|
|
+struct kobj_map;
|
|
+
|
|
+struct char_device_struct {
|
|
+ struct char_device_struct *next;
|
|
+ unsigned int major;
|
|
+ unsigned int baseminor;
|
|
+ int minorct;
|
|
+ char name[64];
|
|
+ struct cdev *cdev;
|
|
+};
|
|
+
|
|
+struct stat {
|
|
+ __kernel_ulong_t st_dev;
|
|
+ __kernel_ulong_t st_ino;
|
|
+ __kernel_ulong_t st_nlink;
|
|
+ unsigned int st_mode;
|
|
+ unsigned int st_uid;
|
|
+ unsigned int st_gid;
|
|
+ unsigned int __pad0;
|
|
+ __kernel_ulong_t st_rdev;
|
|
+ __kernel_long_t st_size;
|
|
+ __kernel_long_t st_blksize;
|
|
+ __kernel_long_t st_blocks;
|
|
+ __kernel_ulong_t st_atime;
|
|
+ __kernel_ulong_t st_atime_nsec;
|
|
+ __kernel_ulong_t st_mtime;
|
|
+ __kernel_ulong_t st_mtime_nsec;
|
|
+ __kernel_ulong_t st_ctime;
|
|
+ __kernel_ulong_t st_ctime_nsec;
|
|
+ __kernel_long_t __unused[3];
|
|
+};
|
|
+
|
|
+struct __old_kernel_stat {
|
|
+ short unsigned int st_dev;
|
|
+ short unsigned int st_ino;
|
|
+ short unsigned int st_mode;
|
|
+ short unsigned int st_nlink;
|
|
+ short unsigned int st_uid;
|
|
+ short unsigned int st_gid;
|
|
+ short unsigned int st_rdev;
|
|
+ unsigned int st_size;
|
|
+ unsigned int st_atime;
|
|
+ unsigned int st_mtime;
|
|
+ unsigned int st_ctime;
|
|
+};
|
|
+
|
|
+struct statx_timestamp {
|
|
+ __s64 tv_sec;
|
|
+ __u32 tv_nsec;
|
|
+ __s32 __reserved;
|
|
+};
|
|
+
|
|
+struct statx {
|
|
+ __u32 stx_mask;
|
|
+ __u32 stx_blksize;
|
|
+ __u64 stx_attributes;
|
|
+ __u32 stx_nlink;
|
|
+ __u32 stx_uid;
|
|
+ __u32 stx_gid;
|
|
+ __u16 stx_mode;
|
|
+ __u16 __spare0[1];
|
|
+ __u64 stx_ino;
|
|
+ __u64 stx_size;
|
|
+ __u64 stx_blocks;
|
|
+ __u64 stx_attributes_mask;
|
|
+ struct statx_timestamp stx_atime;
|
|
+ struct statx_timestamp stx_btime;
|
|
+ struct statx_timestamp stx_ctime;
|
|
+ struct statx_timestamp stx_mtime;
|
|
+ __u32 stx_rdev_major;
|
|
+ __u32 stx_rdev_minor;
|
|
+ __u32 stx_dev_major;
|
|
+ __u32 stx_dev_minor;
|
|
+ __u64 __spare2[14];
|
|
+};
|
|
+
|
|
+typedef u16 __compat_uid_t;
|
|
+
|
|
+typedef u16 __compat_gid_t;
|
|
+
|
|
+typedef u16 compat_mode_t;
|
|
+
|
|
+typedef u32 compat_ino_t;
|
|
+
|
|
+typedef u16 compat_dev_t;
|
|
+
|
|
+typedef u16 compat_nlink_t;
|
|
+
|
|
+struct compat_stat {
|
|
+ compat_dev_t st_dev;
|
|
+ u16 __pad1;
|
|
+ compat_ino_t st_ino;
|
|
+ compat_mode_t st_mode;
|
|
+ compat_nlink_t st_nlink;
|
|
+ __compat_uid_t st_uid;
|
|
+ __compat_gid_t st_gid;
|
|
+ compat_dev_t st_rdev;
|
|
+ u16 __pad2;
|
|
+ u32 st_size;
|
|
+ u32 st_blksize;
|
|
+ u32 st_blocks;
|
|
+ u32 st_atime;
|
|
+ u32 st_atime_nsec;
|
|
+ u32 st_mtime;
|
|
+ u32 st_mtime_nsec;
|
|
+ u32 st_ctime;
|
|
+ u32 st_ctime_nsec;
|
|
+ u32 __unused4;
|
|
+ u32 __unused5;
|
|
+};
|
|
+
|
|
+typedef short unsigned int ushort;
|
|
+
|
|
+struct user_arg_ptr {
|
|
+ bool is_compat;
|
|
+ union {
|
|
+ const char * const *native;
|
|
+ const compat_uptr_t *compat;
|
|
+ } ptr;
|
|
+};
|
|
+
|
|
+enum inode_i_mutex_lock_class {
|
|
+ I_MUTEX_NORMAL = 0,
|
|
+ I_MUTEX_PARENT = 1,
|
|
+ I_MUTEX_CHILD = 2,
|
|
+ I_MUTEX_XATTR = 3,
|
|
+ I_MUTEX_NONDIR2 = 4,
|
|
+ I_MUTEX_PARENT2 = 5,
|
|
+};
|
|
+
|
|
+struct name_snapshot {
|
|
+ const unsigned char *name;
|
|
+ unsigned char inline_name[32];
|
|
+};
|
|
+
|
|
+struct saved {
|
|
+ struct path___2 link;
|
|
+ struct delayed_call done;
|
|
+ const char *name;
|
|
+ unsigned int seq;
|
|
+};
|
|
+
|
|
+struct nameidata {
|
|
+ struct path___2 path;
|
|
+ struct qstr last;
|
|
+ struct path___2 root;
|
|
+ struct inode___2 *inode;
|
|
+ unsigned int flags;
|
|
+ unsigned int seq;
|
|
+ unsigned int m_seq;
|
|
+ int last_type;
|
|
+ unsigned int depth;
|
|
+ int total_link_count;
|
|
+ struct saved *stack;
|
|
+ struct saved internal[2];
|
|
+ struct filename *name;
|
|
+ struct nameidata *saved;
|
|
+ struct inode___2 *link_inode;
|
|
+ unsigned int root_seq;
|
|
+ int dfd;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ LAST_NORM = 0,
|
|
+ LAST_ROOT = 1,
|
|
+ LAST_DOT = 2,
|
|
+ LAST_DOTDOT = 3,
|
|
+ LAST_BIND = 4,
|
|
+};
|
|
+
|
|
+struct mount;
|
|
+
|
|
+struct mnt_namespace {
|
|
+ atomic_t count;
|
|
+ struct ns_common___2 ns;
|
|
+ struct mount *root;
|
|
+ struct list_head list;
|
|
+ struct user_namespace___2 *user_ns;
|
|
+ struct ucounts___2 *ucounts;
|
|
+ u64 seq;
|
|
+ wait_queue_head_t poll;
|
|
+ u64 event;
|
|
+ unsigned int mounts;
|
|
+ unsigned int pending_mounts;
|
|
+};
|
|
+
|
|
+struct mnt_pcp;
|
|
+
|
|
+struct mountpoint;
|
|
+
|
|
+struct mount {
|
|
+ struct hlist_node mnt_hash;
|
|
+ struct mount *mnt_parent;
|
|
+ struct dentry___2 *mnt_mountpoint;
|
|
+ struct vfsmount___2 mnt;
|
|
+ union {
|
|
+ struct callback_head mnt_rcu;
|
|
+ struct llist_node mnt_llist;
|
|
+ };
|
|
+ struct mnt_pcp *mnt_pcp;
|
|
+ struct list_head mnt_mounts;
|
|
+ struct list_head mnt_child;
|
|
+ struct list_head mnt_instance;
|
|
+ const char *mnt_devname;
|
|
+ struct list_head mnt_list;
|
|
+ struct list_head mnt_expire;
|
|
+ struct list_head mnt_share;
|
|
+ struct list_head mnt_slave_list;
|
|
+ struct list_head mnt_slave;
|
|
+ struct mount *mnt_master;
|
|
+ struct mnt_namespace *mnt_ns;
|
|
+ struct mountpoint *mnt_mp;
|
|
+ struct hlist_node mnt_mp_list;
|
|
+ struct list_head mnt_umounting;
|
|
+ struct fsnotify_mark_connector *mnt_fsnotify_marks;
|
|
+ __u32 mnt_fsnotify_mask;
|
|
+ int mnt_id;
|
|
+ int mnt_group_id;
|
|
+ int mnt_expiry_mark;
|
|
+ struct hlist_head mnt_pins;
|
|
+ struct fs_pin mnt_umount;
|
|
+ struct dentry___2 *mnt_ex_mountpoint;
|
|
+};
|
|
+
|
|
+struct mnt_pcp {
|
|
+ int mnt_count;
|
|
+ int mnt_writers;
|
|
+};
|
|
+
|
|
+struct mountpoint {
|
|
+ struct hlist_node m_hash;
|
|
+ struct dentry___2 *m_dentry;
|
|
+ struct hlist_head m_list;
|
|
+ int m_count;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ WALK_FOLLOW = 1,
|
|
+ WALK_MORE = 2,
|
|
+};
|
|
+
|
|
+struct word_at_a_time {
|
|
+ const long unsigned int one_bits;
|
|
+ const long unsigned int high_bits;
|
|
+};
|
|
+
|
|
+struct f_owner_ex {
|
|
+ int type;
|
|
+ __kernel_pid_t pid;
|
|
+};
|
|
+
|
|
+struct flock {
|
|
+ short int l_type;
|
|
+ short int l_whence;
|
|
+ __kernel_off_t l_start;
|
|
+ __kernel_off_t l_len;
|
|
+ __kernel_pid_t l_pid;
|
|
+};
|
|
+
|
|
+struct compat_flock {
|
|
+ short int l_type;
|
|
+ short int l_whence;
|
|
+ compat_off_t l_start;
|
|
+ compat_off_t l_len;
|
|
+ compat_pid_t l_pid;
|
|
+};
|
|
+
|
|
+struct compat_flock64 {
|
|
+ short int l_type;
|
|
+ short int l_whence;
|
|
+ compat_loff_t l_start;
|
|
+ compat_loff_t l_len;
|
|
+ compat_pid_t l_pid;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct fiemap {
|
|
+ __u64 fm_start;
|
|
+ __u64 fm_length;
|
|
+ __u32 fm_flags;
|
|
+ __u32 fm_mapped_extents;
|
|
+ __u32 fm_extent_count;
|
|
+ __u32 fm_reserved;
|
|
+ struct fiemap_extent fm_extents[0];
|
|
+};
|
|
+
|
|
+struct file_clone_range {
|
|
+ __s64 src_fd;
|
|
+ __u64 src_offset;
|
|
+ __u64 src_length;
|
|
+ __u64 dest_offset;
|
|
+};
|
|
+
|
|
+typedef int get_block_t(struct inode___2 *, sector_t, struct buffer_head *, int);
|
|
+
|
|
+struct space_resv {
|
|
+ __s16 l_type;
|
|
+ __s16 l_whence;
|
|
+ __s64 l_start;
|
|
+ __s64 l_len;
|
|
+ __s32 l_sysid;
|
|
+ __u32 l_pid;
|
|
+ __s32 l_pad[4];
|
|
+};
|
|
+
|
|
+struct linux_dirent64 {
|
|
+ u64 d_ino;
|
|
+ s64 d_off;
|
|
+ short unsigned int d_reclen;
|
|
+ unsigned char d_type;
|
|
+ char d_name[0];
|
|
+};
|
|
+
|
|
+struct old_linux_dirent {
|
|
+ long unsigned int d_ino;
|
|
+ long unsigned int d_offset;
|
|
+ short unsigned int d_namlen;
|
|
+ char d_name[1];
|
|
+};
|
|
+
|
|
+struct readdir_callback {
|
|
+ struct dir_context ctx;
|
|
+ struct old_linux_dirent *dirent;
|
|
+ int result;
|
|
+};
|
|
+
|
|
+struct linux_dirent {
|
|
+ long unsigned int d_ino;
|
|
+ long unsigned int d_off;
|
|
+ short unsigned int d_reclen;
|
|
+ char d_name[1];
|
|
+};
|
|
+
|
|
+struct getdents_callback {
|
|
+ struct dir_context ctx;
|
|
+ struct linux_dirent *current_dir;
|
|
+ struct linux_dirent *previous;
|
|
+ int count;
|
|
+ int error;
|
|
+};
|
|
+
|
|
+struct getdents_callback64 {
|
|
+ struct dir_context ctx;
|
|
+ struct linux_dirent64 *current_dir;
|
|
+ struct linux_dirent64 *previous;
|
|
+ int count;
|
|
+ int error;
|
|
+};
|
|
+
|
|
+struct compat_old_linux_dirent {
|
|
+ compat_ulong_t d_ino;
|
|
+ compat_ulong_t d_offset;
|
|
+ short unsigned int d_namlen;
|
|
+ char d_name[1];
|
|
+};
|
|
+
|
|
+struct compat_readdir_callback {
|
|
+ struct dir_context ctx;
|
|
+ struct compat_old_linux_dirent *dirent;
|
|
+ int result;
|
|
+};
|
|
+
|
|
+struct compat_linux_dirent {
|
|
+ compat_ulong_t d_ino;
|
|
+ compat_ulong_t d_off;
|
|
+ short unsigned int d_reclen;
|
|
+ char d_name[1];
|
|
+};
|
|
+
|
|
+struct compat_getdents_callback {
|
|
+ struct dir_context ctx;
|
|
+ struct compat_linux_dirent *current_dir;
|
|
+ struct compat_linux_dirent *previous;
|
|
+ int count;
|
|
+ int error;
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ long unsigned int fds_bits[16];
|
|
+} __kernel_fd_set;
|
|
+
|
|
+typedef __kernel_fd_set fd_set;
|
|
+
|
|
+struct poll_table_entry {
|
|
+ struct file *filp;
|
|
+ __poll_t key;
|
|
+ wait_queue_entry_t wait;
|
|
+ wait_queue_head_t *wait_address;
|
|
+};
|
|
+
|
|
+struct poll_table_page;
|
|
+
|
|
+struct poll_wqueues {
|
|
+ poll_table pt;
|
|
+ struct poll_table_page *table;
|
|
+ struct task_struct *polling_task;
|
|
+ int triggered;
|
|
+ int error;
|
|
+ int inline_index;
|
|
+ struct poll_table_entry inline_entries[9];
|
|
+};
|
|
+
|
|
+struct poll_table_page {
|
|
+ struct poll_table_page *next;
|
|
+ struct poll_table_entry *entry;
|
|
+ struct poll_table_entry entries[0];
|
|
+};
|
|
+
|
|
+enum poll_time_type {
|
|
+ PT_TIMEVAL = 0,
|
|
+ PT_OLD_TIMEVAL = 1,
|
|
+ PT_TIMESPEC = 2,
|
|
+ PT_OLD_TIMESPEC = 3,
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ long unsigned int *in;
|
|
+ long unsigned int *out;
|
|
+ long unsigned int *ex;
|
|
+ long unsigned int *res_in;
|
|
+ long unsigned int *res_out;
|
|
+ long unsigned int *res_ex;
|
|
+} fd_set_bits;
|
|
+
|
|
+struct poll_list {
|
|
+ struct poll_list *next;
|
|
+ int len;
|
|
+ struct pollfd entries[0];
|
|
+};
|
|
+
|
|
+struct compat_sel_arg_struct {
|
|
+ compat_ulong_t n;
|
|
+ compat_uptr_t inp;
|
|
+ compat_uptr_t outp;
|
|
+ compat_uptr_t exp;
|
|
+ compat_uptr_t tvp;
|
|
+};
|
|
+
|
|
+enum dentry_d_lock_class {
|
|
+ DENTRY_D_LOCK_NORMAL = 0,
|
|
+ DENTRY_D_LOCK_NESTED = 1,
|
|
+};
|
|
+
|
|
+struct external_name {
|
|
+ union {
|
|
+ atomic_t count;
|
|
+ struct callback_head head;
|
|
+ } u;
|
|
+ unsigned char name[0];
|
|
+};
|
|
+
|
|
+enum d_walk_ret {
|
|
+ D_WALK_CONTINUE = 0,
|
|
+ D_WALK_QUIT = 1,
|
|
+ D_WALK_NORETRY = 2,
|
|
+ D_WALK_SKIP = 3,
|
|
+};
|
|
+
|
|
+struct check_mount {
|
|
+ struct vfsmount___2 *mnt;
|
|
+ unsigned int mounted;
|
|
+};
|
|
+
|
|
+struct select_data {
|
|
+ struct dentry___2 *start;
|
|
+ struct list_head dispose;
|
|
+ int found;
|
|
+};
|
|
+
|
|
+enum file_time_flags {
|
|
+ S_ATIME = 1,
|
|
+ S_MTIME = 2,
|
|
+ S_CTIME = 4,
|
|
+ S_VERSION = 8,
|
|
+};
|
|
+
|
|
+struct mnt_namespace_wrapper {
|
|
+ struct mnt_namespace ns;
|
|
+ spinlock_t ns_lock;
|
|
+};
|
|
+
|
|
+struct proc_mounts {
|
|
+ struct mnt_namespace *ns;
|
|
+ struct path___2 root;
|
|
+ int (*show)(struct seq_file___2 *, struct vfsmount___2 *);
|
|
+ struct mount cursor;
|
|
+};
|
|
+
|
|
+enum umount_tree_flags {
|
|
+ UMOUNT_SYNC = 1,
|
|
+ UMOUNT_PROPAGATE = 2,
|
|
+ UMOUNT_CONNECTED = 4,
|
|
+};
|
|
+
|
|
+struct simple_transaction_argresp {
|
|
+ ssize_t size;
|
|
+ char data[0];
|
|
+};
|
|
+
|
|
+struct simple_attr {
|
|
+ int (*get)(void *, u64 *);
|
|
+ int (*set)(void *, u64);
|
|
+ char get_buf[24];
|
|
+ char set_buf[24];
|
|
+ void *data;
|
|
+ const char *fmt;
|
|
+ struct mutex mutex;
|
|
+};
|
|
+
|
|
+struct wb_completion {
|
|
+ atomic_t cnt;
|
|
+};
|
|
+
|
|
+struct wb_writeback_work {
|
|
+ long int nr_pages;
|
|
+ struct super_block___2 *sb;
|
|
+ enum writeback_sync_modes sync_mode;
|
|
+ unsigned int tagged_writepages: 1;
|
|
+ unsigned int for_kupdate: 1;
|
|
+ unsigned int range_cyclic: 1;
|
|
+ unsigned int for_background: 1;
|
|
+ unsigned int for_sync: 1;
|
|
+ unsigned int auto_free: 1;
|
|
+ enum wb_reason reason;
|
|
+ struct list_head list;
|
|
+ struct wb_completion *done;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_writeback_dirty_page {
|
|
+ struct trace_entry ent;
|
|
+ char name[32];
|
|
+ long unsigned int ino;
|
|
+ long unsigned int index;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_writeback_dirty_inode_template {
|
|
+ struct trace_entry ent;
|
|
+ char name[32];
|
|
+ long unsigned int ino;
|
|
+ long unsigned int state;
|
|
+ long unsigned int flags;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_writeback_write_inode_template {
|
|
+ struct trace_entry ent;
|
|
+ char name[32];
|
|
+ long unsigned int ino;
|
|
+ int sync_mode;
|
|
+ unsigned int cgroup_ino;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_writeback_work_class {
|
|
+ struct trace_entry ent;
|
|
+ char name[32];
|
|
+ long int nr_pages;
|
|
+ dev_t sb_dev;
|
|
+ int sync_mode;
|
|
+ int for_kupdate;
|
|
+ int range_cyclic;
|
|
+ int for_background;
|
|
+ int reason;
|
|
+ unsigned int cgroup_ino;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_writeback_pages_written {
|
|
+ struct trace_entry ent;
|
|
+ long int pages;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_writeback_class {
|
|
+ struct trace_entry ent;
|
|
+ char name[32];
|
|
+ unsigned int cgroup_ino;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_writeback_bdi_register {
|
|
+ struct trace_entry ent;
|
|
+ char name[32];
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_wbc_class {
|
|
+ struct trace_entry ent;
|
|
+ char name[32];
|
|
+ long int nr_to_write;
|
|
+ long int pages_skipped;
|
|
+ int sync_mode;
|
|
+ int for_kupdate;
|
|
+ int for_background;
|
|
+ int for_reclaim;
|
|
+ int range_cyclic;
|
|
+ long int range_start;
|
|
+ long int range_end;
|
|
+ unsigned int cgroup_ino;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_writeback_queue_io {
|
|
+ struct trace_entry ent;
|
|
+ char name[32];
|
|
+ long unsigned int older;
|
|
+ long int age;
|
|
+ int moved;
|
|
+ int reason;
|
|
+ unsigned int cgroup_ino;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_global_dirty_state {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int nr_dirty;
|
|
+ long unsigned int nr_writeback;
|
|
+ long unsigned int nr_unstable;
|
|
+ long unsigned int background_thresh;
|
|
+ long unsigned int dirty_thresh;
|
|
+ long unsigned int dirty_limit;
|
|
+ long unsigned int nr_dirtied;
|
|
+ long unsigned int nr_written;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_bdi_dirty_ratelimit {
|
|
+ struct trace_entry ent;
|
|
+ char bdi[32];
|
|
+ long unsigned int write_bw;
|
|
+ long unsigned int avg_write_bw;
|
|
+ long unsigned int dirty_rate;
|
|
+ long unsigned int dirty_ratelimit;
|
|
+ long unsigned int task_ratelimit;
|
|
+ long unsigned int balanced_dirty_ratelimit;
|
|
+ unsigned int cgroup_ino;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_balance_dirty_pages {
|
|
+ struct trace_entry ent;
|
|
+ char bdi[32];
|
|
+ long unsigned int limit;
|
|
+ long unsigned int setpoint;
|
|
+ long unsigned int dirty;
|
|
+ long unsigned int bdi_setpoint;
|
|
+ long unsigned int bdi_dirty;
|
|
+ long unsigned int dirty_ratelimit;
|
|
+ long unsigned int task_ratelimit;
|
|
+ unsigned int dirtied;
|
|
+ unsigned int dirtied_pause;
|
|
+ long unsigned int paused;
|
|
+ long int pause;
|
|
+ long unsigned int period;
|
|
+ long int think;
|
|
+ unsigned int cgroup_ino;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_writeback_sb_inodes_requeue {
|
|
+ struct trace_entry ent;
|
|
+ char name[32];
|
|
+ long unsigned int ino;
|
|
+ long unsigned int state;
|
|
+ long unsigned int dirtied_when;
|
|
+ unsigned int cgroup_ino;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_writeback_congest_waited_template {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int usec_timeout;
|
|
+ unsigned int usec_delayed;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_writeback_single_inode_template {
|
|
+ struct trace_entry ent;
|
|
+ char name[32];
|
|
+ long unsigned int ino;
|
|
+ long unsigned int state;
|
|
+ long unsigned int dirtied_when;
|
|
+ long unsigned int writeback_index;
|
|
+ long int nr_to_write;
|
|
+ long unsigned int wrote;
|
|
+ unsigned int cgroup_ino;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_writeback_inode_template {
|
|
+ struct trace_entry ent;
|
|
+ dev_t dev;
|
|
+ long unsigned int ino;
|
|
+ long unsigned int state;
|
|
+ __u16 mode;
|
|
+ long unsigned int dirtied_when;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_writeback_dirty_page {};
|
|
+
|
|
+struct trace_event_data_offsets_writeback_dirty_inode_template {};
|
|
+
|
|
+struct trace_event_data_offsets_writeback_write_inode_template {};
|
|
+
|
|
+struct trace_event_data_offsets_writeback_work_class {};
|
|
+
|
|
+struct trace_event_data_offsets_writeback_pages_written {};
|
|
+
|
|
+struct trace_event_data_offsets_writeback_class {};
|
|
+
|
|
+struct trace_event_data_offsets_writeback_bdi_register {};
|
|
+
|
|
+struct trace_event_data_offsets_wbc_class {};
|
|
+
|
|
+struct trace_event_data_offsets_writeback_queue_io {};
|
|
+
|
|
+struct trace_event_data_offsets_global_dirty_state {};
|
|
+
|
|
+struct trace_event_data_offsets_bdi_dirty_ratelimit {};
|
|
+
|
|
+struct trace_event_data_offsets_balance_dirty_pages {};
|
|
+
|
|
+struct trace_event_data_offsets_writeback_sb_inodes_requeue {};
|
|
+
|
|
+struct trace_event_data_offsets_writeback_congest_waited_template {};
|
|
+
|
|
+struct trace_event_data_offsets_writeback_single_inode_template {};
|
|
+
|
|
+struct trace_event_data_offsets_writeback_inode_template {};
|
|
+
|
|
+struct inode_switch_wbs_context {
|
|
+ struct inode___2 *inode;
|
|
+ struct bdi_writeback *new_wb;
|
|
+ struct callback_head callback_head;
|
|
+ struct work_struct work;
|
|
+};
|
|
+
|
|
+struct splice_desc {
|
|
+ size_t total_len;
|
|
+ unsigned int len;
|
|
+ unsigned int flags;
|
|
+ union {
|
|
+ void *userptr;
|
|
+ struct file___2 *file;
|
|
+ void *data;
|
|
+ } u;
|
|
+ loff_t pos;
|
|
+ loff_t *opos;
|
|
+ size_t num_spliced;
|
|
+ bool need_wakeup;
|
|
+};
|
|
+
|
|
+typedef int splice_actor(struct pipe_inode_info *, struct pipe_buffer *, struct splice_desc *);
|
|
+
|
|
+typedef int splice_direct_actor(struct pipe_inode_info *, struct splice_desc *);
|
|
+
|
|
+struct utimbuf {
|
|
+ __kernel_time_t actime;
|
|
+ __kernel_time_t modtime;
|
|
+};
|
|
+
|
|
+struct compat_utimbuf {
|
|
+ compat_time_t actime;
|
|
+ compat_time_t modtime;
|
|
+};
|
|
+
|
|
+typedef int __kernel_daddr_t;
|
|
+
|
|
+struct ustat {
|
|
+ __kernel_daddr_t f_tfree;
|
|
+ __kernel_ino_t f_tinode;
|
|
+ char f_fname[6];
|
|
+ char f_fpack[6];
|
|
+};
|
|
+
|
|
+typedef s32 compat_daddr_t;
|
|
+
|
|
+typedef __kernel_fsid_t compat_fsid_t;
|
|
+
|
|
+struct compat_statfs {
|
|
+ int f_type;
|
|
+ int f_bsize;
|
|
+ int f_blocks;
|
|
+ int f_bfree;
|
|
+ int f_bavail;
|
|
+ int f_files;
|
|
+ int f_ffree;
|
|
+ compat_fsid_t f_fsid;
|
|
+ int f_namelen;
|
|
+ int f_frsize;
|
|
+ int f_flags;
|
|
+ int f_spare[4];
|
|
+};
|
|
+
|
|
+struct compat_ustat {
|
|
+ compat_daddr_t f_tfree;
|
|
+ compat_ino_t f_tinode;
|
|
+ char f_fname[6];
|
|
+ char f_fpack[6];
|
|
+};
|
|
+
|
|
+struct statfs {
|
|
+ __kernel_long_t f_type;
|
|
+ __kernel_long_t f_bsize;
|
|
+ __kernel_long_t f_blocks;
|
|
+ __kernel_long_t f_bfree;
|
|
+ __kernel_long_t f_bavail;
|
|
+ __kernel_long_t f_files;
|
|
+ __kernel_long_t f_ffree;
|
|
+ __kernel_fsid_t f_fsid;
|
|
+ __kernel_long_t f_namelen;
|
|
+ __kernel_long_t f_frsize;
|
|
+ __kernel_long_t f_flags;
|
|
+ __kernel_long_t f_spare[4];
|
|
+};
|
|
+
|
|
+struct statfs64 {
|
|
+ __kernel_long_t f_type;
|
|
+ __kernel_long_t f_bsize;
|
|
+ __u64 f_blocks;
|
|
+ __u64 f_bfree;
|
|
+ __u64 f_bavail;
|
|
+ __u64 f_files;
|
|
+ __u64 f_ffree;
|
|
+ __kernel_fsid_t f_fsid;
|
|
+ __kernel_long_t f_namelen;
|
|
+ __kernel_long_t f_frsize;
|
|
+ __kernel_long_t f_flags;
|
|
+ __kernel_long_t f_spare[4];
|
|
+};
|
|
+
|
|
+struct compat_statfs64 {
|
|
+ __u32 f_type;
|
|
+ __u32 f_bsize;
|
|
+ __u64 f_blocks;
|
|
+ __u64 f_bfree;
|
|
+ __u64 f_bavail;
|
|
+ __u64 f_files;
|
|
+ __u64 f_ffree;
|
|
+ __kernel_fsid_t f_fsid;
|
|
+ __u32 f_namelen;
|
|
+ __u32 f_frsize;
|
|
+ __u32 f_flags;
|
|
+ __u32 f_spare[4];
|
|
+} __attribute__((packed));
|
|
+
|
|
+typedef struct ns_common___2 *ns_get_path_helper_t(void *);
|
|
+
|
|
+struct ns_get_path_task_args {
|
|
+ const struct proc_ns_operations___2 *ns_ops;
|
|
+ struct task_struct___2 *task;
|
|
+};
|
|
+
|
|
+struct dax_device;
|
|
+
|
|
+struct iomap___2 {
|
|
+ u64 addr;
|
|
+ loff_t offset;
|
|
+ u64 length;
|
|
+ u16 type;
|
|
+ u16 flags;
|
|
+ struct block_device *bdev;
|
|
+ struct dax_device *dax_dev;
|
|
+ void *inline_data;
|
|
+ void *private;
|
|
+ void (*page_done)(struct inode___2 *, loff_t, unsigned int, struct page *, struct iomap___2 *);
|
|
+};
|
|
+
|
|
+struct bh_lru {
|
|
+ struct buffer_head *bhs[16];
|
|
+};
|
|
+
|
|
+struct bh_accounting {
|
|
+ int nr;
|
|
+ int ratelimit;
|
|
+};
|
|
+
|
|
+typedef struct buffer_head *pto_T_____29;
|
|
+
|
|
+enum {
|
|
+ DISK_EVENT_MEDIA_CHANGE = 1,
|
|
+ DISK_EVENT_EJECT_REQUEST = 2,
|
|
+};
|
|
+
|
|
+struct badblocks {
|
|
+ struct device *dev;
|
|
+ int count;
|
|
+ int unacked_exist;
|
|
+ int shift;
|
|
+ u64 *page;
|
|
+ int changed;
|
|
+ seqlock_t lock;
|
|
+ sector_t sector;
|
|
+ sector_t size;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ BIOSET_NEED_BVECS = 1,
|
|
+ BIOSET_NEED_RESCUER = 2,
|
|
+};
|
|
+
|
|
+struct bdev_inode {
|
|
+ struct block_device bdev;
|
|
+ struct inode___2 vfs_inode;
|
|
+};
|
|
+
|
|
+struct blkdev_dio {
|
|
+ union {
|
|
+ struct kiocb *iocb;
|
|
+ struct task_struct *waiter;
|
|
+ };
|
|
+ size_t size;
|
|
+ atomic_t ref;
|
|
+ bool multi_bio: 1;
|
|
+ bool should_dirty: 1;
|
|
+ bool is_sync: 1;
|
|
+ struct bio bio;
|
|
+};
|
|
+
|
|
+struct bd_holder_disk {
|
|
+ struct list_head list;
|
|
+ struct gendisk *disk;
|
|
+ int refcnt;
|
|
+};
|
|
+
|
|
+typedef int dio_iodone_t(struct kiocb *, loff_t, ssize_t, void *);
|
|
+
|
|
+typedef void dio_submit_t(struct bio *, struct inode___2 *, loff_t);
|
|
+
|
|
+enum {
|
|
+ DIO_LOCKING = 1,
|
|
+ DIO_SKIP_HOLES = 2,
|
|
+};
|
|
+
|
|
+struct dio_submit {
|
|
+ struct bio *bio;
|
|
+ unsigned int blkbits;
|
|
+ unsigned int blkfactor;
|
|
+ unsigned int start_zero_done;
|
|
+ int pages_in_io;
|
|
+ sector_t block_in_file;
|
|
+ unsigned int blocks_available;
|
|
+ int reap_counter;
|
|
+ sector_t final_block_in_request;
|
|
+ int boundary;
|
|
+ get_block_t *get_block;
|
|
+ dio_submit_t *submit_io;
|
|
+ loff_t logical_offset_in_bio;
|
|
+ sector_t final_block_in_bio;
|
|
+ sector_t next_block_for_io;
|
|
+ struct page___2 *cur_page;
|
|
+ unsigned int cur_page_offset;
|
|
+ unsigned int cur_page_len;
|
|
+ sector_t cur_page_block;
|
|
+ loff_t cur_page_fs_offset;
|
|
+ struct iov_iter *iter;
|
|
+ unsigned int head;
|
|
+ unsigned int tail;
|
|
+ size_t from;
|
|
+ size_t to;
|
|
+};
|
|
+
|
|
+struct dio {
|
|
+ int flags;
|
|
+ int op;
|
|
+ int op_flags;
|
|
+ blk_qc_t bio_cookie;
|
|
+ struct gendisk *bio_disk;
|
|
+ struct inode___2 *inode;
|
|
+ loff_t i_size;
|
|
+ dio_iodone_t *end_io;
|
|
+ void *private;
|
|
+ spinlock_t bio_lock;
|
|
+ int page_errors;
|
|
+ int is_async;
|
|
+ bool defer_completion;
|
|
+ bool should_dirty;
|
|
+ int io_error;
|
|
+ long unsigned int refcount;
|
|
+ struct bio *bio_list;
|
|
+ struct task_struct___2 *waiter;
|
|
+ struct kiocb *iocb;
|
|
+ ssize_t result;
|
|
+ union {
|
|
+ struct page___2 *pages[64];
|
|
+ struct work_struct complete_work;
|
|
+ };
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct mpage_readpage_args {
|
|
+ struct bio *bio;
|
|
+ struct page *page;
|
|
+ unsigned int nr_pages;
|
|
+ bool is_readahead;
|
|
+ sector_t last_block_in_bio;
|
|
+ struct buffer_head map_bh;
|
|
+ long unsigned int first_logical_block;
|
|
+ get_block_t *get_block;
|
|
+};
|
|
+
|
|
+struct mpage_data {
|
|
+ struct bio *bio;
|
|
+ sector_t last_block_in_bio;
|
|
+ get_block_t *get_block;
|
|
+ unsigned int use_writepage;
|
|
+};
|
|
+
|
|
+typedef u32 nlink_t;
|
|
+
|
|
+typedef int (*proc_write_t)(struct file___2 *, char *, size_t);
|
|
+
|
|
+struct proc_dir_entry {
|
|
+ atomic_t in_use;
|
|
+ refcount_t refcnt;
|
|
+ struct list_head pde_openers;
|
|
+ spinlock_t pde_unload_lock;
|
|
+ struct completion *pde_unload_completion;
|
|
+ const struct inode_operations___2 *proc_iops;
|
|
+ const struct file_operations___2 *proc_fops;
|
|
+ const struct dentry_operations *proc_dops;
|
|
+ union {
|
|
+ const struct seq_operations *seq_ops;
|
|
+ int (*single_show)(struct seq_file___2 *, void *);
|
|
+ };
|
|
+ proc_write_t write;
|
|
+ void *data;
|
|
+ unsigned int state_size;
|
|
+ unsigned int low_ino;
|
|
+ nlink_t nlink;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ loff_t size;
|
|
+ struct proc_dir_entry *parent;
|
|
+ struct rb_root subdir;
|
|
+ struct rb_node subdir_node;
|
|
+ char *name;
|
|
+ umode_t mode;
|
|
+ u8 namelen;
|
|
+ char inline_name[0];
|
|
+};
|
|
+
|
|
+union proc_op {
|
|
+ int (*proc_get_link)(struct dentry___2 *, struct path___2 *);
|
|
+ int (*proc_show)(struct seq_file___2 *, struct pid_namespace *, struct pid *, struct task_struct___2 *);
|
|
+};
|
|
+
|
|
+struct proc_inode {
|
|
+ struct pid *pid;
|
|
+ unsigned int fd;
|
|
+ union proc_op op;
|
|
+ struct proc_dir_entry *pde;
|
|
+ struct ctl_table_header *sysctl;
|
|
+ struct ctl_table *sysctl_entry;
|
|
+ struct hlist_node sysctl_inodes;
|
|
+ const struct proc_ns_operations *ns_ops;
|
|
+ struct inode___2 vfs_inode;
|
|
+};
|
|
+
|
|
+struct proc_fs_info {
|
|
+ int flag;
|
|
+ const char *str;
|
|
+};
|
|
+
|
|
+struct file_handle {
|
|
+ __u32 handle_bytes;
|
|
+ int handle_type;
|
|
+ unsigned char f_handle[0];
|
|
+};
|
|
+
|
|
+struct inotify_inode_mark {
|
|
+ struct fsnotify_mark fsn_mark;
|
|
+ int wd;
|
|
+};
|
|
+
|
|
+struct dnotify_struct {
|
|
+ struct dnotify_struct *dn_next;
|
|
+ __u32 dn_mask;
|
|
+ int dn_fd;
|
|
+ struct file___2 *dn_filp;
|
|
+ fl_owner_t dn_owner;
|
|
+};
|
|
+
|
|
+struct dnotify_mark {
|
|
+ struct fsnotify_mark fsn_mark;
|
|
+ struct dnotify_struct *dn;
|
|
+};
|
|
+
|
|
+struct inotify_event_info {
|
|
+ struct fsnotify_event fse;
|
|
+ int wd;
|
|
+ u32 sync_cookie;
|
|
+ int name_len;
|
|
+ char name[0];
|
|
+};
|
|
+
|
|
+struct inotify_event {
|
|
+ __s32 wd;
|
|
+ __u32 mask;
|
|
+ __u32 cookie;
|
|
+ __u32 len;
|
|
+ char name[0];
|
|
+};
|
|
+
|
|
+typedef int (*dev_page_fault_t___3)(struct vm_area_struct___2 *, long unsigned int, const struct page *, unsigned int, pmd_t *);
|
|
+
|
|
+struct fanotify_event_info {
|
|
+ struct fsnotify_event fse;
|
|
+ struct path___2 path;
|
|
+ struct pid___2 *tgid;
|
|
+};
|
|
+
|
|
+struct fanotify_perm_event_info {
|
|
+ struct fanotify_event_info fae;
|
|
+ int response;
|
|
+ int fd;
|
|
+};
|
|
+
|
|
+struct fanotify_event_metadata {
|
|
+ __u32 event_len;
|
|
+ __u8 vers;
|
|
+ __u8 reserved;
|
|
+ __u16 metadata_len;
|
|
+ __u64 mask;
|
|
+ __s32 fd;
|
|
+ __s32 pid;
|
|
+};
|
|
+
|
|
+struct fanotify_response {
|
|
+ __s32 fd;
|
|
+ __u32 response;
|
|
+};
|
|
+
|
|
+struct epoll_event {
|
|
+ __poll_t events;
|
|
+ __u64 data;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct epoll_filefd {
|
|
+ struct file *file;
|
|
+ int fd;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct nested_call_node {
|
|
+ struct list_head llink;
|
|
+ void *cookie;
|
|
+ void *ctx;
|
|
+};
|
|
+
|
|
+struct nested_calls {
|
|
+ struct list_head tasks_call_list;
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct eventpoll;
|
|
+
|
|
+struct epitem {
|
|
+ union {
|
|
+ struct rb_node rbn;
|
|
+ struct callback_head rcu;
|
|
+ };
|
|
+ struct list_head rdllink;
|
|
+ struct epitem *next;
|
|
+ struct epoll_filefd ffd;
|
|
+ int nwait;
|
|
+ struct list_head pwqlist;
|
|
+ struct eventpoll *ep;
|
|
+ struct list_head fllink;
|
|
+ struct wakeup_source *ws;
|
|
+ struct epoll_event event;
|
|
+};
|
|
+
|
|
+struct eventpoll {
|
|
+ struct mutex mtx;
|
|
+ wait_queue_head_t wq;
|
|
+ wait_queue_head_t poll_wait;
|
|
+ struct list_head rdllist;
|
|
+ struct rb_root_cached rbr;
|
|
+ struct epitem *ovflist;
|
|
+ struct wakeup_source *ws;
|
|
+ struct user_struct *user;
|
|
+ struct file *file;
|
|
+ u64 gen;
|
|
+ unsigned int napi_id;
|
|
+};
|
|
+
|
|
+struct eppoll_entry {
|
|
+ struct list_head llink;
|
|
+ struct epitem *base;
|
|
+ wait_queue_entry_t wait;
|
|
+ wait_queue_head_t *whead;
|
|
+};
|
|
+
|
|
+struct ep_pqueue {
|
|
+ poll_table pt;
|
|
+ struct epitem *epi;
|
|
+};
|
|
+
|
|
+struct ep_send_events_data {
|
|
+ int maxevents;
|
|
+ struct epoll_event *events;
|
|
+ int res;
|
|
+};
|
|
+
|
|
+struct signalfd_siginfo {
|
|
+ __u32 ssi_signo;
|
|
+ __s32 ssi_errno;
|
|
+ __s32 ssi_code;
|
|
+ __u32 ssi_pid;
|
|
+ __u32 ssi_uid;
|
|
+ __s32 ssi_fd;
|
|
+ __u32 ssi_tid;
|
|
+ __u32 ssi_band;
|
|
+ __u32 ssi_overrun;
|
|
+ __u32 ssi_trapno;
|
|
+ __s32 ssi_status;
|
|
+ __s32 ssi_int;
|
|
+ __u64 ssi_ptr;
|
|
+ __u64 ssi_utime;
|
|
+ __u64 ssi_stime;
|
|
+ __u64 ssi_addr;
|
|
+ __u16 ssi_addr_lsb;
|
|
+ __u16 __pad2;
|
|
+ __s32 ssi_syscall;
|
|
+ __u64 ssi_call_addr;
|
|
+ __u32 ssi_arch;
|
|
+ __u8 __pad[28];
|
|
+};
|
|
+
|
|
+struct signalfd_ctx {
|
|
+ sigset_t sigmask;
|
|
+};
|
|
+
|
|
+struct timerfd_ctx {
|
|
+ union {
|
|
+ struct hrtimer tmr;
|
|
+ struct alarm alarm;
|
|
+ } t;
|
|
+ ktime_t tintv;
|
|
+ ktime_t moffs;
|
|
+ wait_queue_head_t wqh;
|
|
+ u64 ticks;
|
|
+ int clockid;
|
|
+ short unsigned int expired;
|
|
+ short unsigned int settime_flags;
|
|
+ struct callback_head rcu;
|
|
+ struct list_head clist;
|
|
+ spinlock_t cancel_lock;
|
|
+ bool might_cancel;
|
|
+};
|
|
+
|
|
+struct eventfd_ctx___2 {
|
|
+ struct kref kref;
|
|
+ wait_queue_head_t wqh;
|
|
+ __u64 count;
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+struct userfaultfd_ctx {
|
|
+ wait_queue_head_t fault_pending_wqh;
|
|
+ wait_queue_head_t fault_wqh;
|
|
+ wait_queue_head_t fd_wqh;
|
|
+ wait_queue_head_t event_wqh;
|
|
+ struct seqcount refile_seq;
|
|
+ atomic_t refcount;
|
|
+ unsigned int flags;
|
|
+ unsigned int features;
|
|
+ bool released;
|
|
+ bool mmap_changing;
|
|
+ struct mm_struct___2 *mm;
|
|
+};
|
|
+
|
|
+struct uffd_msg {
|
|
+ __u8 event;
|
|
+ __u8 reserved1;
|
|
+ __u16 reserved2;
|
|
+ __u32 reserved3;
|
|
+ union {
|
|
+ struct {
|
|
+ __u64 flags;
|
|
+ __u64 address;
|
|
+ union {
|
|
+ __u32 ptid;
|
|
+ } feat;
|
|
+ } pagefault;
|
|
+ struct {
|
|
+ __u32 ufd;
|
|
+ } fork;
|
|
+ struct {
|
|
+ __u64 from;
|
|
+ __u64 to;
|
|
+ __u64 len;
|
|
+ } remap;
|
|
+ struct {
|
|
+ __u64 start;
|
|
+ __u64 end;
|
|
+ } remove;
|
|
+ struct {
|
|
+ __u64 reserved1;
|
|
+ __u64 reserved2;
|
|
+ __u64 reserved3;
|
|
+ } reserved;
|
|
+ } arg;
|
|
+};
|
|
+
|
|
+struct uffdio_api {
|
|
+ __u64 api;
|
|
+ __u64 features;
|
|
+ __u64 ioctls;
|
|
+};
|
|
+
|
|
+struct uffdio_range {
|
|
+ __u64 start;
|
|
+ __u64 len;
|
|
+};
|
|
+
|
|
+struct uffdio_register {
|
|
+ struct uffdio_range range;
|
|
+ __u64 mode;
|
|
+ __u64 ioctls;
|
|
+};
|
|
+
|
|
+struct uffdio_copy {
|
|
+ __u64 dst;
|
|
+ __u64 src;
|
|
+ __u64 len;
|
|
+ __u64 mode;
|
|
+ __s64 copy;
|
|
+};
|
|
+
|
|
+struct uffdio_zeropage {
|
|
+ struct uffdio_range range;
|
|
+ __u64 mode;
|
|
+ __s64 zeropage;
|
|
+};
|
|
+
|
|
+struct userfaultfd_fork_ctx {
|
|
+ struct userfaultfd_ctx *orig;
|
|
+ struct userfaultfd_ctx *new;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct userfaultfd_unmap_ctx {
|
|
+ struct userfaultfd_ctx *ctx;
|
|
+ long unsigned int start;
|
|
+ long unsigned int end;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct userfaultfd_wait_queue {
|
|
+ struct uffd_msg msg;
|
|
+ wait_queue_entry_t wq;
|
|
+ struct userfaultfd_ctx *ctx;
|
|
+ bool waken;
|
|
+};
|
|
+
|
|
+struct userfaultfd_wake_range {
|
|
+ long unsigned int start;
|
|
+ long unsigned int len;
|
|
+};
|
|
+
|
|
+struct kioctx;
|
|
+
|
|
+struct kioctx_table {
|
|
+ struct callback_head rcu;
|
|
+ unsigned int nr;
|
|
+ struct kioctx *table[0];
|
|
+};
|
|
+
|
|
+typedef __kernel_ulong_t aio_context_t;
|
|
+
|
|
+enum {
|
|
+ IOCB_CMD_PREAD = 0,
|
|
+ IOCB_CMD_PWRITE = 1,
|
|
+ IOCB_CMD_FSYNC = 2,
|
|
+ IOCB_CMD_FDSYNC = 3,
|
|
+ IOCB_CMD_POLL = 5,
|
|
+ IOCB_CMD_NOOP = 6,
|
|
+ IOCB_CMD_PREADV = 7,
|
|
+ IOCB_CMD_PWRITEV = 8,
|
|
+};
|
|
+
|
|
+struct io_event {
|
|
+ __u64 data;
|
|
+ __u64 obj;
|
|
+ __s64 res;
|
|
+ __s64 res2;
|
|
+};
|
|
+
|
|
+struct iocb {
|
|
+ __u64 aio_data;
|
|
+ __u32 aio_key;
|
|
+ __kernel_rwf_t aio_rw_flags;
|
|
+ __u16 aio_lio_opcode;
|
|
+ __s16 aio_reqprio;
|
|
+ __u32 aio_fildes;
|
|
+ __u64 aio_buf;
|
|
+ __u64 aio_nbytes;
|
|
+ __s64 aio_offset;
|
|
+ __u64 aio_reserved2;
|
|
+ __u32 aio_flags;
|
|
+ __u32 aio_resfd;
|
|
+};
|
|
+
|
|
+typedef compat_ulong_t compat_aio_context_t;
|
|
+
|
|
+typedef int kiocb_cancel_fn(struct kiocb *);
|
|
+
|
|
+struct aio_ring {
|
|
+ unsigned int id;
|
|
+ unsigned int nr;
|
|
+ unsigned int head;
|
|
+ unsigned int tail;
|
|
+ unsigned int magic;
|
|
+ unsigned int compat_features;
|
|
+ unsigned int incompat_features;
|
|
+ unsigned int header_length;
|
|
+ struct io_event io_events[0];
|
|
+};
|
|
+
|
|
+struct kioctx_cpu;
|
|
+
|
|
+struct ctx_rq_wait;
|
|
+
|
|
+struct kioctx {
|
|
+ struct percpu_ref users;
|
|
+ atomic_t dead;
|
|
+ struct percpu_ref reqs;
|
|
+ long unsigned int user_id;
|
|
+ struct kioctx_cpu *cpu;
|
|
+ unsigned int req_batch;
|
|
+ unsigned int max_reqs;
|
|
+ unsigned int nr_events;
|
|
+ long unsigned int mmap_base;
|
|
+ long unsigned int mmap_size;
|
|
+ struct page **ring_pages;
|
|
+ long int nr_pages;
|
|
+ struct rcu_work free_rwork;
|
|
+ struct ctx_rq_wait *rq_wait;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct {
|
|
+ atomic_t reqs_available;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ };
|
|
+ struct {
|
|
+ spinlock_t ctx_lock;
|
|
+ struct list_head active_reqs;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ };
|
|
+ struct {
|
|
+ struct mutex ring_lock;
|
|
+ wait_queue_head_t wait;
|
|
+ long: 64;
|
|
+ };
|
|
+ struct {
|
|
+ unsigned int tail;
|
|
+ unsigned int completed_events;
|
|
+ spinlock_t completion_lock;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ };
|
|
+ struct page *internal_pages[8];
|
|
+ struct file___2 *aio_ring_file;
|
|
+ unsigned int id;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct kioctx_cpu {
|
|
+ unsigned int reqs_available;
|
|
+};
|
|
+
|
|
+struct ctx_rq_wait {
|
|
+ struct completion comp;
|
|
+ atomic_t count;
|
|
+};
|
|
+
|
|
+struct fsync_iocb {
|
|
+ struct file___2 *file;
|
|
+ struct work_struct work;
|
|
+ bool datasync;
|
|
+ struct cred *creds;
|
|
+};
|
|
+
|
|
+struct poll_iocb {
|
|
+ struct file___2 *file;
|
|
+ struct wait_queue_head *head;
|
|
+ __poll_t events;
|
|
+ bool done;
|
|
+ bool cancelled;
|
|
+ struct wait_queue_entry wait;
|
|
+ struct work_struct work;
|
|
+};
|
|
+
|
|
+struct aio_kiocb {
|
|
+ union {
|
|
+ struct file___2 *ki_filp;
|
|
+ struct kiocb rw;
|
|
+ struct fsync_iocb fsync;
|
|
+ struct poll_iocb poll;
|
|
+ };
|
|
+ struct kioctx *ki_ctx;
|
|
+ kiocb_cancel_fn *ki_cancel;
|
|
+ struct io_event ki_res;
|
|
+ struct list_head ki_list;
|
|
+ refcount_t ki_refcnt;
|
|
+ struct eventfd_ctx *ki_eventfd;
|
|
+};
|
|
+
|
|
+struct aio_poll_table {
|
|
+ struct poll_table_struct pt;
|
|
+ struct aio_kiocb *iocb;
|
|
+ int error;
|
|
+};
|
|
+
|
|
+struct __aio_sigset {
|
|
+ const sigset_t *sigmask;
|
|
+ size_t sigsetsize;
|
|
+};
|
|
+
|
|
+struct __compat_aio_sigset {
|
|
+ compat_sigset_t *sigmask;
|
|
+ compat_size_t sigsetsize;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ PERCPU_REF_INIT_ATOMIC = 1,
|
|
+ PERCPU_REF_INIT_DEAD = 2,
|
|
+ PERCPU_REF_ALLOW_REINIT = 4,
|
|
+};
|
|
+
|
|
+struct user_msghdr {
|
|
+ void *msg_name;
|
|
+ int msg_namelen;
|
|
+ struct iovec *msg_iov;
|
|
+ __kernel_size_t msg_iovlen;
|
|
+ void *msg_control;
|
|
+ __kernel_size_t msg_controllen;
|
|
+ unsigned int msg_flags;
|
|
+};
|
|
+
|
|
+struct compat_msghdr {
|
|
+ compat_uptr_t msg_name;
|
|
+ compat_int_t msg_namelen;
|
|
+ compat_uptr_t msg_iov;
|
|
+ compat_size_t msg_iovlen;
|
|
+ compat_uptr_t msg_control;
|
|
+ compat_size_t msg_controllen;
|
|
+ compat_uint_t msg_flags;
|
|
+};
|
|
+
|
|
+struct scm_fp_list {
|
|
+ short int count;
|
|
+ short int max;
|
|
+ struct user_struct *user;
|
|
+ struct file *fp[253];
|
|
+};
|
|
+
|
|
+struct unix_skb_parms {
|
|
+ struct pid *pid;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ struct scm_fp_list *fp;
|
|
+ u32 secid;
|
|
+ u32 consumed;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_io_uring_create {
|
|
+ struct trace_entry ent;
|
|
+ int fd;
|
|
+ void *ctx;
|
|
+ u32 sq_entries;
|
|
+ u32 cq_entries;
|
|
+ u32 flags;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_io_uring_register {
|
|
+ struct trace_entry ent;
|
|
+ void *ctx;
|
|
+ unsigned int opcode;
|
|
+ unsigned int nr_files;
|
|
+ unsigned int nr_bufs;
|
|
+ bool eventfd;
|
|
+ long int ret;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_io_uring_file_get {
|
|
+ struct trace_entry ent;
|
|
+ void *ctx;
|
|
+ int fd;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct io_wq_work;
|
|
+
|
|
+struct trace_event_raw_io_uring_queue_async_work {
|
|
+ struct trace_entry ent;
|
|
+ void *ctx;
|
|
+ int rw;
|
|
+ void *req;
|
|
+ struct io_wq_work *work;
|
|
+ unsigned int flags;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct io_wq_work_node {
|
|
+ struct io_wq_work_node *next;
|
|
+};
|
|
+
|
|
+struct io_wq_work {
|
|
+ struct io_wq_work_node list;
|
|
+ struct files_struct *files;
|
|
+ struct mm_struct *mm;
|
|
+ const struct cred *creds;
|
|
+ struct fs_struct *fs;
|
|
+ long unsigned int fsize;
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_io_uring_defer {
|
|
+ struct trace_entry ent;
|
|
+ void *ctx;
|
|
+ void *req;
|
|
+ long long unsigned int data;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_io_uring_link {
|
|
+ struct trace_entry ent;
|
|
+ void *ctx;
|
|
+ void *req;
|
|
+ void *target_req;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_io_uring_add_to_prev {
|
|
+ struct trace_entry ent;
|
|
+ void *req;
|
|
+ bool ret;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_io_uring_cqring_wait {
|
|
+ struct trace_entry ent;
|
|
+ void *ctx;
|
|
+ int min_events;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_io_uring_fail_link {
|
|
+ struct trace_entry ent;
|
|
+ void *req;
|
|
+ void *link;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_io_uring_complete {
|
|
+ struct trace_entry ent;
|
|
+ void *ctx;
|
|
+ u64 user_data;
|
|
+ long int res;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_io_uring_submit_sqe {
|
|
+ struct trace_entry ent;
|
|
+ void *ctx;
|
|
+ u8 opcode;
|
|
+ u64 user_data;
|
|
+ bool force_nonblock;
|
|
+ bool sq_thread;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_io_uring_poll_arm {
|
|
+ struct trace_entry ent;
|
|
+ void *ctx;
|
|
+ u8 opcode;
|
|
+ u64 user_data;
|
|
+ int mask;
|
|
+ int events;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_io_uring_poll_wake {
|
|
+ struct trace_entry ent;
|
|
+ void *ctx;
|
|
+ u8 opcode;
|
|
+ u64 user_data;
|
|
+ int mask;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_io_uring_task_add {
|
|
+ struct trace_entry ent;
|
|
+ void *ctx;
|
|
+ u8 opcode;
|
|
+ u64 user_data;
|
|
+ int mask;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_io_uring_task_run {
|
|
+ struct trace_entry ent;
|
|
+ void *ctx;
|
|
+ u8 opcode;
|
|
+ u64 user_data;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_io_uring_create {};
|
|
+
|
|
+struct trace_event_data_offsets_io_uring_register {};
|
|
+
|
|
+struct trace_event_data_offsets_io_uring_file_get {};
|
|
+
|
|
+struct trace_event_data_offsets_io_uring_queue_async_work {};
|
|
+
|
|
+struct trace_event_data_offsets_io_uring_defer {};
|
|
+
|
|
+struct trace_event_data_offsets_io_uring_link {};
|
|
+
|
|
+struct trace_event_data_offsets_io_uring_add_to_prev {};
|
|
+
|
|
+struct trace_event_data_offsets_io_uring_cqring_wait {};
|
|
+
|
|
+struct trace_event_data_offsets_io_uring_fail_link {};
|
|
+
|
|
+struct trace_event_data_offsets_io_uring_complete {};
|
|
+
|
|
+struct trace_event_data_offsets_io_uring_submit_sqe {};
|
|
+
|
|
+struct trace_event_data_offsets_io_uring_poll_arm {};
|
|
+
|
|
+struct trace_event_data_offsets_io_uring_poll_wake {};
|
|
+
|
|
+struct trace_event_data_offsets_io_uring_task_add {};
|
|
+
|
|
+struct trace_event_data_offsets_io_uring_task_run {};
|
|
+
|
|
+struct io_uring_sqe {
|
|
+ __u8 opcode;
|
|
+ __u8 flags;
|
|
+ __u16 ioprio;
|
|
+ __s32 fd;
|
|
+ union {
|
|
+ __u64 off;
|
|
+ __u64 addr2;
|
|
+ };
|
|
+ union {
|
|
+ __u64 addr;
|
|
+ __u64 splice_off_in;
|
|
+ };
|
|
+ __u32 len;
|
|
+ union {
|
|
+ __kernel_rwf_t rw_flags;
|
|
+ __u32 fsync_flags;
|
|
+ __u16 poll_events;
|
|
+ __u32 poll32_events;
|
|
+ __u32 sync_range_flags;
|
|
+ __u32 msg_flags;
|
|
+ __u32 timeout_flags;
|
|
+ __u32 accept_flags;
|
|
+ __u32 cancel_flags;
|
|
+ __u32 open_flags;
|
|
+ __u32 statx_flags;
|
|
+ __u32 fadvise_advice;
|
|
+ __u32 splice_flags;
|
|
+ };
|
|
+ __u64 user_data;
|
|
+ union {
|
|
+ struct {
|
|
+ union {
|
|
+ __u16 buf_index;
|
|
+ __u16 buf_group;
|
|
+ };
|
|
+ __u16 personality;
|
|
+ __s32 splice_fd_in;
|
|
+ };
|
|
+ __u64 __pad2[3];
|
|
+ };
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IOSQE_FIXED_FILE_BIT = 0,
|
|
+ IOSQE_IO_DRAIN_BIT = 1,
|
|
+ IOSQE_IO_LINK_BIT = 2,
|
|
+ IOSQE_IO_HARDLINK_BIT = 3,
|
|
+ IOSQE_ASYNC_BIT = 4,
|
|
+ IOSQE_BUFFER_SELECT_BIT = 5,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IORING_OP_NOP = 0,
|
|
+ IORING_OP_READV = 1,
|
|
+ IORING_OP_WRITEV = 2,
|
|
+ IORING_OP_FSYNC = 3,
|
|
+ IORING_OP_READ_FIXED = 4,
|
|
+ IORING_OP_WRITE_FIXED = 5,
|
|
+ IORING_OP_POLL_ADD = 6,
|
|
+ IORING_OP_POLL_REMOVE = 7,
|
|
+ IORING_OP_SYNC_FILE_RANGE = 8,
|
|
+ IORING_OP_SENDMSG = 9,
|
|
+ IORING_OP_RECVMSG = 10,
|
|
+ IORING_OP_TIMEOUT = 11,
|
|
+ IORING_OP_TIMEOUT_REMOVE = 12,
|
|
+ IORING_OP_ACCEPT = 13,
|
|
+ IORING_OP_ASYNC_CANCEL = 14,
|
|
+ IORING_OP_LINK_TIMEOUT = 15,
|
|
+ IORING_OP_CONNECT = 16,
|
|
+ IORING_OP_FALLOCATE = 17,
|
|
+ IORING_OP_OPENAT = 18,
|
|
+ IORING_OP_CLOSE = 19,
|
|
+ IORING_OP_FILES_UPDATE = 20,
|
|
+ IORING_OP_STATX = 21,
|
|
+ IORING_OP_READ = 22,
|
|
+ IORING_OP_WRITE = 23,
|
|
+ IORING_OP_FADVISE = 24,
|
|
+ IORING_OP_MADVISE = 25,
|
|
+ IORING_OP_SEND = 26,
|
|
+ IORING_OP_RECV = 27,
|
|
+ IORING_OP_OPENAT2 = 28,
|
|
+ IORING_OP_EPOLL_CTL = 29,
|
|
+ IORING_OP_SPLICE = 30,
|
|
+ IORING_OP_PROVIDE_BUFFERS = 31,
|
|
+ IORING_OP_REMOVE_BUFFERS = 32,
|
|
+ IORING_OP_TEE = 33,
|
|
+ IORING_OP_LAST = 34,
|
|
+};
|
|
+
|
|
+struct io_uring_cqe {
|
|
+ __u64 user_data;
|
|
+ __s32 res;
|
|
+ __u32 flags;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IORING_CQE_BUFFER_SHIFT = 16,
|
|
+};
|
|
+
|
|
+struct io_sqring_offsets {
|
|
+ __u32 head;
|
|
+ __u32 tail;
|
|
+ __u32 ring_mask;
|
|
+ __u32 ring_entries;
|
|
+ __u32 flags;
|
|
+ __u32 dropped;
|
|
+ __u32 array;
|
|
+ __u32 resv1;
|
|
+ __u64 resv2;
|
|
+};
|
|
+
|
|
+struct io_cqring_offsets {
|
|
+ __u32 head;
|
|
+ __u32 tail;
|
|
+ __u32 ring_mask;
|
|
+ __u32 ring_entries;
|
|
+ __u32 overflow;
|
|
+ __u32 cqes;
|
|
+ __u32 flags;
|
|
+ __u32 resv1;
|
|
+ __u64 resv2;
|
|
+};
|
|
+
|
|
+struct io_uring_params {
|
|
+ __u32 sq_entries;
|
|
+ __u32 cq_entries;
|
|
+ __u32 flags;
|
|
+ __u32 sq_thread_cpu;
|
|
+ __u32 sq_thread_idle;
|
|
+ __u32 features;
|
|
+ __u32 wq_fd;
|
|
+ __u32 resv[3];
|
|
+ struct io_sqring_offsets sq_off;
|
|
+ struct io_cqring_offsets cq_off;
|
|
+};
|
|
+
|
|
+struct io_uring_files_update {
|
|
+ __u32 offset;
|
|
+ __u32 resv;
|
|
+ __u64 fds;
|
|
+};
|
|
+
|
|
+struct io_uring_probe_op {
|
|
+ __u8 op;
|
|
+ __u8 resv;
|
|
+ __u16 flags;
|
|
+ __u32 resv2;
|
|
+};
|
|
+
|
|
+struct io_uring_probe {
|
|
+ __u8 last_op;
|
|
+ __u8 ops_len;
|
|
+ __u16 resv;
|
|
+ __u32 resv2[3];
|
|
+ struct io_uring_probe_op ops[0];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IO_WQ_WORK_CANCEL = 1,
|
|
+ IO_WQ_WORK_HASHED = 2,
|
|
+ IO_WQ_WORK_UNBOUND = 4,
|
|
+ IO_WQ_WORK_NO_CANCEL = 8,
|
|
+ IO_WQ_WORK_CONCURRENT = 16,
|
|
+ IO_WQ_HASH_SHIFT = 24,
|
|
+};
|
|
+
|
|
+enum io_wq_cancel {
|
|
+ IO_WQ_CANCEL_OK = 0,
|
|
+ IO_WQ_CANCEL_RUNNING = 1,
|
|
+ IO_WQ_CANCEL_NOTFOUND = 2,
|
|
+};
|
|
+
|
|
+typedef void free_work_fn(struct io_wq_work *);
|
|
+
|
|
+typedef struct io_wq_work *io_wq_work_fn(struct io_wq_work *);
|
|
+
|
|
+struct io_wq_data {
|
|
+ struct user_struct *user;
|
|
+ io_wq_work_fn *do_work;
|
|
+ free_work_fn *free_work;
|
|
+};
|
|
+
|
|
+struct io_uring {
|
|
+ u32 head;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ u32 tail;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct io_rings {
|
|
+ struct io_uring sq;
|
|
+ struct io_uring cq;
|
|
+ u32 sq_ring_mask;
|
|
+ u32 cq_ring_mask;
|
|
+ u32 sq_ring_entries;
|
|
+ u32 cq_ring_entries;
|
|
+ u32 sq_dropped;
|
|
+ u32 sq_flags;
|
|
+ u32 cq_flags;
|
|
+ u32 cq_overflow;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct io_uring_cqe cqes[0];
|
|
+};
|
|
+
|
|
+struct io_mapped_ubuf {
|
|
+ u64 ubuf;
|
|
+ size_t len;
|
|
+ struct bio_vec *bvec;
|
|
+ unsigned int nr_bvecs;
|
|
+};
|
|
+
|
|
+struct fixed_file_table {
|
|
+ struct file **files;
|
|
+};
|
|
+
|
|
+struct fixed_file_data;
|
|
+
|
|
+struct fixed_file_ref_node {
|
|
+ struct percpu_ref refs;
|
|
+ struct list_head node;
|
|
+ struct list_head file_list;
|
|
+ struct fixed_file_data *file_data;
|
|
+ struct llist_node llist;
|
|
+ bool done;
|
|
+};
|
|
+
|
|
+struct io_ring_ctx;
|
|
+
|
|
+struct fixed_file_data {
|
|
+ struct fixed_file_table *table;
|
|
+ struct io_ring_ctx *ctx;
|
|
+ struct fixed_file_ref_node *node;
|
|
+ struct percpu_ref refs;
|
|
+ struct completion done;
|
|
+ struct list_head ref_list;
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct io_wq;
|
|
+
|
|
+struct io_kiocb;
|
|
+
|
|
+struct io_ring_ctx {
|
|
+ struct {
|
|
+ struct percpu_ref refs;
|
|
+ long: 64;
|
|
+ };
|
|
+ struct {
|
|
+ unsigned int flags;
|
|
+ unsigned int compat: 1;
|
|
+ unsigned int limit_mem: 1;
|
|
+ unsigned int cq_overflow_flushed: 1;
|
|
+ unsigned int drain_next: 1;
|
|
+ unsigned int eventfd_async: 1;
|
|
+ u32 *sq_array;
|
|
+ unsigned int cached_sq_head;
|
|
+ unsigned int sq_entries;
|
|
+ unsigned int sq_mask;
|
|
+ unsigned int sq_thread_idle;
|
|
+ unsigned int cached_sq_dropped;
|
|
+ atomic_t cached_cq_overflow;
|
|
+ long unsigned int sq_check_overflow;
|
|
+ struct list_head defer_list;
|
|
+ struct list_head timeout_list;
|
|
+ struct list_head cq_overflow_list;
|
|
+ wait_queue_head_t inflight_wait;
|
|
+ struct io_uring_sqe *sq_sqes;
|
|
+ };
|
|
+ struct io_rings *rings;
|
|
+ struct io_wq *io_wq;
|
|
+ struct task_struct *sqo_thread;
|
|
+ struct task_struct *sqo_task;
|
|
+ struct mm_struct *mm_account;
|
|
+ wait_queue_head_t sqo_wait;
|
|
+ struct fixed_file_data *file_data;
|
|
+ unsigned int nr_user_files;
|
|
+ int ring_fd;
|
|
+ struct file *ring_file;
|
|
+ unsigned int nr_user_bufs;
|
|
+ struct io_mapped_ubuf *user_bufs;
|
|
+ struct user_struct *user;
|
|
+ const struct cred *creds;
|
|
+ struct completion ref_comp;
|
|
+ struct completion sq_thread_comp;
|
|
+ struct io_kiocb *fallback_req;
|
|
+ struct socket *ring_sock;
|
|
+ struct idr io_buffer_idr;
|
|
+ struct idr personality_idr;
|
|
+ long: 64;
|
|
+ struct {
|
|
+ unsigned int cached_cq_tail;
|
|
+ unsigned int cq_entries;
|
|
+ unsigned int cq_mask;
|
|
+ atomic_t cq_timeouts;
|
|
+ unsigned int cq_last_tm_flush;
|
|
+ long unsigned int cq_check_overflow;
|
|
+ struct wait_queue_head cq_wait;
|
|
+ struct fasync_struct *cq_fasync;
|
|
+ struct eventfd_ctx *cq_ev_fd;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ };
|
|
+ struct {
|
|
+ struct mutex uring_lock;
|
|
+ wait_queue_head_t wait;
|
|
+ long: 64;
|
|
+ };
|
|
+ struct {
|
|
+ spinlock_t completion_lock;
|
|
+ struct list_head iopoll_list;
|
|
+ struct hlist_head *cancel_hash;
|
|
+ unsigned int cancel_hash_bits;
|
|
+ bool poll_multi_file;
|
|
+ spinlock_t inflight_lock;
|
|
+ struct list_head inflight_list;
|
|
+ };
|
|
+ struct delayed_work file_put_work;
|
|
+ struct llist_head file_put_llist;
|
|
+ struct work_struct exit_work;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct io_buffer {
|
|
+ struct list_head list;
|
|
+ __u64 addr;
|
|
+ __u32 len;
|
|
+ __u16 bid;
|
|
+};
|
|
+
|
|
+struct io_rw {
|
|
+ struct kiocb kiocb;
|
|
+ u64 addr;
|
|
+ u64 len;
|
|
+};
|
|
+
|
|
+struct io_poll_iocb {
|
|
+ struct file *file;
|
|
+ union {
|
|
+ struct wait_queue_head *head;
|
|
+ u64 addr;
|
|
+ };
|
|
+ __poll_t events;
|
|
+ bool done;
|
|
+ bool canceled;
|
|
+ struct wait_queue_entry wait;
|
|
+};
|
|
+
|
|
+struct io_accept {
|
|
+ struct file *file;
|
|
+ struct sockaddr *addr;
|
|
+ int *addr_len;
|
|
+ int flags;
|
|
+ long unsigned int nofile;
|
|
+};
|
|
+
|
|
+struct io_sync {
|
|
+ struct file *file;
|
|
+ loff_t len;
|
|
+ loff_t off;
|
|
+ int flags;
|
|
+ int mode;
|
|
+};
|
|
+
|
|
+struct io_cancel {
|
|
+ struct file *file;
|
|
+ u64 addr;
|
|
+};
|
|
+
|
|
+struct io_timeout {
|
|
+ struct file *file;
|
|
+ u64 addr;
|
|
+ int flags;
|
|
+ u32 off;
|
|
+ u32 target_seq;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct io_connect {
|
|
+ struct file *file;
|
|
+ struct sockaddr *addr;
|
|
+ int addr_len;
|
|
+};
|
|
+
|
|
+struct io_sr_msg {
|
|
+ struct file *file;
|
|
+ union {
|
|
+ struct user_msghdr *umsg;
|
|
+ void *buf;
|
|
+ };
|
|
+ int msg_flags;
|
|
+ int bgid;
|
|
+ size_t len;
|
|
+ struct io_buffer *kbuf;
|
|
+};
|
|
+
|
|
+struct io_open {
|
|
+ struct file *file;
|
|
+ int dfd;
|
|
+ union {
|
|
+ umode_t mode;
|
|
+ };
|
|
+ struct filename *filename;
|
|
+ int flags;
|
|
+ long unsigned int nofile;
|
|
+};
|
|
+
|
|
+struct io_close {
|
|
+ struct file *file;
|
|
+ struct file *put_file;
|
|
+ int fd;
|
|
+};
|
|
+
|
|
+struct io_files_update {
|
|
+ struct file *file;
|
|
+ u64 arg;
|
|
+ u32 nr_args;
|
|
+ u32 offset;
|
|
+};
|
|
+
|
|
+struct io_fadvise {
|
|
+ struct file *file;
|
|
+ u64 offset;
|
|
+ u32 len;
|
|
+ u32 advice;
|
|
+};
|
|
+
|
|
+struct io_madvise {
|
|
+ struct file *file;
|
|
+ u64 addr;
|
|
+ u32 len;
|
|
+ u32 advice;
|
|
+};
|
|
+
|
|
+struct io_epoll {
|
|
+ struct file *file;
|
|
+ int epfd;
|
|
+ int op;
|
|
+ int fd;
|
|
+ struct epoll_event event;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct io_splice {
|
|
+ struct file *file_out;
|
|
+ struct file *file_in;
|
|
+ loff_t off_out;
|
|
+ loff_t off_in;
|
|
+ u64 len;
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+struct io_provide_buf {
|
|
+ struct file *file;
|
|
+ __u64 addr;
|
|
+ __s32 len;
|
|
+ __u32 bgid;
|
|
+ __u16 nbufs;
|
|
+ __u16 bid;
|
|
+};
|
|
+
|
|
+struct io_statx {
|
|
+ struct file *file;
|
|
+ int dfd;
|
|
+ unsigned int mask;
|
|
+ unsigned int flags;
|
|
+ const char *filename;
|
|
+ struct statx *buffer;
|
|
+};
|
|
+
|
|
+struct io_completion {
|
|
+ struct file *file;
|
|
+ struct list_head list;
|
|
+ int cflags;
|
|
+};
|
|
+
|
|
+struct io_async_ctx;
|
|
+
|
|
+struct async_poll;
|
|
+
|
|
+struct io_kiocb {
|
|
+ union {
|
|
+ struct file *file;
|
|
+ struct io_rw rw;
|
|
+ struct io_poll_iocb poll;
|
|
+ struct io_accept accept;
|
|
+ struct io_sync sync;
|
|
+ struct io_cancel cancel;
|
|
+ struct io_timeout timeout;
|
|
+ struct io_connect connect;
|
|
+ struct io_sr_msg sr_msg;
|
|
+ struct io_open open;
|
|
+ struct io_close close;
|
|
+ struct io_files_update files_update;
|
|
+ struct io_fadvise fadvise;
|
|
+ struct io_madvise madvise;
|
|
+ struct io_epoll epoll;
|
|
+ struct io_splice splice;
|
|
+ struct io_provide_buf pbuf;
|
|
+ struct io_statx statx;
|
|
+ struct io_completion compl;
|
|
+ };
|
|
+ struct io_async_ctx *io;
|
|
+ u8 opcode;
|
|
+ u8 iopoll_completed;
|
|
+ u16 buf_index;
|
|
+ u32 result;
|
|
+ struct io_ring_ctx *ctx;
|
|
+ unsigned int flags;
|
|
+ refcount_t refs;
|
|
+ struct task_struct *task;
|
|
+ u64 user_data;
|
|
+ struct list_head link_list;
|
|
+ struct list_head inflight_entry;
|
|
+ struct percpu_ref *fixed_file_refs;
|
|
+ struct callback_head task_work;
|
|
+ struct hlist_node hash_node;
|
|
+ struct async_poll *apoll;
|
|
+ struct io_wq_work work;
|
|
+};
|
|
+
|
|
+struct io_timeout_data {
|
|
+ struct io_kiocb *req;
|
|
+ struct hrtimer timer;
|
|
+ struct timespec64 ts;
|
|
+ enum hrtimer_mode mode;
|
|
+};
|
|
+
|
|
+struct io_async_connect {
|
|
+ struct __kernel_sockaddr_storage address;
|
|
+};
|
|
+
|
|
+struct io_async_msghdr {
|
|
+ struct iovec fast_iov[8];
|
|
+ struct iovec *iov;
|
|
+ struct sockaddr *uaddr;
|
|
+ struct msghdr msg;
|
|
+ struct __kernel_sockaddr_storage addr;
|
|
+};
|
|
+
|
|
+struct io_async_rw {
|
|
+ struct iovec fast_iov[8];
|
|
+ struct iovec *iov;
|
|
+ ssize_t nr_segs;
|
|
+ ssize_t size;
|
|
+};
|
|
+
|
|
+struct io_async_ctx {
|
|
+ union {
|
|
+ struct io_async_rw rw;
|
|
+ struct io_async_msghdr msg;
|
|
+ struct io_async_connect connect;
|
|
+ struct io_timeout_data timeout;
|
|
+ };
|
|
+};
|
|
+
|
|
+enum {
|
|
+ REQ_F_FIXED_FILE_BIT = 0,
|
|
+ REQ_F_IO_DRAIN_BIT = 1,
|
|
+ REQ_F_LINK_BIT = 2,
|
|
+ REQ_F_HARDLINK_BIT = 3,
|
|
+ REQ_F_FORCE_ASYNC_BIT = 4,
|
|
+ REQ_F_BUFFER_SELECT_BIT = 5,
|
|
+ REQ_F_LINK_HEAD_BIT = 6,
|
|
+ REQ_F_FAIL_LINK_BIT = 7,
|
|
+ REQ_F_INFLIGHT_BIT = 8,
|
|
+ REQ_F_CUR_POS_BIT = 9,
|
|
+ REQ_F_NOWAIT_BIT = 10,
|
|
+ REQ_F_LINK_TIMEOUT_BIT = 11,
|
|
+ REQ_F_ISREG_BIT = 12,
|
|
+ REQ_F_COMP_LOCKED_BIT = 13,
|
|
+ REQ_F_NEED_CLEANUP_BIT = 14,
|
|
+ REQ_F_POLLED_BIT = 15,
|
|
+ REQ_F_BUFFER_SELECTED_BIT = 16,
|
|
+ REQ_F_NO_FILE_TABLE_BIT = 17,
|
|
+ REQ_F_WORK_INITIALIZED_BIT = 18,
|
|
+ REQ_F_TASK_PINNED_BIT = 19,
|
|
+ __REQ_F_LAST_BIT = 20,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ REQ_F_FIXED_FILE = 1,
|
|
+ REQ_F_IO_DRAIN = 2,
|
|
+ REQ_F_LINK = 4,
|
|
+ REQ_F_HARDLINK = 8,
|
|
+ REQ_F_FORCE_ASYNC = 16,
|
|
+ REQ_F_BUFFER_SELECT = 32,
|
|
+ REQ_F_LINK_HEAD = 64,
|
|
+ REQ_F_FAIL_LINK = 128,
|
|
+ REQ_F_INFLIGHT = 256,
|
|
+ REQ_F_CUR_POS = 512,
|
|
+ REQ_F_NOWAIT = 1024,
|
|
+ REQ_F_LINK_TIMEOUT = 2048,
|
|
+ REQ_F_ISREG = 4096,
|
|
+ REQ_F_COMP_LOCKED = 8192,
|
|
+ REQ_F_NEED_CLEANUP = 16384,
|
|
+ REQ_F_POLLED = 32768,
|
|
+ REQ_F_BUFFER_SELECTED = 65536,
|
|
+ REQ_F_NO_FILE_TABLE = 131072,
|
|
+ REQ_F_WORK_INITIALIZED = 262144,
|
|
+ REQ_F_TASK_PINNED = 524288,
|
|
+};
|
|
+
|
|
+struct async_poll {
|
|
+ struct io_poll_iocb poll;
|
|
+ struct io_poll_iocb *double_poll;
|
|
+};
|
|
+
|
|
+struct io_defer_entry {
|
|
+ struct list_head list;
|
|
+ struct io_kiocb *req;
|
|
+ u32 seq;
|
|
+};
|
|
+
|
|
+struct io_comp_state {
|
|
+ unsigned int nr;
|
|
+ struct list_head list;
|
|
+ struct io_ring_ctx *ctx;
|
|
+};
|
|
+
|
|
+struct io_submit_state {
|
|
+ struct blk_plug plug;
|
|
+ void *reqs[8];
|
|
+ unsigned int free_reqs;
|
|
+ struct io_comp_state comp;
|
|
+ struct file *file;
|
|
+ unsigned int fd;
|
|
+ unsigned int has_refs;
|
|
+ unsigned int ios_left;
|
|
+};
|
|
+
|
|
+struct io_op_def {
|
|
+ unsigned int async_ctx: 1;
|
|
+ unsigned int needs_mm: 1;
|
|
+ unsigned int needs_file: 1;
|
|
+ unsigned int needs_file_no_error: 1;
|
|
+ unsigned int hash_reg_file: 1;
|
|
+ unsigned int unbound_nonreg_file: 1;
|
|
+ unsigned int not_supported: 1;
|
|
+ unsigned int file_table: 1;
|
|
+ unsigned int needs_fs: 1;
|
|
+ unsigned int pollin: 1;
|
|
+ unsigned int pollout: 1;
|
|
+ unsigned int buffer_select: 1;
|
|
+ unsigned int needs_fsize: 1;
|
|
+};
|
|
+
|
|
+enum io_mem_account {
|
|
+ ACCT_LOCKED = 0,
|
|
+ ACCT_PINNED = 1,
|
|
+};
|
|
+
|
|
+struct req_batch {
|
|
+ void *reqs[8];
|
|
+ int to_free;
|
|
+ struct task_struct *task;
|
|
+ int task_refs;
|
|
+};
|
|
+
|
|
+struct io_poll_table {
|
|
+ struct poll_table_struct pt;
|
|
+ struct io_kiocb *req;
|
|
+ int error;
|
|
+};
|
|
+
|
|
+struct io_wait_queue {
|
|
+ struct wait_queue_entry wq;
|
|
+ struct io_ring_ctx *ctx;
|
|
+ unsigned int to_wait;
|
|
+ unsigned int nr_timeouts;
|
|
+};
|
|
+
|
|
+struct io_file_put {
|
|
+ struct list_head list;
|
|
+ struct file *file;
|
|
+};
|
|
+
|
|
+struct io_wq_work_list {
|
|
+ struct io_wq_work_node *first;
|
|
+ struct io_wq_work_node *last;
|
|
+};
|
|
+
|
|
+typedef bool work_cancel_fn(struct io_wq_work *, void *);
|
|
+
|
|
+enum {
|
|
+ IO_WORKER_F_UP = 1,
|
|
+ IO_WORKER_F_RUNNING = 2,
|
|
+ IO_WORKER_F_FREE = 4,
|
|
+ IO_WORKER_F_EXITING = 8,
|
|
+ IO_WORKER_F_FIXED = 16,
|
|
+ IO_WORKER_F_BOUND = 32,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IO_WQ_BIT_EXIT = 0,
|
|
+ IO_WQ_BIT_CANCEL = 1,
|
|
+ IO_WQ_BIT_ERROR = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IO_WQE_FLAG_STALLED = 1,
|
|
+};
|
|
+
|
|
+struct io_wqe;
|
|
+
|
|
+struct io_worker {
|
|
+ refcount_t ref;
|
|
+ unsigned int flags;
|
|
+ struct hlist_nulls_node nulls_node;
|
|
+ struct list_head all_list;
|
|
+ struct task_struct___2 *task;
|
|
+ struct io_wqe *wqe;
|
|
+ struct io_wq_work *cur_work;
|
|
+ spinlock_t lock;
|
|
+ struct callback_head rcu;
|
|
+ struct mm_struct___2 *mm;
|
|
+ const struct cred___2 *cur_creds;
|
|
+ const struct cred___2 *saved_creds;
|
|
+ struct files_struct *restore_files;
|
|
+ struct fs_struct *restore_fs;
|
|
+};
|
|
+
|
|
+struct io_wqe_acct {
|
|
+ unsigned int nr_workers;
|
|
+ unsigned int max_workers;
|
|
+ atomic_t nr_running;
|
|
+};
|
|
+
|
|
+struct io_wq___2;
|
|
+
|
|
+struct io_wqe {
|
|
+ struct {
|
|
+ spinlock_t lock;
|
|
+ struct io_wq_work_list work_list;
|
|
+ long unsigned int hash_map;
|
|
+ unsigned int flags;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ };
|
|
+ int node;
|
|
+ struct io_wqe_acct acct[2];
|
|
+ struct hlist_nulls_head free_list;
|
|
+ struct list_head all_list;
|
|
+ struct io_wq___2 *wq;
|
|
+ struct io_wq_work *hash_tail[64];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IO_WQ_ACCT_BOUND = 0,
|
|
+ IO_WQ_ACCT_UNBOUND = 1,
|
|
+};
|
|
+
|
|
+struct io_wq___2 {
|
|
+ struct io_wqe **wqes;
|
|
+ long unsigned int state;
|
|
+ free_work_fn *free_work;
|
|
+ io_wq_work_fn *do_work;
|
|
+ struct task_struct___2 *manager;
|
|
+ struct user_struct___2 *user;
|
|
+ refcount_t refs;
|
|
+ struct completion done;
|
|
+ refcount_t use_refs;
|
|
+};
|
|
+
|
|
+struct io_cb_cancel_data {
|
|
+ work_cancel_fn *fn;
|
|
+ void *data;
|
|
+ int nr_running;
|
|
+ int nr_pending;
|
|
+ bool cancel_all;
|
|
+};
|
|
+
|
|
+struct iomap_ops {
|
|
+ int (*iomap_begin)(struct inode___2 *, loff_t, loff_t, unsigned int, struct iomap___2 *);
|
|
+ int (*iomap_end)(struct inode___2 *, loff_t, loff_t, ssize_t, unsigned int, struct iomap___2 *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_dax_pmd_fault_class {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int ino;
|
|
+ long unsigned int vm_start;
|
|
+ long unsigned int vm_end;
|
|
+ long unsigned int vm_flags;
|
|
+ long unsigned int address;
|
|
+ long unsigned int pgoff;
|
|
+ long unsigned int max_pgoff;
|
|
+ dev_t dev;
|
|
+ unsigned int flags;
|
|
+ int result;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_dax_pmd_load_hole_class {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int ino;
|
|
+ long unsigned int vm_flags;
|
|
+ long unsigned int address;
|
|
+ struct page *zero_page;
|
|
+ void *radix_entry;
|
|
+ dev_t dev;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_dax_pmd_insert_mapping_class {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int ino;
|
|
+ long unsigned int vm_flags;
|
|
+ long unsigned int address;
|
|
+ long int length;
|
|
+ u64 pfn_val;
|
|
+ void *radix_entry;
|
|
+ dev_t dev;
|
|
+ int write;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_dax_pte_fault_class {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int ino;
|
|
+ long unsigned int vm_flags;
|
|
+ long unsigned int address;
|
|
+ long unsigned int pgoff;
|
|
+ dev_t dev;
|
|
+ unsigned int flags;
|
|
+ int result;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_dax_insert_mapping {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int ino;
|
|
+ long unsigned int vm_flags;
|
|
+ long unsigned int address;
|
|
+ void *radix_entry;
|
|
+ dev_t dev;
|
|
+ int write;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_dax_writeback_range_class {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int ino;
|
|
+ long unsigned int start_index;
|
|
+ long unsigned int end_index;
|
|
+ dev_t dev;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_dax_writeback_one {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int ino;
|
|
+ long unsigned int pgoff;
|
|
+ long unsigned int pglen;
|
|
+ dev_t dev;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_dax_pmd_fault_class {};
|
|
+
|
|
+struct trace_event_data_offsets_dax_pmd_load_hole_class {};
|
|
+
|
|
+struct trace_event_data_offsets_dax_pmd_insert_mapping_class {};
|
|
+
|
|
+struct trace_event_data_offsets_dax_pte_fault_class {};
|
|
+
|
|
+struct trace_event_data_offsets_dax_insert_mapping {};
|
|
+
|
|
+struct trace_event_data_offsets_dax_writeback_range_class {};
|
|
+
|
|
+struct trace_event_data_offsets_dax_writeback_one {};
|
|
+
|
|
+struct exceptional_entry_key {
|
|
+ struct address_space *mapping;
|
|
+ long unsigned int entry_start;
|
|
+};
|
|
+
|
|
+struct wait_exceptional_entry_queue {
|
|
+ wait_queue_entry_t wait;
|
|
+ struct exceptional_entry_key key;
|
|
+};
|
|
+
|
|
+struct flock64 {
|
|
+ short int l_type;
|
|
+ short int l_whence;
|
|
+ __kernel_loff_t l_start;
|
|
+ __kernel_loff_t l_len;
|
|
+ __kernel_pid_t l_pid;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_locks_get_lock_context {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int i_ino;
|
|
+ dev_t s_dev;
|
|
+ unsigned char type;
|
|
+ struct file_lock_context *ctx;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_filelock_lock {
|
|
+ struct trace_entry ent;
|
|
+ struct file_lock *fl;
|
|
+ long unsigned int i_ino;
|
|
+ dev_t s_dev;
|
|
+ struct file_lock *fl_next;
|
|
+ fl_owner_t fl_owner;
|
|
+ unsigned int fl_pid;
|
|
+ unsigned int fl_flags;
|
|
+ unsigned char fl_type;
|
|
+ loff_t fl_start;
|
|
+ loff_t fl_end;
|
|
+ int ret;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_filelock_lease {
|
|
+ struct trace_entry ent;
|
|
+ struct file_lock *fl;
|
|
+ long unsigned int i_ino;
|
|
+ dev_t s_dev;
|
|
+ struct file_lock *fl_next;
|
|
+ fl_owner_t fl_owner;
|
|
+ unsigned int fl_flags;
|
|
+ unsigned char fl_type;
|
|
+ long unsigned int fl_break_time;
|
|
+ long unsigned int fl_downgrade_time;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_generic_add_lease {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int i_ino;
|
|
+ int wcount;
|
|
+ int dcount;
|
|
+ int icount;
|
|
+ dev_t s_dev;
|
|
+ fl_owner_t fl_owner;
|
|
+ unsigned int fl_flags;
|
|
+ unsigned char fl_type;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_locks_get_lock_context {};
|
|
+
|
|
+struct trace_event_data_offsets_filelock_lock {};
|
|
+
|
|
+struct trace_event_data_offsets_filelock_lease {};
|
|
+
|
|
+struct trace_event_data_offsets_generic_add_lease {};
|
|
+
|
|
+struct file_lock_list_struct {
|
|
+ spinlock_t lock;
|
|
+ struct hlist_head hlist;
|
|
+};
|
|
+
|
|
+struct locks_iterator {
|
|
+ int li_cpu;
|
|
+ loff_t li_pos;
|
|
+};
|
|
+
|
|
+struct nfs_string {
|
|
+ unsigned int len;
|
|
+ const char *data;
|
|
+};
|
|
+
|
|
+struct nfs4_mount_data {
|
|
+ int version;
|
|
+ int flags;
|
|
+ int rsize;
|
|
+ int wsize;
|
|
+ int timeo;
|
|
+ int retrans;
|
|
+ int acregmin;
|
|
+ int acregmax;
|
|
+ int acdirmin;
|
|
+ int acdirmax;
|
|
+ struct nfs_string client_addr;
|
|
+ struct nfs_string mnt_path;
|
|
+ struct nfs_string hostname;
|
|
+ unsigned int host_addrlen;
|
|
+ struct sockaddr *host_addr;
|
|
+ int proto;
|
|
+ int auth_flavourlen;
|
|
+ int *auth_flavours;
|
|
+};
|
|
+
|
|
+struct compat_nfs_string {
|
|
+ compat_uint_t len;
|
|
+ compat_uptr_t data;
|
|
+};
|
|
+
|
|
+struct compat_nfs4_mount_data_v1 {
|
|
+ compat_int_t version;
|
|
+ compat_int_t flags;
|
|
+ compat_int_t rsize;
|
|
+ compat_int_t wsize;
|
|
+ compat_int_t timeo;
|
|
+ compat_int_t retrans;
|
|
+ compat_int_t acregmin;
|
|
+ compat_int_t acregmax;
|
|
+ compat_int_t acdirmin;
|
|
+ compat_int_t acdirmax;
|
|
+ struct compat_nfs_string client_addr;
|
|
+ struct compat_nfs_string mnt_path;
|
|
+ struct compat_nfs_string hostname;
|
|
+ compat_uint_t host_addrlen;
|
|
+ compat_uptr_t host_addr;
|
|
+ compat_int_t proto;
|
|
+ compat_int_t auth_flavourlen;
|
|
+ compat_uptr_t auth_flavours;
|
|
+};
|
|
+
|
|
+typedef u32 compat_caddr_t;
|
|
+
|
|
+typedef int br_should_route_hook_t(struct sk_buff *);
|
|
+
|
|
+struct ppp_idle {
|
|
+ __kernel_time_t xmit_idle;
|
|
+ __kernel_time_t recv_idle;
|
|
+};
|
|
+
|
|
+struct ppp_option_data {
|
|
+ __u8 *ptr;
|
|
+ __u32 length;
|
|
+ int transmit;
|
|
+};
|
|
+
|
|
+struct mtget {
|
|
+ long int mt_type;
|
|
+ long int mt_resid;
|
|
+ long int mt_dsreg;
|
|
+ long int mt_gstat;
|
|
+ long int mt_erreg;
|
|
+ __kernel_daddr_t mt_fileno;
|
|
+ __kernel_daddr_t mt_blkno;
|
|
+};
|
|
+
|
|
+struct mtpos {
|
|
+ long int mt_blkno;
|
|
+};
|
|
+
|
|
+enum v4l2_preemphasis {
|
|
+ V4L2_PREEMPHASIS_DISABLED = 0,
|
|
+ V4L2_PREEMPHASIS_50_uS = 1,
|
|
+ V4L2_PREEMPHASIS_75_uS = 2,
|
|
+};
|
|
+
|
|
+struct atalk_addr {
|
|
+ __be16 s_net;
|
|
+ __u8 s_node;
|
|
+};
|
|
+
|
|
+struct atalk_netrange {
|
|
+ __u8 nr_phase;
|
|
+ __be16 nr_firstnet;
|
|
+ __be16 nr_lastnet;
|
|
+};
|
|
+
|
|
+struct atalk_route {
|
|
+ struct net_device *dev;
|
|
+ struct atalk_addr target;
|
|
+ struct atalk_addr gateway;
|
|
+ int flags;
|
|
+ struct atalk_route *next;
|
|
+};
|
|
+
|
|
+struct atalk_iface {
|
|
+ struct net_device *dev;
|
|
+ struct atalk_addr address;
|
|
+ int status;
|
|
+ struct atalk_netrange nets;
|
|
+ struct atalk_iface *next;
|
|
+};
|
|
+
|
|
+struct datalink_proto;
|
|
+
|
|
+struct sg_iovec {
|
|
+ void *iov_base;
|
|
+ size_t iov_len;
|
|
+};
|
|
+
|
|
+typedef struct sg_iovec sg_iovec_t;
|
|
+
|
|
+struct sg_io_hdr {
|
|
+ int interface_id;
|
|
+ int dxfer_direction;
|
|
+ unsigned char cmd_len;
|
|
+ unsigned char mx_sb_len;
|
|
+ short unsigned int iovec_count;
|
|
+ unsigned int dxfer_len;
|
|
+ void *dxferp;
|
|
+ unsigned char *cmdp;
|
|
+ void *sbp;
|
|
+ unsigned int timeout;
|
|
+ unsigned int flags;
|
|
+ int pack_id;
|
|
+ void *usr_ptr;
|
|
+ unsigned char status;
|
|
+ unsigned char masked_status;
|
|
+ unsigned char msg_status;
|
|
+ unsigned char sb_len_wr;
|
|
+ short unsigned int host_status;
|
|
+ short unsigned int driver_status;
|
|
+ int resid;
|
|
+ unsigned int duration;
|
|
+ unsigned int info;
|
|
+};
|
|
+
|
|
+typedef struct sg_io_hdr sg_io_hdr_t;
|
|
+
|
|
+struct sg_req_info {
|
|
+ char req_state;
|
|
+ char orphan;
|
|
+ char sg_io_owned;
|
|
+ char problem;
|
|
+ int pack_id;
|
|
+ void *usr_ptr;
|
|
+ unsigned int duration;
|
|
+ int unused;
|
|
+};
|
|
+
|
|
+typedef struct sg_req_info sg_req_info_t;
|
|
+
|
|
+struct atm_blli {
|
|
+ unsigned char l2_proto;
|
|
+ union {
|
|
+ struct {
|
|
+ unsigned char mode;
|
|
+ unsigned char window;
|
|
+ } itu;
|
|
+ unsigned char user;
|
|
+ } l2;
|
|
+ unsigned char l3_proto;
|
|
+ union {
|
|
+ struct {
|
|
+ unsigned char mode;
|
|
+ unsigned char def_size;
|
|
+ unsigned char window;
|
|
+ } itu;
|
|
+ unsigned char user;
|
|
+ struct {
|
|
+ unsigned char term_type;
|
|
+ unsigned char fw_mpx_cap;
|
|
+ unsigned char bw_mpx_cap;
|
|
+ } h310;
|
|
+ struct {
|
|
+ unsigned char ipi;
|
|
+ unsigned char snap[5];
|
|
+ } tr9577;
|
|
+ } l3;
|
|
+};
|
|
+
|
|
+struct atm_bhli {
|
|
+ unsigned char hl_type;
|
|
+ unsigned char hl_length;
|
|
+ unsigned char hl_info[8];
|
|
+};
|
|
+
|
|
+struct atm_sap {
|
|
+ struct atm_bhli bhli;
|
|
+ struct atm_blli blli[3];
|
|
+};
|
|
+
|
|
+struct atm_trafprm {
|
|
+ unsigned char traffic_class;
|
|
+ int max_pcr;
|
|
+ int pcr;
|
|
+ int min_pcr;
|
|
+ int max_cdv;
|
|
+ int max_sdu;
|
|
+ unsigned int icr;
|
|
+ unsigned int tbe;
|
|
+ unsigned int frtt: 24;
|
|
+ unsigned int rif: 4;
|
|
+ unsigned int rdf: 4;
|
|
+ unsigned int nrm_pres: 1;
|
|
+ unsigned int trm_pres: 1;
|
|
+ unsigned int adtf_pres: 1;
|
|
+ unsigned int cdf_pres: 1;
|
|
+ unsigned int nrm: 3;
|
|
+ unsigned int trm: 3;
|
|
+ unsigned int adtf: 10;
|
|
+ unsigned int cdf: 3;
|
|
+ unsigned int spare: 9;
|
|
+};
|
|
+
|
|
+struct atm_qos {
|
|
+ struct atm_trafprm txtp;
|
|
+ struct atm_trafprm rxtp;
|
|
+ unsigned char aal;
|
|
+};
|
|
+
|
|
+struct sockaddr_atmsvc {
|
|
+ short unsigned int sas_family;
|
|
+ struct {
|
|
+ unsigned char prv[20];
|
|
+ char pub[13];
|
|
+ char lij_type;
|
|
+ __u32 lij_id;
|
|
+ } sas_addr;
|
|
+};
|
|
+
|
|
+struct atm_cirange {
|
|
+ signed char vpi_bits;
|
|
+ signed char vci_bits;
|
|
+};
|
|
+
|
|
+struct k_atm_aal_stats {
|
|
+ atomic_t tx;
|
|
+ atomic_t tx_err;
|
|
+ atomic_t rx;
|
|
+ atomic_t rx_err;
|
|
+ atomic_t rx_drop;
|
|
+};
|
|
+
|
|
+struct k_atm_dev_stats {
|
|
+ struct k_atm_aal_stats aal0;
|
|
+ struct k_atm_aal_stats aal34;
|
|
+ struct k_atm_aal_stats aal5;
|
|
+};
|
|
+
|
|
+struct atm_dev;
|
|
+
|
|
+struct atm_vcc {
|
|
+ struct sock sk;
|
|
+ long unsigned int flags;
|
|
+ short int vpi;
|
|
+ int vci;
|
|
+ long unsigned int aal_options;
|
|
+ long unsigned int atm_options;
|
|
+ struct atm_dev *dev;
|
|
+ struct atm_qos qos;
|
|
+ struct atm_sap sap;
|
|
+ void (*release_cb)(struct atm_vcc *);
|
|
+ void (*push)(struct atm_vcc *, struct sk_buff *);
|
|
+ void (*pop)(struct atm_vcc *, struct sk_buff *);
|
|
+ int (*push_oam)(struct atm_vcc *, void *);
|
|
+ int (*send)(struct atm_vcc *, struct sk_buff *);
|
|
+ void *dev_data;
|
|
+ void *proto_data;
|
|
+ struct k_atm_aal_stats *stats;
|
|
+ struct module___2 *owner;
|
|
+ short int itf;
|
|
+ struct sockaddr_atmsvc local;
|
|
+ struct sockaddr_atmsvc remote;
|
|
+ struct atm_vcc *session;
|
|
+ void *user_back;
|
|
+};
|
|
+
|
|
+struct atmdev_ops;
|
|
+
|
|
+struct atmphy_ops;
|
|
+
|
|
+struct atm_dev {
|
|
+ const struct atmdev_ops *ops;
|
|
+ const struct atmphy_ops *phy;
|
|
+ const char *type;
|
|
+ int number;
|
|
+ void *dev_data;
|
|
+ void *phy_data;
|
|
+ long unsigned int flags;
|
|
+ struct list_head local;
|
|
+ struct list_head lecs;
|
|
+ unsigned char esi[6];
|
|
+ struct atm_cirange ci_range;
|
|
+ struct k_atm_dev_stats stats;
|
|
+ char signal;
|
|
+ int link_rate;
|
|
+ refcount_t refcnt;
|
|
+ spinlock_t lock;
|
|
+ struct proc_dir_entry *proc_entry;
|
|
+ char *proc_name;
|
|
+ struct device class_dev;
|
|
+ struct list_head dev_list;
|
|
+};
|
|
+
|
|
+struct atmdev_ops {
|
|
+ void (*dev_close)(struct atm_dev *);
|
|
+ int (*open)(struct atm_vcc *);
|
|
+ void (*close)(struct atm_vcc *);
|
|
+ int (*ioctl)(struct atm_dev *, unsigned int, void *);
|
|
+ int (*compat_ioctl)(struct atm_dev *, unsigned int, void *);
|
|
+ int (*getsockopt)(struct atm_vcc *, int, int, void *, int);
|
|
+ int (*setsockopt)(struct atm_vcc *, int, int, void *, unsigned int);
|
|
+ int (*send)(struct atm_vcc *, struct sk_buff *);
|
|
+ int (*send_oam)(struct atm_vcc *, void *, int);
|
|
+ void (*phy_put)(struct atm_dev *, unsigned char, long unsigned int);
|
|
+ unsigned char (*phy_get)(struct atm_dev *, long unsigned int);
|
|
+ int (*change_qos)(struct atm_vcc *, struct atm_qos *, int);
|
|
+ int (*proc_read)(struct atm_dev *, loff_t *, char *);
|
|
+ struct module___2 *owner;
|
|
+};
|
|
+
|
|
+struct atmphy_ops {
|
|
+ int (*start)(struct atm_dev *);
|
|
+ int (*ioctl)(struct atm_dev *, unsigned int, void *);
|
|
+ void (*interrupt)(struct atm_dev *);
|
|
+ int (*stop)(struct atm_dev *);
|
|
+};
|
|
+
|
|
+struct atm_tcp_ops {
|
|
+ int (*attach)(struct atm_vcc *, int);
|
|
+ int (*create_persistent)(int);
|
|
+ int (*remove_persistent)(int);
|
|
+ struct module___2 *owner;
|
|
+};
|
|
+
|
|
+typedef enum {
|
|
+ VIDEO_FORMAT_4_3 = 0,
|
|
+ VIDEO_FORMAT_16_9 = 1,
|
|
+ VIDEO_FORMAT_221_1 = 2,
|
|
+} video_format_t;
|
|
+
|
|
+typedef struct {
|
|
+ int w;
|
|
+ int h;
|
|
+ video_format_t aspect_ratio;
|
|
+} video_size_t;
|
|
+
|
|
+struct video_event {
|
|
+ __s32 type;
|
|
+ long int timestamp;
|
|
+ union {
|
|
+ video_size_t size;
|
|
+ unsigned int frame_rate;
|
|
+ unsigned char vsync_field;
|
|
+ } u;
|
|
+};
|
|
+
|
|
+struct video_still_picture {
|
|
+ char *iFrame;
|
|
+ __s32 size;
|
|
+};
|
|
+
|
|
+struct compat_video_event {
|
|
+ int32_t type;
|
|
+ compat_time_t timestamp;
|
|
+ union {
|
|
+ video_size_t size;
|
|
+ unsigned int frame_rate;
|
|
+ } u;
|
|
+};
|
|
+
|
|
+struct compat_video_still_picture {
|
|
+ compat_uptr_t iFrame;
|
|
+ int32_t size;
|
|
+};
|
|
+
|
|
+struct sg_io_hdr32 {
|
|
+ compat_int_t interface_id;
|
|
+ compat_int_t dxfer_direction;
|
|
+ unsigned char cmd_len;
|
|
+ unsigned char mx_sb_len;
|
|
+ short unsigned int iovec_count;
|
|
+ compat_uint_t dxfer_len;
|
|
+ compat_uint_t dxferp;
|
|
+ compat_uptr_t cmdp;
|
|
+ compat_uptr_t sbp;
|
|
+ compat_uint_t timeout;
|
|
+ compat_uint_t flags;
|
|
+ compat_int_t pack_id;
|
|
+ compat_uptr_t usr_ptr;
|
|
+ unsigned char status;
|
|
+ unsigned char masked_status;
|
|
+ unsigned char msg_status;
|
|
+ unsigned char sb_len_wr;
|
|
+ short unsigned int host_status;
|
|
+ short unsigned int driver_status;
|
|
+ compat_int_t resid;
|
|
+ compat_uint_t duration;
|
|
+ compat_uint_t info;
|
|
+};
|
|
+
|
|
+typedef struct sg_io_hdr32 sg_io_hdr32_t;
|
|
+
|
|
+struct sg_iovec32 {
|
|
+ compat_uint_t iov_base;
|
|
+ compat_uint_t iov_len;
|
|
+};
|
|
+
|
|
+typedef struct sg_iovec32 sg_iovec32_t;
|
|
+
|
|
+struct compat_sg_req_info {
|
|
+ char req_state;
|
|
+ char orphan;
|
|
+ char sg_io_owned;
|
|
+ char problem;
|
|
+ int pack_id;
|
|
+ compat_uptr_t usr_ptr;
|
|
+ unsigned int duration;
|
|
+ int unused;
|
|
+};
|
|
+
|
|
+struct sock_fprog32 {
|
|
+ short unsigned int len;
|
|
+ compat_caddr_t filter;
|
|
+};
|
|
+
|
|
+struct ppp_option_data32 {
|
|
+ compat_caddr_t ptr;
|
|
+ u32 length;
|
|
+ compat_int_t transmit;
|
|
+};
|
|
+
|
|
+struct ppp_idle32 {
|
|
+ compat_time_t xmit_idle;
|
|
+ compat_time_t recv_idle;
|
|
+};
|
|
+
|
|
+struct mtget32 {
|
|
+ compat_long_t mt_type;
|
|
+ compat_long_t mt_resid;
|
|
+ compat_long_t mt_dsreg;
|
|
+ compat_long_t mt_gstat;
|
|
+ compat_long_t mt_erreg;
|
|
+ compat_daddr_t mt_fileno;
|
|
+ compat_daddr_t mt_blkno;
|
|
+};
|
|
+
|
|
+struct mtpos32 {
|
|
+ compat_long_t mt_blkno;
|
|
+};
|
|
+
|
|
+struct serial_struct32 {
|
|
+ compat_int_t type;
|
|
+ compat_int_t line;
|
|
+ compat_uint_t port;
|
|
+ compat_int_t irq;
|
|
+ compat_int_t flags;
|
|
+ compat_int_t xmit_fifo_size;
|
|
+ compat_int_t custom_divisor;
|
|
+ compat_int_t baud_base;
|
|
+ short unsigned int close_delay;
|
|
+ char io_type;
|
|
+ char reserved_char[1];
|
|
+ compat_int_t hub6;
|
|
+ short unsigned int closing_wait;
|
|
+ short unsigned int closing_wait2;
|
|
+ compat_uint_t iomem_base;
|
|
+ short unsigned int iomem_reg_shift;
|
|
+ unsigned int port_high;
|
|
+ compat_int_t reserved[1];
|
|
+};
|
|
+
|
|
+struct space_resv_32 {
|
|
+ __s16 l_type;
|
|
+ __s16 l_whence;
|
|
+ __s64 l_start;
|
|
+ __s64 l_len;
|
|
+ __s32 l_sysid;
|
|
+ __u32 l_pid;
|
|
+ __s32 l_pad[4];
|
|
+} __attribute__((packed));
|
|
+
|
|
+typedef unsigned int __kernel_uid_t;
|
|
+
|
|
+typedef unsigned int __kernel_gid_t;
|
|
+
|
|
+struct elf_prpsinfo {
|
|
+ char pr_state;
|
|
+ char pr_sname;
|
|
+ char pr_zomb;
|
|
+ char pr_nice;
|
|
+ long unsigned int pr_flag;
|
|
+ __kernel_uid_t pr_uid;
|
|
+ __kernel_gid_t pr_gid;
|
|
+ pid_t pr_pid;
|
|
+ pid_t pr_ppid;
|
|
+ pid_t pr_pgrp;
|
|
+ pid_t pr_sid;
|
|
+ char pr_fname[16];
|
|
+ char pr_psargs[80];
|
|
+};
|
|
+
|
|
+struct arch_elf_state {};
|
|
+
|
|
+struct memelfnote {
|
|
+ const char *name;
|
|
+ int type;
|
|
+ unsigned int datasz;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+typedef int user_regset_active_fn___2(struct task_struct___2 *, const struct user_regset *);
|
|
+
|
|
+typedef int user_regset_get_fn___2(struct task_struct___2 *, const struct user_regset *, unsigned int, unsigned int, void *, void *);
|
|
+
|
|
+typedef int user_regset_set_fn___2(struct task_struct___2 *, const struct user_regset *, unsigned int, unsigned int, const void *, const void *);
|
|
+
|
|
+typedef int user_regset_writeback_fn___2(struct task_struct___2 *, const struct user_regset *, int);
|
|
+
|
|
+typedef unsigned int user_regset_get_size_fn___2(struct task_struct___2 *, const struct user_regset *);
|
|
+
|
|
+struct elf_thread_core_info {
|
|
+ struct elf_thread_core_info *next;
|
|
+ struct task_struct___2 *task;
|
|
+ struct elf_prstatus prstatus;
|
|
+ struct memelfnote notes[0];
|
|
+};
|
|
+
|
|
+struct elf_note_info {
|
|
+ struct elf_thread_core_info *thread;
|
|
+ struct memelfnote psinfo;
|
|
+ struct memelfnote signote;
|
|
+ struct memelfnote auxv;
|
|
+ struct memelfnote files;
|
|
+ siginfo_t csigdata;
|
|
+ size_t size;
|
|
+ int thread_notes;
|
|
+};
|
|
+
|
|
+struct elf32_shdr {
|
|
+ Elf32_Word sh_name;
|
|
+ Elf32_Word sh_type;
|
|
+ Elf32_Word sh_flags;
|
|
+ Elf32_Addr sh_addr;
|
|
+ Elf32_Off sh_offset;
|
|
+ Elf32_Word sh_size;
|
|
+ Elf32_Word sh_link;
|
|
+ Elf32_Word sh_info;
|
|
+ Elf32_Word sh_addralign;
|
|
+ Elf32_Word sh_entsize;
|
|
+};
|
|
+
|
|
+typedef struct user_regs_struct compat_elf_gregset_t;
|
|
+
|
|
+struct compat_elf_siginfo {
|
|
+ compat_int_t si_signo;
|
|
+ compat_int_t si_code;
|
|
+ compat_int_t si_errno;
|
|
+};
|
|
+
|
|
+struct compat_elf_prstatus {
|
|
+ struct compat_elf_siginfo pr_info;
|
|
+ short int pr_cursig;
|
|
+ compat_ulong_t pr_sigpend;
|
|
+ compat_ulong_t pr_sighold;
|
|
+ compat_pid_t pr_pid;
|
|
+ compat_pid_t pr_ppid;
|
|
+ compat_pid_t pr_pgrp;
|
|
+ compat_pid_t pr_sid;
|
|
+ struct compat_timeval pr_utime;
|
|
+ struct compat_timeval pr_stime;
|
|
+ struct compat_timeval pr_cutime;
|
|
+ struct compat_timeval pr_cstime;
|
|
+ compat_elf_gregset_t pr_reg;
|
|
+ compat_int_t pr_fpvalid;
|
|
+};
|
|
+
|
|
+struct compat_elf_prpsinfo {
|
|
+ char pr_state;
|
|
+ char pr_sname;
|
|
+ char pr_zomb;
|
|
+ char pr_nice;
|
|
+ compat_ulong_t pr_flag;
|
|
+ __compat_uid_t pr_uid;
|
|
+ __compat_gid_t pr_gid;
|
|
+ compat_pid_t pr_pid;
|
|
+ compat_pid_t pr_ppid;
|
|
+ compat_pid_t pr_pgrp;
|
|
+ compat_pid_t pr_sid;
|
|
+ char pr_fname[16];
|
|
+ char pr_psargs[80];
|
|
+};
|
|
+
|
|
+struct elf_thread_core_info___2 {
|
|
+ struct elf_thread_core_info___2 *next;
|
|
+ struct task_struct___2 *task;
|
|
+ struct compat_elf_prstatus prstatus;
|
|
+ struct memelfnote notes[0];
|
|
+};
|
|
+
|
|
+struct elf_note_info___2 {
|
|
+ struct elf_thread_core_info___2 *thread;
|
|
+ struct memelfnote psinfo;
|
|
+ struct memelfnote signote;
|
|
+ struct memelfnote auxv;
|
|
+ struct memelfnote files;
|
|
+ compat_siginfo_t csigdata;
|
|
+ size_t size;
|
|
+ int thread_notes;
|
|
+};
|
|
+
|
|
+typedef __u32 __le32;
|
|
+
|
|
+struct posix_acl_xattr_entry {
|
|
+ __le16 e_tag;
|
|
+ __le16 e_perm;
|
|
+ __le32 e_id;
|
|
+};
|
|
+
|
|
+struct posix_acl_xattr_header {
|
|
+ __le32 a_version;
|
|
+};
|
|
+
|
|
+struct core_name {
|
|
+ char *corename;
|
|
+ int used;
|
|
+ int size;
|
|
+};
|
|
+
|
|
+struct files_cgroup {
|
|
+ struct cgroup_subsys_state css;
|
|
+ struct page_counter open_handles;
|
|
+};
|
|
+
|
|
+struct iomap_page {
|
|
+ atomic_t read_count;
|
|
+ atomic_t write_count;
|
|
+ spinlock_t uptodate_lock;
|
|
+ long unsigned int uptodate[1];
|
|
+};
|
|
+
|
|
+typedef int iomap_dio_end_io_t(struct kiocb *, ssize_t, unsigned int);
|
|
+
|
|
+typedef loff_t (*iomap_actor_t)(struct inode___2 *, loff_t, loff_t, void *, struct iomap___2 *);
|
|
+
|
|
+struct iomap_readpage_ctx {
|
|
+ struct page *cur_page;
|
|
+ bool cur_page_in_bio;
|
|
+ bool is_readahead;
|
|
+ struct bio *bio;
|
|
+ struct list_head *pages;
|
|
+};
|
|
+
|
|
+struct fiemap_ctx {
|
|
+ struct fiemap_extent_info *fi;
|
|
+ struct iomap___2 prev;
|
|
+};
|
|
+
|
|
+struct iomap_dio {
|
|
+ struct kiocb *iocb;
|
|
+ iomap_dio_end_io_t *end_io;
|
|
+ loff_t i_size;
|
|
+ loff_t size;
|
|
+ atomic_t ref;
|
|
+ unsigned int flags;
|
|
+ int error;
|
|
+ bool wait_for_completion;
|
|
+ union {
|
|
+ struct {
|
|
+ struct iov_iter *iter;
|
|
+ struct task_struct *waiter;
|
|
+ struct request_queue *last_queue;
|
|
+ blk_qc_t cookie;
|
|
+ } submit;
|
|
+ struct {
|
|
+ struct work_struct work;
|
|
+ } aio;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct iomap_swapfile_info {
|
|
+ struct iomap___2 iomap;
|
|
+ struct swap_info_struct *sis;
|
|
+ uint64_t lowest_ppage;
|
|
+ uint64_t highest_ppage;
|
|
+ long unsigned int nr_pages;
|
|
+ int nr_extents;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ QIF_BLIMITS_B = 0,
|
|
+ QIF_SPACE_B = 1,
|
|
+ QIF_ILIMITS_B = 2,
|
|
+ QIF_INODES_B = 3,
|
|
+ QIF_BTIME_B = 4,
|
|
+ QIF_ITIME_B = 5,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ DQF_ROOT_SQUASH_B = 0,
|
|
+ DQF_SYS_FILE_B = 16,
|
|
+ DQF_PRIVATE = 17,
|
|
+};
|
|
+
|
|
+typedef __kernel_uid32_t qid_t;
|
|
+
|
|
+enum {
|
|
+ DQF_INFO_DIRTY_B = 17,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ DQST_LOOKUPS = 0,
|
|
+ DQST_DROPS = 1,
|
|
+ DQST_READS = 2,
|
|
+ DQST_WRITES = 3,
|
|
+ DQST_CACHE_HITS = 4,
|
|
+ DQST_ALLOC_DQUOTS = 5,
|
|
+ DQST_FREE_DQUOTS = 6,
|
|
+ DQST_SYNCS = 7,
|
|
+ _DQST_DQSTAT_LAST = 8,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ _DQUOT_USAGE_ENABLED = 0,
|
|
+ _DQUOT_LIMITS_ENABLED = 1,
|
|
+ _DQUOT_SUSPENDED = 2,
|
|
+ _DQUOT_STATE_FLAGS = 3,
|
|
+};
|
|
+
|
|
+struct quota_module_name {
|
|
+ int qm_fmt_id;
|
|
+ char *qm_mod_name;
|
|
+};
|
|
+
|
|
+struct dquot_warn {
|
|
+ struct super_block___2 *w_sb;
|
|
+ struct kqid w_dq_id;
|
|
+ short int w_type;
|
|
+};
|
|
+
|
|
+struct qtree_fmt_operations {
|
|
+ void (*mem2disk_dqblk)(void *, struct dquot___2 *);
|
|
+ void (*disk2mem_dqblk)(struct dquot___2 *, void *);
|
|
+ int (*is_id)(void *, struct dquot___2 *);
|
|
+};
|
|
+
|
|
+struct qtree_mem_dqinfo {
|
|
+ struct super_block___2 *dqi_sb;
|
|
+ int dqi_type;
|
|
+ unsigned int dqi_blocks;
|
|
+ unsigned int dqi_free_blk;
|
|
+ unsigned int dqi_free_entry;
|
|
+ unsigned int dqi_blocksize_bits;
|
|
+ unsigned int dqi_entry_size;
|
|
+ unsigned int dqi_usable_bs;
|
|
+ unsigned int dqi_qtree_depth;
|
|
+ const struct qtree_fmt_operations *dqi_ops;
|
|
+};
|
|
+
|
|
+struct v2_disk_dqheader {
|
|
+ __le32 dqh_magic;
|
|
+ __le32 dqh_version;
|
|
+};
|
|
+
|
|
+struct v2r0_disk_dqblk {
|
|
+ __le32 dqb_id;
|
|
+ __le32 dqb_ihardlimit;
|
|
+ __le32 dqb_isoftlimit;
|
|
+ __le32 dqb_curinodes;
|
|
+ __le32 dqb_bhardlimit;
|
|
+ __le32 dqb_bsoftlimit;
|
|
+ __le64 dqb_curspace;
|
|
+ __le64 dqb_btime;
|
|
+ __le64 dqb_itime;
|
|
+};
|
|
+
|
|
+struct v2r1_disk_dqblk {
|
|
+ __le32 dqb_id;
|
|
+ __le32 dqb_pad;
|
|
+ __le64 dqb_ihardlimit;
|
|
+ __le64 dqb_isoftlimit;
|
|
+ __le64 dqb_curinodes;
|
|
+ __le64 dqb_bhardlimit;
|
|
+ __le64 dqb_bsoftlimit;
|
|
+ __le64 dqb_curspace;
|
|
+ __le64 dqb_btime;
|
|
+ __le64 dqb_itime;
|
|
+};
|
|
+
|
|
+struct v2_disk_dqinfo {
|
|
+ __le32 dqi_bgrace;
|
|
+ __le32 dqi_igrace;
|
|
+ __le32 dqi_flags;
|
|
+ __le32 dqi_blocks;
|
|
+ __le32 dqi_free_blk;
|
|
+ __le32 dqi_free_entry;
|
|
+};
|
|
+
|
|
+struct qt_disk_dqdbheader {
|
|
+ __le32 dqdh_next_free;
|
|
+ __le32 dqdh_prev_free;
|
|
+ __le16 dqdh_entries;
|
|
+ __le16 dqdh_pad1;
|
|
+ __le32 dqdh_pad2;
|
|
+};
|
|
+
|
|
+struct fs_disk_quota {
|
|
+ __s8 d_version;
|
|
+ __s8 d_flags;
|
|
+ __u16 d_fieldmask;
|
|
+ __u32 d_id;
|
|
+ __u64 d_blk_hardlimit;
|
|
+ __u64 d_blk_softlimit;
|
|
+ __u64 d_ino_hardlimit;
|
|
+ __u64 d_ino_softlimit;
|
|
+ __u64 d_bcount;
|
|
+ __u64 d_icount;
|
|
+ __s32 d_itimer;
|
|
+ __s32 d_btimer;
|
|
+ __u16 d_iwarns;
|
|
+ __u16 d_bwarns;
|
|
+ __s32 d_padding2;
|
|
+ __u64 d_rtb_hardlimit;
|
|
+ __u64 d_rtb_softlimit;
|
|
+ __u64 d_rtbcount;
|
|
+ __s32 d_rtbtimer;
|
|
+ __u16 d_rtbwarns;
|
|
+ __s16 d_padding3;
|
|
+ char d_padding4[8];
|
|
+};
|
|
+
|
|
+struct fs_qfilestat {
|
|
+ __u64 qfs_ino;
|
|
+ __u64 qfs_nblks;
|
|
+ __u32 qfs_nextents;
|
|
+};
|
|
+
|
|
+typedef struct fs_qfilestat fs_qfilestat_t;
|
|
+
|
|
+struct fs_quota_stat {
|
|
+ __s8 qs_version;
|
|
+ __u16 qs_flags;
|
|
+ __s8 qs_pad;
|
|
+ fs_qfilestat_t qs_uquota;
|
|
+ fs_qfilestat_t qs_gquota;
|
|
+ __u32 qs_incoredqs;
|
|
+ __s32 qs_btimelimit;
|
|
+ __s32 qs_itimelimit;
|
|
+ __s32 qs_rtbtimelimit;
|
|
+ __u16 qs_bwarnlimit;
|
|
+ __u16 qs_iwarnlimit;
|
|
+};
|
|
+
|
|
+struct fs_qfilestatv {
|
|
+ __u64 qfs_ino;
|
|
+ __u64 qfs_nblks;
|
|
+ __u32 qfs_nextents;
|
|
+ __u32 qfs_pad;
|
|
+};
|
|
+
|
|
+struct fs_quota_statv {
|
|
+ __s8 qs_version;
|
|
+ __u8 qs_pad1;
|
|
+ __u16 qs_flags;
|
|
+ __u32 qs_incoredqs;
|
|
+ struct fs_qfilestatv qs_uquota;
|
|
+ struct fs_qfilestatv qs_gquota;
|
|
+ struct fs_qfilestatv qs_pquota;
|
|
+ __s32 qs_btimelimit;
|
|
+ __s32 qs_itimelimit;
|
|
+ __s32 qs_rtbtimelimit;
|
|
+ __u16 qs_bwarnlimit;
|
|
+ __u16 qs_iwarnlimit;
|
|
+ __u64 qs_pad2[8];
|
|
+};
|
|
+
|
|
+struct if_dqblk {
|
|
+ __u64 dqb_bhardlimit;
|
|
+ __u64 dqb_bsoftlimit;
|
|
+ __u64 dqb_curspace;
|
|
+ __u64 dqb_ihardlimit;
|
|
+ __u64 dqb_isoftlimit;
|
|
+ __u64 dqb_curinodes;
|
|
+ __u64 dqb_btime;
|
|
+ __u64 dqb_itime;
|
|
+ __u32 dqb_valid;
|
|
+};
|
|
+
|
|
+struct if_nextdqblk {
|
|
+ __u64 dqb_bhardlimit;
|
|
+ __u64 dqb_bsoftlimit;
|
|
+ __u64 dqb_curspace;
|
|
+ __u64 dqb_ihardlimit;
|
|
+ __u64 dqb_isoftlimit;
|
|
+ __u64 dqb_curinodes;
|
|
+ __u64 dqb_btime;
|
|
+ __u64 dqb_itime;
|
|
+ __u32 dqb_valid;
|
|
+ __u32 dqb_id;
|
|
+};
|
|
+
|
|
+struct if_dqinfo {
|
|
+ __u64 dqi_bgrace;
|
|
+ __u64 dqi_igrace;
|
|
+ __u32 dqi_flags;
|
|
+ __u32 dqi_valid;
|
|
+};
|
|
+
|
|
+struct compat_if_dqblk {
|
|
+ compat_u64 dqb_bhardlimit;
|
|
+ compat_u64 dqb_bsoftlimit;
|
|
+ compat_u64 dqb_curspace;
|
|
+ compat_u64 dqb_ihardlimit;
|
|
+ compat_u64 dqb_isoftlimit;
|
|
+ compat_u64 dqb_curinodes;
|
|
+ compat_u64 dqb_btime;
|
|
+ compat_u64 dqb_itime;
|
|
+ compat_uint_t dqb_valid;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct compat_fs_qfilestat {
|
|
+ compat_u64 dqb_bhardlimit;
|
|
+ compat_u64 qfs_nblks;
|
|
+ compat_uint_t qfs_nextents;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct compat_fs_quota_stat {
|
|
+ __s8 qs_version;
|
|
+ char: 8;
|
|
+ __u16 qs_flags;
|
|
+ __s8 qs_pad;
|
|
+ int: 24;
|
|
+ struct compat_fs_qfilestat qs_uquota;
|
|
+ struct compat_fs_qfilestat qs_gquota;
|
|
+ compat_uint_t qs_incoredqs;
|
|
+ compat_int_t qs_btimelimit;
|
|
+ compat_int_t qs_itimelimit;
|
|
+ compat_int_t qs_rtbtimelimit;
|
|
+ __u16 qs_bwarnlimit;
|
|
+ __u16 qs_iwarnlimit;
|
|
+} __attribute__((packed));
|
|
+
|
|
+enum {
|
|
+ QUOTA_NL_C_UNSPEC = 0,
|
|
+ QUOTA_NL_C_WARNING = 1,
|
|
+ __QUOTA_NL_C_MAX = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ QUOTA_NL_A_UNSPEC = 0,
|
|
+ QUOTA_NL_A_QTYPE = 1,
|
|
+ QUOTA_NL_A_EXCESS_ID = 2,
|
|
+ QUOTA_NL_A_WARNING = 3,
|
|
+ QUOTA_NL_A_DEV_MAJOR = 4,
|
|
+ QUOTA_NL_A_DEV_MINOR = 5,
|
|
+ QUOTA_NL_A_CAUSED_ID = 6,
|
|
+ QUOTA_NL_A_PAD = 7,
|
|
+ __QUOTA_NL_A_MAX = 8,
|
|
+};
|
|
+
|
|
+struct proc_maps_private {
|
|
+ struct inode___2 *inode;
|
|
+ struct task_struct *task;
|
|
+ struct mm_struct *mm;
|
|
+ struct vm_area_struct *tail_vma;
|
|
+ struct mempolicy *task_mempolicy;
|
|
+};
|
|
+
|
|
+struct mem_size_stats {
|
|
+ long unsigned int resident;
|
|
+ long unsigned int shared_clean;
|
|
+ long unsigned int shared_dirty;
|
|
+ long unsigned int private_clean;
|
|
+ long unsigned int private_dirty;
|
|
+ long unsigned int referenced;
|
|
+ long unsigned int anonymous;
|
|
+ long unsigned int lazyfree;
|
|
+ long unsigned int anonymous_thp;
|
|
+ long unsigned int shmem_thp;
|
|
+ long unsigned int swap;
|
|
+ long unsigned int shared_hugetlb;
|
|
+ long unsigned int private_hugetlb;
|
|
+ u64 pss;
|
|
+ u64 pss_locked;
|
|
+ u64 swap_pss;
|
|
+ bool check_shmem_swap;
|
|
+};
|
|
+
|
|
+enum clear_refs_types {
|
|
+ CLEAR_REFS_ALL = 1,
|
|
+ CLEAR_REFS_ANON = 2,
|
|
+ CLEAR_REFS_MAPPED = 3,
|
|
+ CLEAR_REFS_SOFT_DIRTY = 4,
|
|
+ CLEAR_REFS_MM_HIWATER_RSS = 5,
|
|
+ CLEAR_REFS_LAST = 6,
|
|
+};
|
|
+
|
|
+struct clear_refs_private {
|
|
+ enum clear_refs_types type;
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ u64 pme;
|
|
+} pagemap_entry_t;
|
|
+
|
|
+struct pagemapread {
|
|
+ int pos;
|
|
+ int len;
|
|
+ pagemap_entry_t *buffer;
|
|
+ bool show_pfn;
|
|
+};
|
|
+
|
|
+struct numa_maps {
|
|
+ long unsigned int pages;
|
|
+ long unsigned int anon;
|
|
+ long unsigned int active;
|
|
+ long unsigned int writeback;
|
|
+ long unsigned int mapcount_max;
|
|
+ long unsigned int dirty;
|
|
+ long unsigned int swapcache;
|
|
+ long unsigned int node[1024];
|
|
+};
|
|
+
|
|
+struct numa_maps_private {
|
|
+ struct proc_maps_private proc_maps;
|
|
+ struct numa_maps md;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ HIDEPID_OFF = 0,
|
|
+ HIDEPID_NO_ACCESS = 1,
|
|
+ HIDEPID_INVISIBLE = 2,
|
|
+};
|
|
+
|
|
+struct pde_opener {
|
|
+ struct file___2 *file;
|
|
+ struct list_head lh;
|
|
+ bool closing;
|
|
+ struct completion *c;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ BIAS = -2147483648,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ Opt_gid = 0,
|
|
+ Opt_hidepid = 1,
|
|
+ Opt_err = 2,
|
|
+};
|
|
+
|
|
+struct reciprocal_value {
|
|
+ u32 m;
|
|
+ u8 sh1;
|
|
+ u8 sh2;
|
|
+};
|
|
+
|
|
+struct flex_array_part;
|
|
+
|
|
+struct flex_array {
|
|
+ union {
|
|
+ struct {
|
|
+ int element_size;
|
|
+ int total_nr_elements;
|
|
+ int elems_per_part;
|
|
+ struct reciprocal_value reciprocal_elems;
|
|
+ struct flex_array_part *parts[0];
|
|
+ };
|
|
+ char padding[4096];
|
|
+ };
|
|
+};
|
|
+
|
|
+typedef struct dentry___2 *instantiate_t(struct dentry___2 *, struct task_struct *, const void *);
|
|
+
|
|
+struct pid_entry {
|
|
+ const char *name;
|
|
+ unsigned int len;
|
|
+ umode_t mode;
|
|
+ const struct inode_operations___2 *iop;
|
|
+ const struct file_operations___2 *fop;
|
|
+ union proc_op op;
|
|
+};
|
|
+
|
|
+struct limit_names {
|
|
+ const char *name;
|
|
+ const char *unit;
|
|
+};
|
|
+
|
|
+struct map_files_info {
|
|
+ long unsigned int start;
|
|
+ long unsigned int end;
|
|
+ fmode_t mode;
|
|
+};
|
|
+
|
|
+struct timers_private {
|
|
+ struct pid *pid;
|
|
+ struct task_struct *task;
|
|
+ struct sighand_struct *sighand;
|
|
+ struct pid_namespace *ns;
|
|
+ long unsigned int flags;
|
|
+};
|
|
+
|
|
+struct tgid_iter {
|
|
+ unsigned int tgid;
|
|
+ struct task_struct *task;
|
|
+};
|
|
+
|
|
+typedef struct dentry___2 *instantiate_t___2(struct dentry___2 *, struct task_struct___2 *, const void *);
|
|
+
|
|
+struct fd_data {
|
|
+ fmode_t mode;
|
|
+ unsigned int fd;
|
|
+};
|
|
+
|
|
+struct seq_net_private {
|
|
+ struct net *net;
|
|
+};
|
|
+
|
|
+struct vmcore {
|
|
+ struct list_head list;
|
|
+ long long unsigned int paddr;
|
|
+ long long unsigned int size;
|
|
+ loff_t offset;
|
|
+};
|
|
+
|
|
+struct vmcoredd_node {
|
|
+ struct list_head list;
|
|
+ void *buf;
|
|
+ unsigned int size;
|
|
+};
|
|
+
|
|
+typedef struct elf64_note Elf64_Nhdr;
|
|
+
|
|
+struct vmcoredd_header {
|
|
+ __u32 n_namesz;
|
|
+ __u32 n_descsz;
|
|
+ __u32 n_type;
|
|
+ __u8 name[8];
|
|
+ __u8 dump_name[44];
|
|
+};
|
|
+
|
|
+struct vmcoredd_data {
|
|
+ char dump_name[44];
|
|
+ unsigned int size;
|
|
+ int (*vmcoredd_callback)(struct vmcoredd_data *, void *);
|
|
+};
|
|
+
|
|
+struct kernfs_iattrs {
|
|
+ struct iattr___2 ia_iattr;
|
|
+ void *ia_secdata;
|
|
+ u32 ia_secdata_len;
|
|
+ struct simple_xattrs xattrs;
|
|
+};
|
|
+
|
|
+struct kernfs_super_info {
|
|
+ struct super_block___2 *sb;
|
|
+ struct kernfs_root___2 *root;
|
|
+ const void *ns;
|
|
+ struct list_head node;
|
|
+};
|
|
+
|
|
+enum kernfs_node_flag {
|
|
+ KERNFS_ACTIVATED = 16,
|
|
+ KERNFS_NS = 32,
|
|
+ KERNFS_HAS_SEQ_SHOW = 64,
|
|
+ KERNFS_HAS_MMAP = 128,
|
|
+ KERNFS_LOCKDEP = 256,
|
|
+ KERNFS_SUICIDAL = 1024,
|
|
+ KERNFS_SUICIDED = 2048,
|
|
+ KERNFS_EMPTY_DIR = 4096,
|
|
+ KERNFS_HAS_RELEASE = 8192,
|
|
+};
|
|
+
|
|
+struct kernfs_open_node {
|
|
+ atomic_t refcnt;
|
|
+ atomic_t event;
|
|
+ wait_queue_head_t poll;
|
|
+ struct list_head files;
|
|
+};
|
|
+
|
|
+struct config_group;
|
|
+
|
|
+struct config_item_type;
|
|
+
|
|
+struct config_item {
|
|
+ char *ci_name;
|
|
+ char ci_namebuf[20];
|
|
+ struct kref ci_kref;
|
|
+ struct list_head ci_entry;
|
|
+ struct config_item *ci_parent;
|
|
+ struct config_group *ci_group;
|
|
+ const struct config_item_type *ci_type;
|
|
+ struct dentry___2 *ci_dentry;
|
|
+};
|
|
+
|
|
+struct configfs_subsystem;
|
|
+
|
|
+struct config_group {
|
|
+ struct config_item cg_item;
|
|
+ struct list_head cg_children;
|
|
+ struct configfs_subsystem *cg_subsys;
|
|
+ struct list_head default_groups;
|
|
+ struct list_head group_entry;
|
|
+};
|
|
+
|
|
+struct configfs_item_operations;
|
|
+
|
|
+struct configfs_group_operations;
|
|
+
|
|
+struct configfs_attribute;
|
|
+
|
|
+struct configfs_bin_attribute;
|
|
+
|
|
+struct config_item_type {
|
|
+ struct module *ct_owner;
|
|
+ struct configfs_item_operations *ct_item_ops;
|
|
+ struct configfs_group_operations *ct_group_ops;
|
|
+ struct configfs_attribute **ct_attrs;
|
|
+ struct configfs_bin_attribute **ct_bin_attrs;
|
|
+};
|
|
+
|
|
+struct configfs_item_operations {
|
|
+ void (*release)(struct config_item *);
|
|
+ int (*allow_link)(struct config_item *, struct config_item *);
|
|
+ void (*drop_link)(struct config_item *, struct config_item *);
|
|
+};
|
|
+
|
|
+struct configfs_group_operations {
|
|
+ struct config_item * (*make_item)(struct config_group *, const char *);
|
|
+ struct config_group * (*make_group)(struct config_group *, const char *);
|
|
+ int (*commit_item)(struct config_item *);
|
|
+ void (*disconnect_notify)(struct config_group *, struct config_item *);
|
|
+ void (*drop_item)(struct config_group *, struct config_item *);
|
|
+};
|
|
+
|
|
+struct configfs_attribute {
|
|
+ const char *ca_name;
|
|
+ struct module *ca_owner;
|
|
+ umode_t ca_mode;
|
|
+ ssize_t (*show)(struct config_item *, char *);
|
|
+ ssize_t (*store)(struct config_item *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct configfs_bin_attribute {
|
|
+ struct configfs_attribute cb_attr;
|
|
+ void *cb_private;
|
|
+ size_t cb_max_size;
|
|
+ ssize_t (*read)(struct config_item *, void *, size_t);
|
|
+ ssize_t (*write)(struct config_item *, const void *, size_t);
|
|
+};
|
|
+
|
|
+struct configfs_subsystem {
|
|
+ struct config_group su_group;
|
|
+ struct mutex su_mutex;
|
|
+};
|
|
+
|
|
+struct configfs_fragment {
|
|
+ atomic_t frag_count;
|
|
+ struct rw_semaphore frag_sem;
|
|
+ bool frag_dead;
|
|
+};
|
|
+
|
|
+struct configfs_dirent {
|
|
+ atomic_t s_count;
|
|
+ int s_dependent_count;
|
|
+ struct list_head s_sibling;
|
|
+ struct list_head s_children;
|
|
+ struct list_head s_links;
|
|
+ void *s_element;
|
|
+ int s_type;
|
|
+ umode_t s_mode;
|
|
+ struct dentry___2 *s_dentry;
|
|
+ struct iattr___2 *s_iattr;
|
|
+ struct configfs_fragment *s_frag;
|
|
+};
|
|
+
|
|
+struct configfs_buffer {
|
|
+ size_t count;
|
|
+ loff_t pos;
|
|
+ char *page;
|
|
+ struct configfs_item_operations *ops;
|
|
+ struct mutex mutex;
|
|
+ int needs_read_fill;
|
|
+ bool read_in_progress;
|
|
+ bool write_in_progress;
|
|
+ char *bin_buffer;
|
|
+ int bin_buffer_size;
|
|
+ int cb_max_size;
|
|
+ struct config_item *item;
|
|
+ struct module___2 *owner;
|
|
+ union {
|
|
+ struct configfs_attribute *attr;
|
|
+ struct configfs_bin_attribute *bin_attr;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct configfs_symlink {
|
|
+ struct list_head sl_list;
|
|
+ struct config_item *sl_target;
|
|
+};
|
|
+
|
|
+struct pts_mount_opts {
|
|
+ int setuid;
|
|
+ int setgid;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ umode_t mode;
|
|
+ umode_t ptmxmode;
|
|
+ int reserve;
|
|
+ int max;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ Opt_uid = 0,
|
|
+ Opt_gid___2 = 1,
|
|
+ Opt_mode = 2,
|
|
+ Opt_ptmxmode = 3,
|
|
+ Opt_newinstance = 4,
|
|
+ Opt_max = 5,
|
|
+ Opt_err___2 = 6,
|
|
+};
|
|
+
|
|
+struct pts_fs_info {
|
|
+ struct ida allocated_ptys;
|
|
+ struct pts_mount_opts mount_opts;
|
|
+ struct super_block___2 *sb;
|
|
+ struct dentry___2 *ptmx_dentry;
|
|
+};
|
|
+
|
|
+struct dcookie_struct {
|
|
+ struct path___2 path;
|
|
+ struct list_head hash_list;
|
|
+};
|
|
+
|
|
+struct dcookie_user {
|
|
+ struct list_head next;
|
|
+};
|
|
+
|
|
+struct ramfs_mount_opts {
|
|
+ umode_t mode;
|
|
+};
|
|
+
|
|
+struct ramfs_fs_info {
|
|
+ struct ramfs_mount_opts mount_opts;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ Opt_mode___2 = 0,
|
|
+ Opt_err___3 = 1,
|
|
+};
|
|
+
|
|
+struct hugetlbfs_config {
|
|
+ struct hstate *hstate;
|
|
+ long int max_hpages;
|
|
+ long int nr_inodes;
|
|
+ long int min_hpages;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ umode_t mode;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ Opt_size = 0,
|
|
+ Opt_nr_inodes = 1,
|
|
+ Opt_mode___3 = 2,
|
|
+ Opt_uid___2 = 3,
|
|
+ Opt_gid___3 = 4,
|
|
+ Opt_pagesize = 5,
|
|
+ Opt_min_size = 6,
|
|
+ Opt_err___4 = 7,
|
|
+};
|
|
+
|
|
+struct hf_args {
|
|
+ struct file___2 *file;
|
|
+ struct task_struct *parent_task;
|
|
+ struct mm_struct *mm;
|
|
+ struct shared_policy *shared_policy;
|
|
+ struct hstate *hstate;
|
|
+ struct address_space *mapping;
|
|
+ int error;
|
|
+};
|
|
+
|
|
+enum hugetlbfs_size_type {
|
|
+ NO_SIZE = 0,
|
|
+ SIZE_STD = 1,
|
|
+ SIZE_PERCENT = 2,
|
|
+};
|
|
+
|
|
+struct getdents_callback___2 {
|
|
+ struct dir_context ctx;
|
|
+ char *name;
|
|
+ u64 ino;
|
|
+ int found;
|
|
+ int sequence;
|
|
+};
|
|
+
|
|
+typedef u16 wchar_t;
|
|
+
|
|
+typedef u32 unicode_t;
|
|
+
|
|
+struct nls_table {
|
|
+ const char *charset;
|
|
+ const char *alias;
|
|
+ int (*uni2char)(wchar_t, unsigned char *, int);
|
|
+ int (*char2uni)(const unsigned char *, int, wchar_t *);
|
|
+ const unsigned char *charset2lower;
|
|
+ const unsigned char *charset2upper;
|
|
+ struct module___2 *owner;
|
|
+ struct nls_table *next;
|
|
+};
|
|
+
|
|
+enum utf16_endian {
|
|
+ UTF16_HOST_ENDIAN = 0,
|
|
+ UTF16_LITTLE_ENDIAN = 1,
|
|
+ UTF16_BIG_ENDIAN = 2,
|
|
+};
|
|
+
|
|
+struct utf8_table {
|
|
+ int cmask;
|
|
+ int cval;
|
|
+ int shift;
|
|
+ long int lmask;
|
|
+ long int lval;
|
|
+};
|
|
+
|
|
+typedef unsigned int autofs_wqt_t;
|
|
+
|
|
+struct autofs_sb_info;
|
|
+
|
|
+struct autofs_info {
|
|
+ struct dentry___2 *dentry;
|
|
+ struct inode___2 *inode;
|
|
+ int flags;
|
|
+ struct completion expire_complete;
|
|
+ struct list_head active;
|
|
+ int active_count;
|
|
+ struct list_head expiring;
|
|
+ struct autofs_sb_info *sbi;
|
|
+ long unsigned int last_used;
|
|
+ atomic_t count;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct autofs_wait_queue;
|
|
+
|
|
+struct autofs_sb_info {
|
|
+ u32 magic;
|
|
+ int pipefd;
|
|
+ struct file___2 *pipe;
|
|
+ struct pid___2 *oz_pgrp;
|
|
+ int catatonic;
|
|
+ int version;
|
|
+ int sub_version;
|
|
+ int min_proto;
|
|
+ int max_proto;
|
|
+ long unsigned int exp_timeout;
|
|
+ unsigned int type;
|
|
+ struct super_block___2 *sb;
|
|
+ struct mutex wq_mutex;
|
|
+ struct mutex pipe_mutex;
|
|
+ spinlock_t fs_lock;
|
|
+ struct autofs_wait_queue *queues;
|
|
+ spinlock_t lookup_lock;
|
|
+ struct list_head active_list;
|
|
+ struct list_head expiring_list;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct autofs_wait_queue {
|
|
+ wait_queue_head_t queue;
|
|
+ struct autofs_wait_queue *next;
|
|
+ autofs_wqt_t wait_queue_token;
|
|
+ struct qstr name;
|
|
+ u32 dev;
|
|
+ u64 ino;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ pid_t pid;
|
|
+ pid_t tgid;
|
|
+ int status;
|
|
+ unsigned int wait_ctr;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ Opt_err___5 = 0,
|
|
+ Opt_fd = 1,
|
|
+ Opt_uid___3 = 2,
|
|
+ Opt_gid___4 = 3,
|
|
+ Opt_pgrp = 4,
|
|
+ Opt_minproto = 5,
|
|
+ Opt_maxproto = 6,
|
|
+ Opt_indirect = 7,
|
|
+ Opt_direct = 8,
|
|
+ Opt_offset = 9,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ AUTOFS_IOC_READY_CMD = 96,
|
|
+ AUTOFS_IOC_FAIL_CMD = 97,
|
|
+ AUTOFS_IOC_CATATONIC_CMD = 98,
|
|
+ AUTOFS_IOC_PROTOVER_CMD = 99,
|
|
+ AUTOFS_IOC_SETTIMEOUT_CMD = 100,
|
|
+ AUTOFS_IOC_EXPIRE_CMD = 101,
|
|
+};
|
|
+
|
|
+enum autofs_notify {
|
|
+ NFY_NONE = 0,
|
|
+ NFY_MOUNT = 1,
|
|
+ NFY_EXPIRE = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ AUTOFS_IOC_EXPIRE_MULTI_CMD = 102,
|
|
+ AUTOFS_IOC_PROTOSUBVER_CMD = 103,
|
|
+ AUTOFS_IOC_ASKUMOUNT_CMD = 112,
|
|
+};
|
|
+
|
|
+struct autofs_packet_hdr {
|
|
+ int proto_version;
|
|
+ int type;
|
|
+};
|
|
+
|
|
+struct autofs_packet_missing {
|
|
+ struct autofs_packet_hdr hdr;
|
|
+ autofs_wqt_t wait_queue_token;
|
|
+ int len;
|
|
+ char name[256];
|
|
+};
|
|
+
|
|
+struct autofs_packet_expire {
|
|
+ struct autofs_packet_hdr hdr;
|
|
+ int len;
|
|
+ char name[256];
|
|
+};
|
|
+
|
|
+struct autofs_packet_expire_multi {
|
|
+ struct autofs_packet_hdr hdr;
|
|
+ autofs_wqt_t wait_queue_token;
|
|
+ int len;
|
|
+ char name[256];
|
|
+};
|
|
+
|
|
+union autofs_packet_union {
|
|
+ struct autofs_packet_hdr hdr;
|
|
+ struct autofs_packet_missing missing;
|
|
+ struct autofs_packet_expire expire;
|
|
+ struct autofs_packet_expire_multi expire_multi;
|
|
+};
|
|
+
|
|
+struct autofs_v5_packet {
|
|
+ struct autofs_packet_hdr hdr;
|
|
+ autofs_wqt_t wait_queue_token;
|
|
+ __u32 dev;
|
|
+ __u64 ino;
|
|
+ __u32 uid;
|
|
+ __u32 gid;
|
|
+ __u32 pid;
|
|
+ __u32 tgid;
|
|
+ __u32 len;
|
|
+ char name[256];
|
|
+};
|
|
+
|
|
+typedef struct autofs_v5_packet autofs_packet_missing_indirect_t;
|
|
+
|
|
+typedef struct autofs_v5_packet autofs_packet_expire_indirect_t;
|
|
+
|
|
+typedef struct autofs_v5_packet autofs_packet_missing_direct_t;
|
|
+
|
|
+typedef struct autofs_v5_packet autofs_packet_expire_direct_t;
|
|
+
|
|
+union autofs_v5_packet_union {
|
|
+ struct autofs_packet_hdr hdr;
|
|
+ struct autofs_v5_packet v5_packet;
|
|
+ autofs_packet_missing_indirect_t missing_indirect;
|
|
+ autofs_packet_expire_indirect_t expire_indirect;
|
|
+ autofs_packet_missing_direct_t missing_direct;
|
|
+ autofs_packet_expire_direct_t expire_direct;
|
|
+};
|
|
+
|
|
+struct args_protover {
|
|
+ __u32 version;
|
|
+};
|
|
+
|
|
+struct args_protosubver {
|
|
+ __u32 sub_version;
|
|
+};
|
|
+
|
|
+struct args_openmount {
|
|
+ __u32 devid;
|
|
+};
|
|
+
|
|
+struct args_ready {
|
|
+ __u32 token;
|
|
+};
|
|
+
|
|
+struct args_fail {
|
|
+ __u32 token;
|
|
+ __s32 status;
|
|
+};
|
|
+
|
|
+struct args_setpipefd {
|
|
+ __s32 pipefd;
|
|
+};
|
|
+
|
|
+struct args_timeout {
|
|
+ __u64 timeout;
|
|
+};
|
|
+
|
|
+struct args_requester {
|
|
+ __u32 uid;
|
|
+ __u32 gid;
|
|
+};
|
|
+
|
|
+struct args_expire {
|
|
+ __u32 how;
|
|
+};
|
|
+
|
|
+struct args_askumount {
|
|
+ __u32 may_umount;
|
|
+};
|
|
+
|
|
+struct args_in {
|
|
+ __u32 type;
|
|
+};
|
|
+
|
|
+struct args_out {
|
|
+ __u32 devid;
|
|
+ __u32 magic;
|
|
+};
|
|
+
|
|
+struct args_ismountpoint {
|
|
+ union {
|
|
+ struct args_in in;
|
|
+ struct args_out out;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct autofs_dev_ioctl {
|
|
+ __u32 ver_major;
|
|
+ __u32 ver_minor;
|
|
+ __u32 size;
|
|
+ __s32 ioctlfd;
|
|
+ union {
|
|
+ struct args_protover protover;
|
|
+ struct args_protosubver protosubver;
|
|
+ struct args_openmount openmount;
|
|
+ struct args_ready ready;
|
|
+ struct args_fail fail;
|
|
+ struct args_setpipefd setpipefd;
|
|
+ struct args_timeout timeout;
|
|
+ struct args_requester requester;
|
|
+ struct args_expire expire;
|
|
+ struct args_askumount askumount;
|
|
+ struct args_ismountpoint ismountpoint;
|
|
+ };
|
|
+ char path[0];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ AUTOFS_DEV_IOCTL_VERSION_CMD = 113,
|
|
+ AUTOFS_DEV_IOCTL_PROTOVER_CMD = 114,
|
|
+ AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD = 115,
|
|
+ AUTOFS_DEV_IOCTL_OPENMOUNT_CMD = 116,
|
|
+ AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD = 117,
|
|
+ AUTOFS_DEV_IOCTL_READY_CMD = 118,
|
|
+ AUTOFS_DEV_IOCTL_FAIL_CMD = 119,
|
|
+ AUTOFS_DEV_IOCTL_SETPIPEFD_CMD = 120,
|
|
+ AUTOFS_DEV_IOCTL_CATATONIC_CMD = 121,
|
|
+ AUTOFS_DEV_IOCTL_TIMEOUT_CMD = 122,
|
|
+ AUTOFS_DEV_IOCTL_REQUESTER_CMD = 123,
|
|
+ AUTOFS_DEV_IOCTL_EXPIRE_CMD = 124,
|
|
+ AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD = 125,
|
|
+ AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD = 126,
|
|
+};
|
|
+
|
|
+typedef int (*ioctl_fn)(struct file___2 *, struct autofs_sb_info *, struct autofs_dev_ioctl *);
|
|
+
|
|
+typedef struct vfsmount___2 * (*debugfs_automount_t)(struct dentry___2 *, void *);
|
|
+
|
|
+struct debugfs_fsdata {
|
|
+ const struct file_operations___2 *real_fops;
|
|
+ refcount_t active_users;
|
|
+ struct completion active_users_drained;
|
|
+};
|
|
+
|
|
+struct debugfs_mount_opts {
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ umode_t mode;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ Opt_uid___4 = 0,
|
|
+ Opt_gid___5 = 1,
|
|
+ Opt_mode___4 = 2,
|
|
+ Opt_err___6 = 3,
|
|
+};
|
|
+
|
|
+struct debugfs_fs_info {
|
|
+ struct debugfs_mount_opts mount_opts;
|
|
+};
|
|
+
|
|
+struct debugfs_reg32 {
|
|
+ char *name;
|
|
+ long unsigned int offset;
|
|
+};
|
|
+
|
|
+struct debugfs_regset32 {
|
|
+ const struct debugfs_reg32 *regs;
|
|
+ int nregs;
|
|
+ void *base;
|
|
+};
|
|
+
|
|
+struct array_data {
|
|
+ void *array;
|
|
+ u32 elements;
|
|
+};
|
|
+
|
|
+struct debugfs_devm_entry {
|
|
+ int (*read)(struct seq_file___2 *, void *);
|
|
+ struct device___2 *dev;
|
|
+};
|
|
+
|
|
+struct tracefs_dir_ops {
|
|
+ int (*mkdir)(const char *);
|
|
+ int (*rmdir)(const char *);
|
|
+};
|
|
+
|
|
+struct tracefs_mount_opts {
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ umode_t mode;
|
|
+};
|
|
+
|
|
+struct tracefs_fs_info {
|
|
+ struct tracefs_mount_opts mount_opts;
|
|
+};
|
|
+
|
|
+enum pstore_type_id {
|
|
+ PSTORE_TYPE_DMESG = 0,
|
|
+ PSTORE_TYPE_MCE = 1,
|
|
+ PSTORE_TYPE_CONSOLE = 2,
|
|
+ PSTORE_TYPE_FTRACE = 3,
|
|
+ PSTORE_TYPE_PPC_RTAS = 4,
|
|
+ PSTORE_TYPE_PPC_OF = 5,
|
|
+ PSTORE_TYPE_PPC_COMMON = 6,
|
|
+ PSTORE_TYPE_PMSG = 7,
|
|
+ PSTORE_TYPE_PPC_OPAL = 8,
|
|
+ PSTORE_TYPE_UNKNOWN = 255,
|
|
+};
|
|
+
|
|
+struct pstore_info;
|
|
+
|
|
+struct pstore_record {
|
|
+ struct pstore_info *psi;
|
|
+ enum pstore_type_id type;
|
|
+ u64 id;
|
|
+ struct timespec64 time;
|
|
+ char *buf;
|
|
+ ssize_t size;
|
|
+ ssize_t ecc_notice_size;
|
|
+ int count;
|
|
+ enum kmsg_dump_reason reason;
|
|
+ unsigned int part;
|
|
+ bool compressed;
|
|
+};
|
|
+
|
|
+struct pstore_info {
|
|
+ struct module___2 *owner;
|
|
+ char *name;
|
|
+ struct semaphore buf_lock;
|
|
+ char *buf;
|
|
+ size_t bufsize;
|
|
+ struct mutex read_mutex;
|
|
+ int flags;
|
|
+ void *data;
|
|
+ int (*open)(struct pstore_info *);
|
|
+ int (*close)(struct pstore_info *);
|
|
+ ssize_t (*read)(struct pstore_record *);
|
|
+ int (*write)(struct pstore_record *);
|
|
+ int (*write_user)(struct pstore_record *, const char *);
|
|
+ int (*erase)(struct pstore_record *);
|
|
+};
|
|
+
|
|
+struct pstore_ftrace_record {
|
|
+ long unsigned int ip;
|
|
+ long unsigned int parent_ip;
|
|
+ u64 ts;
|
|
+};
|
|
+
|
|
+struct pstore_private {
|
|
+ struct list_head list;
|
|
+ struct pstore_record *record;
|
|
+ size_t total_size;
|
|
+};
|
|
+
|
|
+struct pstore_ftrace_seq_data {
|
|
+ const void *ptr;
|
|
+ size_t off;
|
|
+ size_t size;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ Opt_kmsg_bytes = 0,
|
|
+ Opt_err___7 = 1,
|
|
+};
|
|
+
|
|
+struct pstore_zbackend {
|
|
+ int (*zbufsize)(size_t);
|
|
+ const char *name;
|
|
+};
|
|
+
|
|
+struct efi_variable {
|
|
+ efi_char16_t VariableName[512];
|
|
+ efi_guid_t VendorGuid;
|
|
+ long unsigned int DataSize;
|
|
+ __u8 Data[1024];
|
|
+ efi_status_t Status;
|
|
+ __u32 Attributes;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct efivar_entry {
|
|
+ struct efi_variable var;
|
|
+ struct list_head list;
|
|
+ struct kobject kobj;
|
|
+ bool scanning;
|
|
+ bool deleting;
|
|
+};
|
|
+
|
|
+typedef unsigned int __kernel_mode_t;
|
|
+
|
|
+struct ipc64_perm {
|
|
+ __kernel_key_t key;
|
|
+ __kernel_uid32_t uid;
|
|
+ __kernel_gid32_t gid;
|
|
+ __kernel_uid32_t cuid;
|
|
+ __kernel_gid32_t cgid;
|
|
+ __kernel_mode_t mode;
|
|
+ unsigned char __pad1[0];
|
|
+ short unsigned int seq;
|
|
+ short unsigned int __pad2;
|
|
+ __kernel_ulong_t __unused1;
|
|
+ __kernel_ulong_t __unused2;
|
|
+};
|
|
+
|
|
+typedef u32 __compat_gid32_t;
|
|
+
|
|
+typedef s32 compat_key_t;
|
|
+
|
|
+struct compat_ipc64_perm {
|
|
+ compat_key_t key;
|
|
+ __compat_uid32_t uid;
|
|
+ __compat_gid32_t gid;
|
|
+ __compat_uid32_t cuid;
|
|
+ __compat_gid32_t cgid;
|
|
+ short unsigned int mode;
|
|
+ short unsigned int __pad1;
|
|
+ short unsigned int seq;
|
|
+ short unsigned int __pad2;
|
|
+ compat_ulong_t unused1;
|
|
+ compat_ulong_t unused2;
|
|
+};
|
|
+
|
|
+struct compat_ipc_perm {
|
|
+ key_t key;
|
|
+ __compat_uid_t uid;
|
|
+ __compat_gid_t gid;
|
|
+ __compat_uid_t cuid;
|
|
+ __compat_gid_t cgid;
|
|
+ compat_mode_t mode;
|
|
+ short unsigned int seq;
|
|
+};
|
|
+
|
|
+struct ipc_perm {
|
|
+ __kernel_key_t key;
|
|
+ __kernel_uid_t uid;
|
|
+ __kernel_gid_t gid;
|
|
+ __kernel_uid_t cuid;
|
|
+ __kernel_gid_t cgid;
|
|
+ __kernel_mode_t mode;
|
|
+ short unsigned int seq;
|
|
+};
|
|
+
|
|
+struct ipc_params {
|
|
+ key_t key;
|
|
+ int flg;
|
|
+ union {
|
|
+ size_t size;
|
|
+ int nsems;
|
|
+ } u;
|
|
+};
|
|
+
|
|
+struct ipc_ops {
|
|
+ int (*getnew)(struct ipc_namespace *, struct ipc_params *);
|
|
+ int (*associate)(struct kern_ipc_perm *, int);
|
|
+ int (*more_checks)(struct kern_ipc_perm *, struct ipc_params *);
|
|
+};
|
|
+
|
|
+struct ipc_proc_iface {
|
|
+ const char *path;
|
|
+ const char *header;
|
|
+ int ids;
|
|
+ int (*show)(struct seq_file *, void *);
|
|
+};
|
|
+
|
|
+struct ipc_proc_iter {
|
|
+ struct ipc_namespace *ns;
|
|
+ struct pid_namespace *pid_ns;
|
|
+ struct ipc_proc_iface *iface;
|
|
+};
|
|
+
|
|
+struct msg_msgseg;
|
|
+
|
|
+struct msg_msg {
|
|
+ struct list_head m_list;
|
|
+ long int m_type;
|
|
+ size_t m_ts;
|
|
+ struct msg_msgseg *next;
|
|
+ void *security;
|
|
+};
|
|
+
|
|
+struct msg_msgseg {
|
|
+ struct msg_msgseg *next;
|
|
+};
|
|
+
|
|
+typedef int __kernel_ipc_pid_t;
|
|
+
|
|
+struct msgbuf {
|
|
+ __kernel_long_t mtype;
|
|
+ char mtext[1];
|
|
+};
|
|
+
|
|
+struct msg;
|
|
+
|
|
+struct msqid_ds {
|
|
+ struct ipc_perm msg_perm;
|
|
+ struct msg *msg_first;
|
|
+ struct msg *msg_last;
|
|
+ __kernel_time_t msg_stime;
|
|
+ __kernel_time_t msg_rtime;
|
|
+ __kernel_time_t msg_ctime;
|
|
+ long unsigned int msg_lcbytes;
|
|
+ long unsigned int msg_lqbytes;
|
|
+ short unsigned int msg_cbytes;
|
|
+ short unsigned int msg_qnum;
|
|
+ short unsigned int msg_qbytes;
|
|
+ __kernel_ipc_pid_t msg_lspid;
|
|
+ __kernel_ipc_pid_t msg_lrpid;
|
|
+};
|
|
+
|
|
+struct msqid64_ds {
|
|
+ struct ipc64_perm msg_perm;
|
|
+ __kernel_time_t msg_stime;
|
|
+ __kernel_time_t msg_rtime;
|
|
+ __kernel_time_t msg_ctime;
|
|
+ long unsigned int msg_cbytes;
|
|
+ long unsigned int msg_qnum;
|
|
+ long unsigned int msg_qbytes;
|
|
+ __kernel_pid_t msg_lspid;
|
|
+ __kernel_pid_t msg_lrpid;
|
|
+ long unsigned int __unused4;
|
|
+ long unsigned int __unused5;
|
|
+};
|
|
+
|
|
+struct msginfo {
|
|
+ int msgpool;
|
|
+ int msgmap;
|
|
+ int msgmax;
|
|
+ int msgmnb;
|
|
+ int msgmni;
|
|
+ int msgssz;
|
|
+ int msgtql;
|
|
+ short unsigned int msgseg;
|
|
+};
|
|
+
|
|
+typedef u16 compat_ipc_pid_t;
|
|
+
|
|
+struct compat_msqid64_ds {
|
|
+ struct compat_ipc64_perm msg_perm;
|
|
+ compat_ulong_t msg_stime;
|
|
+ compat_ulong_t msg_stime_high;
|
|
+ compat_ulong_t msg_rtime;
|
|
+ compat_ulong_t msg_rtime_high;
|
|
+ compat_ulong_t msg_ctime;
|
|
+ compat_ulong_t msg_ctime_high;
|
|
+ compat_ulong_t msg_cbytes;
|
|
+ compat_ulong_t msg_qnum;
|
|
+ compat_ulong_t msg_qbytes;
|
|
+ compat_pid_t msg_lspid;
|
|
+ compat_pid_t msg_lrpid;
|
|
+ compat_ulong_t __unused4;
|
|
+ compat_ulong_t __unused5;
|
|
+};
|
|
+
|
|
+struct msg_queue {
|
|
+ struct kern_ipc_perm q_perm;
|
|
+ time64_t q_stime;
|
|
+ time64_t q_rtime;
|
|
+ time64_t q_ctime;
|
|
+ long unsigned int q_cbytes;
|
|
+ long unsigned int q_qnum;
|
|
+ long unsigned int q_qbytes;
|
|
+ struct pid *q_lspid;
|
|
+ struct pid *q_lrpid;
|
|
+ struct list_head q_messages;
|
|
+ struct list_head q_receivers;
|
|
+ struct list_head q_senders;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct msg_receiver {
|
|
+ struct list_head r_list;
|
|
+ struct task_struct *r_tsk;
|
|
+ int r_mode;
|
|
+ long int r_msgtype;
|
|
+ long int r_maxsize;
|
|
+ struct msg_msg *r_msg;
|
|
+};
|
|
+
|
|
+struct msg_sender {
|
|
+ struct list_head list;
|
|
+ struct task_struct *tsk;
|
|
+ size_t msgsz;
|
|
+};
|
|
+
|
|
+struct compat_msqid_ds {
|
|
+ struct compat_ipc_perm msg_perm;
|
|
+ compat_uptr_t msg_first;
|
|
+ compat_uptr_t msg_last;
|
|
+ compat_time_t msg_stime;
|
|
+ compat_time_t msg_rtime;
|
|
+ compat_time_t msg_ctime;
|
|
+ compat_ulong_t msg_lcbytes;
|
|
+ compat_ulong_t msg_lqbytes;
|
|
+ short unsigned int msg_cbytes;
|
|
+ short unsigned int msg_qnum;
|
|
+ short unsigned int msg_qbytes;
|
|
+ compat_ipc_pid_t msg_lspid;
|
|
+ compat_ipc_pid_t msg_lrpid;
|
|
+};
|
|
+
|
|
+struct compat_msgbuf {
|
|
+ compat_long_t mtype;
|
|
+ char mtext[1];
|
|
+};
|
|
+
|
|
+struct sem;
|
|
+
|
|
+struct sem_queue;
|
|
+
|
|
+struct sem_undo;
|
|
+
|
|
+struct semid_ds {
|
|
+ struct ipc_perm sem_perm;
|
|
+ __kernel_time_t sem_otime;
|
|
+ __kernel_time_t sem_ctime;
|
|
+ struct sem *sem_base;
|
|
+ struct sem_queue *sem_pending;
|
|
+ struct sem_queue **sem_pending_last;
|
|
+ struct sem_undo *undo;
|
|
+ short unsigned int sem_nsems;
|
|
+};
|
|
+
|
|
+struct sem {
|
|
+ int semval;
|
|
+ struct pid *sempid;
|
|
+ spinlock_t lock;
|
|
+ struct list_head pending_alter;
|
|
+ struct list_head pending_const;
|
|
+ time64_t sem_otime;
|
|
+};
|
|
+
|
|
+struct sembuf;
|
|
+
|
|
+struct sem_queue {
|
|
+ struct list_head list;
|
|
+ struct task_struct *sleeper;
|
|
+ struct sem_undo *undo;
|
|
+ struct pid *pid;
|
|
+ int status;
|
|
+ struct sembuf *sops;
|
|
+ struct sembuf *blocking;
|
|
+ int nsops;
|
|
+ bool alter;
|
|
+ bool dupsop;
|
|
+};
|
|
+
|
|
+struct sem_undo {
|
|
+ struct list_head list_proc;
|
|
+ struct callback_head rcu;
|
|
+ struct sem_undo_list *ulp;
|
|
+ struct list_head list_id;
|
|
+ int semid;
|
|
+ short int *semadj;
|
|
+};
|
|
+
|
|
+struct semid64_ds {
|
|
+ struct ipc64_perm sem_perm;
|
|
+ __kernel_time_t sem_otime;
|
|
+ __kernel_ulong_t __unused1;
|
|
+ __kernel_time_t sem_ctime;
|
|
+ __kernel_ulong_t __unused2;
|
|
+ __kernel_ulong_t sem_nsems;
|
|
+ __kernel_ulong_t __unused3;
|
|
+ __kernel_ulong_t __unused4;
|
|
+};
|
|
+
|
|
+struct sembuf {
|
|
+ short unsigned int sem_num;
|
|
+ short int sem_op;
|
|
+ short int sem_flg;
|
|
+};
|
|
+
|
|
+struct seminfo {
|
|
+ int semmap;
|
|
+ int semmni;
|
|
+ int semmns;
|
|
+ int semmnu;
|
|
+ int semmsl;
|
|
+ int semopm;
|
|
+ int semume;
|
|
+ int semusz;
|
|
+ int semvmx;
|
|
+ int semaem;
|
|
+};
|
|
+
|
|
+struct sem_undo_list {
|
|
+ refcount_t refcnt;
|
|
+ spinlock_t lock;
|
|
+ struct list_head list_proc;
|
|
+};
|
|
+
|
|
+struct compat_semid64_ds {
|
|
+ struct compat_ipc64_perm sem_perm;
|
|
+ compat_ulong_t sem_otime;
|
|
+ compat_ulong_t sem_otime_high;
|
|
+ compat_ulong_t sem_ctime;
|
|
+ compat_ulong_t sem_ctime_high;
|
|
+ compat_ulong_t sem_nsems;
|
|
+ compat_ulong_t __unused3;
|
|
+ compat_ulong_t __unused4;
|
|
+};
|
|
+
|
|
+struct sem_array {
|
|
+ struct kern_ipc_perm sem_perm;
|
|
+ time64_t sem_ctime;
|
|
+ struct list_head pending_alter;
|
|
+ struct list_head pending_const;
|
|
+ struct list_head list_id;
|
|
+ int sem_nsems;
|
|
+ int complex_count;
|
|
+ unsigned int use_global_lock;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct sem sems[0];
|
|
+};
|
|
+
|
|
+struct compat_semid_ds {
|
|
+ struct compat_ipc_perm sem_perm;
|
|
+ compat_time_t sem_otime;
|
|
+ compat_time_t sem_ctime;
|
|
+ compat_uptr_t sem_base;
|
|
+ compat_uptr_t sem_pending;
|
|
+ compat_uptr_t sem_pending_last;
|
|
+ compat_uptr_t undo;
|
|
+ short unsigned int sem_nsems;
|
|
+};
|
|
+
|
|
+struct shmid_ds {
|
|
+ struct ipc_perm shm_perm;
|
|
+ int shm_segsz;
|
|
+ __kernel_time_t shm_atime;
|
|
+ __kernel_time_t shm_dtime;
|
|
+ __kernel_time_t shm_ctime;
|
|
+ __kernel_ipc_pid_t shm_cpid;
|
|
+ __kernel_ipc_pid_t shm_lpid;
|
|
+ short unsigned int shm_nattch;
|
|
+ short unsigned int shm_unused;
|
|
+ void *shm_unused2;
|
|
+ void *shm_unused3;
|
|
+};
|
|
+
|
|
+struct shmid64_ds {
|
|
+ struct ipc64_perm shm_perm;
|
|
+ size_t shm_segsz;
|
|
+ __kernel_time_t shm_atime;
|
|
+ __kernel_time_t shm_dtime;
|
|
+ __kernel_time_t shm_ctime;
|
|
+ __kernel_pid_t shm_cpid;
|
|
+ __kernel_pid_t shm_lpid;
|
|
+ long unsigned int shm_nattch;
|
|
+ long unsigned int __unused4;
|
|
+ long unsigned int __unused5;
|
|
+};
|
|
+
|
|
+struct shminfo64 {
|
|
+ long unsigned int shmmax;
|
|
+ long unsigned int shmmin;
|
|
+ long unsigned int shmmni;
|
|
+ long unsigned int shmseg;
|
|
+ long unsigned int shmall;
|
|
+ long unsigned int __unused1;
|
|
+ long unsigned int __unused2;
|
|
+ long unsigned int __unused3;
|
|
+ long unsigned int __unused4;
|
|
+};
|
|
+
|
|
+struct shminfo {
|
|
+ int shmmax;
|
|
+ int shmmin;
|
|
+ int shmmni;
|
|
+ int shmseg;
|
|
+ int shmall;
|
|
+};
|
|
+
|
|
+struct shm_info {
|
|
+ int used_ids;
|
|
+ __kernel_ulong_t shm_tot;
|
|
+ __kernel_ulong_t shm_rss;
|
|
+ __kernel_ulong_t shm_swp;
|
|
+ __kernel_ulong_t swap_attempts;
|
|
+ __kernel_ulong_t swap_successes;
|
|
+};
|
|
+
|
|
+struct compat_shmid64_ds {
|
|
+ struct compat_ipc64_perm shm_perm;
|
|
+ compat_size_t shm_segsz;
|
|
+ compat_ulong_t shm_atime;
|
|
+ compat_ulong_t shm_atime_high;
|
|
+ compat_ulong_t shm_dtime;
|
|
+ compat_ulong_t shm_dtime_high;
|
|
+ compat_ulong_t shm_ctime;
|
|
+ compat_ulong_t shm_ctime_high;
|
|
+ compat_pid_t shm_cpid;
|
|
+ compat_pid_t shm_lpid;
|
|
+ compat_ulong_t shm_nattch;
|
|
+ compat_ulong_t __unused4;
|
|
+ compat_ulong_t __unused5;
|
|
+};
|
|
+
|
|
+struct shmid_kernel {
|
|
+ struct kern_ipc_perm shm_perm;
|
|
+ struct file *shm_file;
|
|
+ long unsigned int shm_nattch;
|
|
+ long unsigned int shm_segsz;
|
|
+ time64_t shm_atim;
|
|
+ time64_t shm_dtim;
|
|
+ time64_t shm_ctim;
|
|
+ struct pid *shm_cprid;
|
|
+ struct pid *shm_lprid;
|
|
+ struct user_struct *mlock_user;
|
|
+ struct task_struct *shm_creator;
|
|
+ struct list_head shm_clist;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct shm_file_data {
|
|
+ int id;
|
|
+ struct ipc_namespace *ns;
|
|
+ struct file *file;
|
|
+ const struct vm_operations_struct *vm_ops;
|
|
+};
|
|
+
|
|
+struct compat_shmid_ds {
|
|
+ struct compat_ipc_perm shm_perm;
|
|
+ int shm_segsz;
|
|
+ compat_time_t shm_atime;
|
|
+ compat_time_t shm_dtime;
|
|
+ compat_time_t shm_ctime;
|
|
+ compat_ipc_pid_t shm_cpid;
|
|
+ compat_ipc_pid_t shm_lpid;
|
|
+ short unsigned int shm_nattch;
|
|
+ short unsigned int shm_unused;
|
|
+ compat_uptr_t shm_unused2;
|
|
+ compat_uptr_t shm_unused3;
|
|
+};
|
|
+
|
|
+struct compat_shminfo64 {
|
|
+ compat_ulong_t shmmax;
|
|
+ compat_ulong_t shmmin;
|
|
+ compat_ulong_t shmmni;
|
|
+ compat_ulong_t shmseg;
|
|
+ compat_ulong_t shmall;
|
|
+ compat_ulong_t __unused1;
|
|
+ compat_ulong_t __unused2;
|
|
+ compat_ulong_t __unused3;
|
|
+ compat_ulong_t __unused4;
|
|
+};
|
|
+
|
|
+struct compat_shm_info {
|
|
+ compat_int_t used_ids;
|
|
+ compat_ulong_t shm_tot;
|
|
+ compat_ulong_t shm_rss;
|
|
+ compat_ulong_t shm_swp;
|
|
+ compat_ulong_t swap_attempts;
|
|
+ compat_ulong_t swap_successes;
|
|
+};
|
|
+
|
|
+struct compat_ipc_kludge {
|
|
+ compat_uptr_t msgp;
|
|
+ compat_long_t msgtyp;
|
|
+};
|
|
+
|
|
+struct posix_msg_tree_node {
|
|
+ struct rb_node rb_node;
|
|
+ struct list_head msg_list;
|
|
+ int priority;
|
|
+};
|
|
+
|
|
+struct ext_wait_queue {
|
|
+ struct task_struct *task;
|
|
+ struct list_head list;
|
|
+ struct msg_msg *msg;
|
|
+ int state;
|
|
+};
|
|
+
|
|
+struct mqueue_inode_info {
|
|
+ spinlock_t lock;
|
|
+ struct inode vfs_inode;
|
|
+ wait_queue_head_t wait_q;
|
|
+ struct rb_root msg_tree;
|
|
+ struct posix_msg_tree_node *node_cache;
|
|
+ struct mq_attr attr;
|
|
+ struct sigevent notify;
|
|
+ struct pid *notify_owner;
|
|
+ u32 notify_self_exec_id;
|
|
+ struct user_namespace *notify_user_ns;
|
|
+ struct user_struct *user;
|
|
+ struct sock *notify_sock;
|
|
+ struct sk_buff *notify_cookie;
|
|
+ struct ext_wait_queue e_wait_q[2];
|
|
+ long unsigned int qsize;
|
|
+};
|
|
+
|
|
+struct compat_mq_attr {
|
|
+ compat_long_t mq_flags;
|
|
+ compat_long_t mq_maxmsg;
|
|
+ compat_long_t mq_msgsize;
|
|
+ compat_long_t mq_curmsgs;
|
|
+ compat_long_t __reserved[4];
|
|
+};
|
|
+
|
|
+enum key_state {
|
|
+ KEY_IS_UNINSTANTIATED = 0,
|
|
+ KEY_IS_POSITIVE = 1,
|
|
+};
|
|
+
|
|
+struct key_user {
|
|
+ struct rb_node node;
|
|
+ struct mutex cons_lock;
|
|
+ spinlock_t lock;
|
|
+ refcount_t usage;
|
|
+ atomic_t nkeys;
|
|
+ atomic_t nikeys;
|
|
+ kuid_t uid;
|
|
+ int qnkeys;
|
|
+ int qnbytes;
|
|
+};
|
|
+
|
|
+struct assoc_array_edit;
|
|
+
|
|
+struct assoc_array_ops {
|
|
+ long unsigned int (*get_key_chunk)(const void *, int);
|
|
+ long unsigned int (*get_object_key_chunk)(const void *, int);
|
|
+ bool (*compare_object)(const void *, const void *);
|
|
+ int (*diff_objects)(const void *, const void *);
|
|
+ void (*free_object)(void *);
|
|
+};
|
|
+
|
|
+struct assoc_array_node {
|
|
+ struct assoc_array_ptr *back_pointer;
|
|
+ u8 parent_slot;
|
|
+ struct assoc_array_ptr *slots[16];
|
|
+ long unsigned int nr_leaves_on_branch;
|
|
+};
|
|
+
|
|
+struct assoc_array_shortcut {
|
|
+ struct assoc_array_ptr *back_pointer;
|
|
+ int parent_slot;
|
|
+ int skip_to_level;
|
|
+ struct assoc_array_ptr *next_node;
|
|
+ long unsigned int index_key[0];
|
|
+};
|
|
+
|
|
+struct assoc_array_edit___2 {
|
|
+ struct callback_head rcu;
|
|
+ struct assoc_array *array;
|
|
+ const struct assoc_array_ops *ops;
|
|
+ const struct assoc_array_ops *ops_for_excised_subtree;
|
|
+ struct assoc_array_ptr *leaf;
|
|
+ struct assoc_array_ptr **leaf_p;
|
|
+ struct assoc_array_ptr *dead_leaf;
|
|
+ struct assoc_array_ptr *new_meta[3];
|
|
+ struct assoc_array_ptr *excised_meta[1];
|
|
+ struct assoc_array_ptr *excised_subtree;
|
|
+ struct assoc_array_ptr **set_backpointers[16];
|
|
+ struct assoc_array_ptr *set_backpointers_to;
|
|
+ struct assoc_array_node *adjust_count_on;
|
|
+ long int adjust_count_by;
|
|
+ struct {
|
|
+ struct assoc_array_ptr **ptr;
|
|
+ struct assoc_array_ptr *to;
|
|
+ } set[2];
|
|
+ struct {
|
|
+ u8 *p;
|
|
+ u8 to;
|
|
+ } set_parent_slot[1];
|
|
+ u8 segment_cache[17];
|
|
+};
|
|
+
|
|
+struct keyring_search_context {
|
|
+ struct keyring_index_key index_key;
|
|
+ const struct cred *cred;
|
|
+ struct key_match_data match_data;
|
|
+ unsigned int flags;
|
|
+ int (*iterator)(const void *, void *);
|
|
+ int skipped_ret;
|
|
+ bool possessed;
|
|
+ key_ref_t result;
|
|
+ time64_t now;
|
|
+};
|
|
+
|
|
+struct keyring_read_iterator_context {
|
|
+ size_t buflen;
|
|
+ size_t count;
|
|
+ key_serial_t *buffer;
|
|
+};
|
|
+
|
|
+struct keyctl_dh_params {
|
|
+ union {
|
|
+ __s32 private;
|
|
+ __s32 priv;
|
|
+ };
|
|
+ __s32 prime;
|
|
+ __s32 base;
|
|
+};
|
|
+
|
|
+struct keyctl_kdf_params {
|
|
+ char *hashname;
|
|
+ char *otherinfo;
|
|
+ __u32 otherinfolen;
|
|
+ __u32 __spare[8];
|
|
+};
|
|
+
|
|
+struct request_key_auth {
|
|
+ struct key *target_key;
|
|
+ struct key *dest_keyring;
|
|
+ const struct cred *cred;
|
|
+ void *callout_info;
|
|
+ size_t callout_len;
|
|
+ pid_t pid;
|
|
+ char op[8];
|
|
+};
|
|
+
|
|
+struct user_key_payload {
|
|
+ struct callback_head rcu;
|
|
+ short unsigned int datalen;
|
|
+ long: 48;
|
|
+ char data[0];
|
|
+};
|
|
+
|
|
+struct big_key_buf {
|
|
+ unsigned int nr_pages;
|
|
+ void *virt;
|
|
+ struct scatterlist *sg;
|
|
+ struct page *pages[0];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ big_key_data = 0,
|
|
+ big_key_path = 1,
|
|
+ big_key_path_2nd_part = 2,
|
|
+ big_key_len = 3,
|
|
+};
|
|
+
|
|
+enum big_key_op {
|
|
+ BIG_KEY_ENC = 0,
|
|
+ BIG_KEY_DEC = 1,
|
|
+};
|
|
+
|
|
+enum hash_algo {
|
|
+ HASH_ALGO_MD4 = 0,
|
|
+ HASH_ALGO_MD5 = 1,
|
|
+ HASH_ALGO_SHA1 = 2,
|
|
+ HASH_ALGO_RIPE_MD_160 = 3,
|
|
+ HASH_ALGO_SHA256 = 4,
|
|
+ HASH_ALGO_SHA384 = 5,
|
|
+ HASH_ALGO_SHA512 = 6,
|
|
+ HASH_ALGO_SHA224 = 7,
|
|
+ HASH_ALGO_RIPE_MD_128 = 8,
|
|
+ HASH_ALGO_RIPE_MD_256 = 9,
|
|
+ HASH_ALGO_RIPE_MD_320 = 10,
|
|
+ HASH_ALGO_WP_256 = 11,
|
|
+ HASH_ALGO_WP_384 = 12,
|
|
+ HASH_ALGO_WP_512 = 13,
|
|
+ HASH_ALGO_TGR_128 = 14,
|
|
+ HASH_ALGO_TGR_160 = 15,
|
|
+ HASH_ALGO_TGR_192 = 16,
|
|
+ HASH_ALGO_SM3_256 = 17,
|
|
+ HASH_ALGO__LAST = 18,
|
|
+};
|
|
+
|
|
+struct trusted_key_payload {
|
|
+ struct callback_head rcu;
|
|
+ unsigned int key_len;
|
|
+ unsigned int blob_len;
|
|
+ unsigned char migratable;
|
|
+ unsigned char key[129];
|
|
+ unsigned char blob[512];
|
|
+};
|
|
+
|
|
+struct trusted_key_options {
|
|
+ uint16_t keytype;
|
|
+ uint32_t keyhandle;
|
|
+ unsigned char keyauth[20];
|
|
+ unsigned char blobauth[20];
|
|
+ uint32_t pcrinfo_len;
|
|
+ unsigned char pcrinfo[64];
|
|
+ int pcrlock;
|
|
+ uint32_t hash;
|
|
+ uint32_t policydigest_len;
|
|
+ unsigned char policydigest[64];
|
|
+ uint32_t policyhandle;
|
|
+};
|
|
+
|
|
+struct tpm_buf {
|
|
+ int len;
|
|
+ unsigned char data[512];
|
|
+};
|
|
+
|
|
+struct osapsess {
|
|
+ uint32_t handle;
|
|
+ unsigned char secret[20];
|
|
+ unsigned char enonce[20];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SEAL_keytype = 1,
|
|
+ SRK_keytype = 4,
|
|
+};
|
|
+
|
|
+struct sdesc {
|
|
+ struct shash_desc shash;
|
|
+ char ctx[0];
|
|
+};
|
|
+
|
|
+struct tpm_digests {
|
|
+ unsigned char encauth[20];
|
|
+ unsigned char pubauth[20];
|
|
+ unsigned char xorwork[40];
|
|
+ unsigned char xorhash[20];
|
|
+ unsigned char nonceodd[20];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ Opt_err___8 = 0,
|
|
+ Opt_new = 1,
|
|
+ Opt_load = 2,
|
|
+ Opt_update = 3,
|
|
+ Opt_keyhandle = 4,
|
|
+ Opt_keyauth = 5,
|
|
+ Opt_blobauth = 6,
|
|
+ Opt_pcrinfo = 7,
|
|
+ Opt_pcrlock = 8,
|
|
+ Opt_migratable = 9,
|
|
+ Opt_hash = 10,
|
|
+ Opt_policydigest = 11,
|
|
+ Opt_policyhandle = 12,
|
|
+};
|
|
+
|
|
+struct encrypted_key_payload {
|
|
+ struct callback_head rcu;
|
|
+ char *format;
|
|
+ char *master_desc;
|
|
+ char *datalen;
|
|
+ u8 *iv;
|
|
+ u8 *encrypted_data;
|
|
+ short unsigned int datablob_len;
|
|
+ short unsigned int decrypted_datalen;
|
|
+ short unsigned int payload_datalen;
|
|
+ short unsigned int encrypted_key_format;
|
|
+ u8 *decrypted_data;
|
|
+ u8 payload_data[0];
|
|
+};
|
|
+
|
|
+struct ecryptfs_session_key {
|
|
+ u32 flags;
|
|
+ u32 encrypted_key_size;
|
|
+ u32 decrypted_key_size;
|
|
+ u8 encrypted_key[512];
|
|
+ u8 decrypted_key[64];
|
|
+};
|
|
+
|
|
+struct ecryptfs_password {
|
|
+ u32 password_bytes;
|
|
+ s32 hash_algo;
|
|
+ u32 hash_iterations;
|
|
+ u32 session_key_encryption_key_bytes;
|
|
+ u32 flags;
|
|
+ u8 session_key_encryption_key[64];
|
|
+ u8 signature[17];
|
|
+ u8 salt[8];
|
|
+};
|
|
+
|
|
+struct ecryptfs_private_key {
|
|
+ u32 key_size;
|
|
+ u32 data_len;
|
|
+ u8 signature[17];
|
|
+ char pki_type[17];
|
|
+ u8 data[0];
|
|
+};
|
|
+
|
|
+struct ecryptfs_auth_tok {
|
|
+ u16 version;
|
|
+ u16 token_type;
|
|
+ u32 flags;
|
|
+ struct ecryptfs_session_key session_key;
|
|
+ u8 reserved[32];
|
|
+ union {
|
|
+ struct ecryptfs_password password;
|
|
+ struct ecryptfs_private_key private_key;
|
|
+ } token;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ Opt_err___9 = -1,
|
|
+ Opt_new___2 = 0,
|
|
+ Opt_load___2 = 1,
|
|
+ Opt_update___2 = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ Opt_error = -1,
|
|
+ Opt_default = 0,
|
|
+ Opt_ecryptfs = 1,
|
|
+};
|
|
+
|
|
+enum derived_key_type {
|
|
+ ENC_KEY = 0,
|
|
+ AUTH_KEY = 1,
|
|
+};
|
|
+
|
|
+enum ecryptfs_token_types {
|
|
+ ECRYPTFS_PASSWORD = 0,
|
|
+ ECRYPTFS_PRIVATE_KEY = 1,
|
|
+};
|
|
+
|
|
+struct vfs_cap_data {
|
|
+ __le32 magic_etc;
|
|
+ struct {
|
|
+ __le32 permitted;
|
|
+ __le32 inheritable;
|
|
+ } data[2];
|
|
+};
|
|
+
|
|
+struct vfs_ns_cap_data {
|
|
+ __le32 magic_etc;
|
|
+ struct {
|
|
+ __le32 permitted;
|
|
+ __le32 inheritable;
|
|
+ } data[2];
|
|
+ __le32 rootid;
|
|
+};
|
|
+
|
|
+struct security_mnt_opts {
|
|
+ char **mnt_opts;
|
|
+ int *mnt_opts_flags;
|
|
+ int num_mnt_opts;
|
|
+};
|
|
+
|
|
+struct sctp_endpoint;
|
|
+
|
|
+struct xfrm_sec_ctx;
|
|
+
|
|
+struct xfrm_user_sec_ctx;
|
|
+
|
|
+union security_list_options {
|
|
+ int (*binder_set_context_mgr)(struct task_struct *);
|
|
+ int (*binder_transaction)(struct task_struct *, struct task_struct *);
|
|
+ int (*binder_transfer_binder)(struct task_struct *, struct task_struct *);
|
|
+ int (*binder_transfer_file)(struct task_struct *, struct task_struct *, struct file *);
|
|
+ int (*ptrace_access_check)(struct task_struct *, unsigned int);
|
|
+ int (*ptrace_traceme)(struct task_struct *);
|
|
+ int (*capget)(struct task_struct *, kernel_cap_t *, kernel_cap_t *, kernel_cap_t *);
|
|
+ int (*capset)(struct cred *, const struct cred *, const kernel_cap_t *, const kernel_cap_t *, const kernel_cap_t *);
|
|
+ int (*capable)(const struct cred *, struct user_namespace *, int, int);
|
|
+ int (*quotactl)(int, int, int, struct super_block *);
|
|
+ int (*quota_on)(struct dentry *);
|
|
+ int (*syslog)(int);
|
|
+ int (*settime)(const struct timespec64 *, const struct timezone *);
|
|
+ int (*vm_enough_memory)(struct mm_struct *, long int);
|
|
+ int (*bprm_set_creds)(struct linux_binprm *);
|
|
+ int (*bprm_check_security)(struct linux_binprm *);
|
|
+ void (*bprm_committing_creds)(struct linux_binprm *);
|
|
+ void (*bprm_committed_creds)(struct linux_binprm *);
|
|
+ int (*sb_alloc_security)(struct super_block *);
|
|
+ void (*sb_free_security)(struct super_block *);
|
|
+ int (*sb_copy_data)(char *, char *);
|
|
+ int (*sb_remount)(struct super_block *, void *);
|
|
+ int (*sb_kern_mount)(struct super_block *, int, void *);
|
|
+ int (*sb_show_options)(struct seq_file *, struct super_block *);
|
|
+ int (*sb_statfs)(struct dentry *);
|
|
+ int (*sb_mount)(const char *, const struct path *, const char *, long unsigned int, void *);
|
|
+ int (*sb_umount)(struct vfsmount *, int);
|
|
+ int (*sb_pivotroot)(const struct path *, const struct path *);
|
|
+ int (*sb_set_mnt_opts)(struct super_block *, struct security_mnt_opts *, long unsigned int, long unsigned int *);
|
|
+ int (*sb_clone_mnt_opts)(const struct super_block *, struct super_block *, long unsigned int, long unsigned int *);
|
|
+ int (*sb_parse_opts_str)(char *, struct security_mnt_opts *);
|
|
+ int (*dentry_init_security)(struct dentry *, int, const struct qstr *, void **, u32 *);
|
|
+ int (*dentry_create_files_as)(struct dentry *, int, struct qstr *, const struct cred *, struct cred *);
|
|
+ int (*inode_alloc_security)(struct inode *);
|
|
+ void (*inode_free_security)(struct inode *);
|
|
+ int (*inode_init_security)(struct inode *, struct inode *, const struct qstr *, const char **, void **, size_t *);
|
|
+ int (*inode_create)(struct inode *, struct dentry *, umode_t);
|
|
+ int (*inode_link)(struct dentry *, struct inode *, struct dentry *);
|
|
+ int (*inode_unlink)(struct inode *, struct dentry *);
|
|
+ int (*inode_symlink)(struct inode *, struct dentry *, const char *);
|
|
+ int (*inode_mkdir)(struct inode *, struct dentry *, umode_t);
|
|
+ int (*inode_rmdir)(struct inode *, struct dentry *);
|
|
+ int (*inode_mknod)(struct inode *, struct dentry *, umode_t, dev_t);
|
|
+ int (*inode_rename)(struct inode *, struct dentry *, struct inode *, struct dentry *);
|
|
+ int (*inode_readlink)(struct dentry *);
|
|
+ int (*inode_follow_link)(struct dentry *, struct inode *, bool);
|
|
+ int (*inode_permission)(struct inode *, int);
|
|
+ int (*inode_setattr)(struct dentry *, struct iattr *);
|
|
+ int (*inode_getattr)(const struct path *);
|
|
+ int (*inode_setxattr)(struct dentry *, const char *, const void *, size_t, int);
|
|
+ void (*inode_post_setxattr)(struct dentry *, const char *, const void *, size_t, int);
|
|
+ int (*inode_getxattr)(struct dentry *, const char *);
|
|
+ int (*inode_listxattr)(struct dentry *);
|
|
+ int (*inode_removexattr)(struct dentry *, const char *);
|
|
+ int (*inode_need_killpriv)(struct dentry *);
|
|
+ int (*inode_killpriv)(struct dentry *);
|
|
+ int (*inode_getsecurity)(struct inode *, const char *, void **, bool);
|
|
+ int (*inode_setsecurity)(struct inode *, const char *, const void *, size_t, int);
|
|
+ int (*inode_listsecurity)(struct inode *, char *, size_t);
|
|
+ void (*inode_getsecid)(struct inode *, u32 *);
|
|
+ int (*inode_copy_up)(struct dentry *, struct cred **);
|
|
+ int (*inode_copy_up_xattr)(const char *);
|
|
+ int (*file_permission)(struct file *, int);
|
|
+ int (*file_alloc_security)(struct file *);
|
|
+ void (*file_free_security)(struct file *);
|
|
+ int (*file_ioctl)(struct file *, unsigned int, long unsigned int);
|
|
+ int (*mmap_addr)(long unsigned int);
|
|
+ int (*mmap_file)(struct file *, long unsigned int, long unsigned int, long unsigned int);
|
|
+ int (*file_mprotect)(struct vm_area_struct *, long unsigned int, long unsigned int);
|
|
+ int (*file_lock)(struct file *, unsigned int);
|
|
+ int (*file_fcntl)(struct file *, unsigned int, long unsigned int);
|
|
+ void (*file_set_fowner)(struct file *);
|
|
+ int (*file_send_sigiotask)(struct task_struct *, struct fown_struct *, int);
|
|
+ int (*file_receive)(struct file *);
|
|
+ int (*file_open)(struct file *);
|
|
+ int (*task_alloc)(struct task_struct *, long unsigned int);
|
|
+ void (*task_free)(struct task_struct *);
|
|
+ int (*cred_alloc_blank)(struct cred *, gfp_t);
|
|
+ void (*cred_free)(struct cred *);
|
|
+ int (*cred_prepare)(struct cred *, const struct cred *, gfp_t);
|
|
+ void (*cred_transfer)(struct cred *, const struct cred *);
|
|
+ void (*cred_getsecid)(const struct cred *, u32 *);
|
|
+ int (*kernel_act_as)(struct cred *, u32);
|
|
+ int (*kernel_create_files_as)(struct cred *, struct inode *);
|
|
+ int (*kernel_module_request)(char *);
|
|
+ int (*kernel_load_data)(enum kernel_load_data_id);
|
|
+ int (*kernel_read_file)(struct file *, enum kernel_read_file_id);
|
|
+ int (*kernel_post_read_file)(struct file *, char *, loff_t, enum kernel_read_file_id);
|
|
+ int (*task_fix_setuid)(struct cred *, const struct cred *, int);
|
|
+ int (*task_setpgid)(struct task_struct *, pid_t);
|
|
+ int (*task_getpgid)(struct task_struct *);
|
|
+ int (*task_getsid)(struct task_struct *);
|
|
+ void (*task_getsecid)(struct task_struct *, u32 *);
|
|
+ int (*task_setnice)(struct task_struct *, int);
|
|
+ int (*task_setioprio)(struct task_struct *, int);
|
|
+ int (*task_getioprio)(struct task_struct *);
|
|
+ int (*task_prlimit)(const struct cred *, const struct cred *, unsigned int);
|
|
+ int (*task_setrlimit)(struct task_struct *, unsigned int, struct rlimit *);
|
|
+ int (*task_setscheduler)(struct task_struct *);
|
|
+ int (*task_getscheduler)(struct task_struct *);
|
|
+ int (*task_movememory)(struct task_struct *);
|
|
+ int (*task_kill)(struct task_struct *, struct siginfo *, int, const struct cred *);
|
|
+ int (*task_prctl)(int, long unsigned int, long unsigned int, long unsigned int, long unsigned int);
|
|
+ void (*task_to_inode)(struct task_struct *, struct inode *);
|
|
+ int (*ipc_permission)(struct kern_ipc_perm *, short int);
|
|
+ void (*ipc_getsecid)(struct kern_ipc_perm *, u32 *);
|
|
+ int (*msg_msg_alloc_security)(struct msg_msg *);
|
|
+ void (*msg_msg_free_security)(struct msg_msg *);
|
|
+ int (*msg_queue_alloc_security)(struct kern_ipc_perm *);
|
|
+ void (*msg_queue_free_security)(struct kern_ipc_perm *);
|
|
+ int (*msg_queue_associate)(struct kern_ipc_perm *, int);
|
|
+ int (*msg_queue_msgctl)(struct kern_ipc_perm *, int);
|
|
+ int (*msg_queue_msgsnd)(struct kern_ipc_perm *, struct msg_msg *, int);
|
|
+ int (*msg_queue_msgrcv)(struct kern_ipc_perm *, struct msg_msg *, struct task_struct *, long int, int);
|
|
+ int (*shm_alloc_security)(struct kern_ipc_perm *);
|
|
+ void (*shm_free_security)(struct kern_ipc_perm *);
|
|
+ int (*shm_associate)(struct kern_ipc_perm *, int);
|
|
+ int (*shm_shmctl)(struct kern_ipc_perm *, int);
|
|
+ int (*shm_shmat)(struct kern_ipc_perm *, char *, int);
|
|
+ int (*sem_alloc_security)(struct kern_ipc_perm *);
|
|
+ void (*sem_free_security)(struct kern_ipc_perm *);
|
|
+ int (*sem_associate)(struct kern_ipc_perm *, int);
|
|
+ int (*sem_semctl)(struct kern_ipc_perm *, int);
|
|
+ int (*sem_semop)(struct kern_ipc_perm *, struct sembuf *, unsigned int, int);
|
|
+ int (*netlink_send)(struct sock *, struct sk_buff *);
|
|
+ void (*d_instantiate)(struct dentry *, struct inode *);
|
|
+ int (*getprocattr)(struct task_struct *, char *, char **);
|
|
+ int (*setprocattr)(const char *, void *, size_t);
|
|
+ int (*ismaclabel)(const char *);
|
|
+ int (*secid_to_secctx)(u32, char **, u32 *);
|
|
+ int (*secctx_to_secid)(const char *, u32, u32 *);
|
|
+ void (*release_secctx)(char *, u32);
|
|
+ void (*inode_invalidate_secctx)(struct inode *);
|
|
+ int (*inode_notifysecctx)(struct inode *, void *, u32);
|
|
+ int (*inode_setsecctx)(struct dentry *, void *, u32);
|
|
+ int (*inode_getsecctx)(struct inode *, void **, u32 *);
|
|
+ int (*unix_stream_connect)(struct sock *, struct sock *, struct sock *);
|
|
+ int (*unix_may_send)(struct socket *, struct socket *);
|
|
+ int (*socket_create)(int, int, int, int);
|
|
+ int (*socket_post_create)(struct socket *, int, int, int, int);
|
|
+ int (*socket_socketpair)(struct socket *, struct socket *);
|
|
+ int (*socket_bind)(struct socket *, struct sockaddr *, int);
|
|
+ int (*socket_connect)(struct socket *, struct sockaddr *, int);
|
|
+ int (*socket_listen)(struct socket *, int);
|
|
+ int (*socket_accept)(struct socket *, struct socket *);
|
|
+ int (*socket_sendmsg)(struct socket *, struct msghdr *, int);
|
|
+ int (*socket_recvmsg)(struct socket *, struct msghdr *, int, int);
|
|
+ int (*socket_getsockname)(struct socket *);
|
|
+ int (*socket_getpeername)(struct socket *);
|
|
+ int (*socket_getsockopt)(struct socket *, int, int);
|
|
+ int (*socket_setsockopt)(struct socket *, int, int);
|
|
+ int (*socket_shutdown)(struct socket *, int);
|
|
+ int (*socket_sock_rcv_skb)(struct sock *, struct sk_buff *);
|
|
+ int (*socket_getpeersec_stream)(struct socket *, char *, int *, unsigned int);
|
|
+ int (*socket_getpeersec_dgram)(struct socket *, struct sk_buff *, u32 *);
|
|
+ int (*sk_alloc_security)(struct sock *, int, gfp_t);
|
|
+ void (*sk_free_security)(struct sock *);
|
|
+ void (*sk_clone_security)(const struct sock *, struct sock *);
|
|
+ void (*sk_getsecid)(struct sock *, u32 *);
|
|
+ void (*sock_graft)(struct sock *, struct socket *);
|
|
+ int (*inet_conn_request)(struct sock *, struct sk_buff *, struct request_sock *);
|
|
+ void (*inet_csk_clone)(struct sock *, const struct request_sock *);
|
|
+ void (*inet_conn_established)(struct sock *, struct sk_buff *);
|
|
+ int (*secmark_relabel_packet)(u32);
|
|
+ void (*secmark_refcount_inc)();
|
|
+ void (*secmark_refcount_dec)();
|
|
+ void (*req_classify_flow)(const struct request_sock *, struct flowi *);
|
|
+ int (*tun_dev_alloc_security)(void **);
|
|
+ void (*tun_dev_free_security)(void *);
|
|
+ int (*tun_dev_create)();
|
|
+ int (*tun_dev_attach_queue)(void *);
|
|
+ int (*tun_dev_attach)(struct sock *, void *);
|
|
+ int (*tun_dev_open)(void *);
|
|
+ int (*sctp_assoc_request)(struct sctp_endpoint *, struct sk_buff *);
|
|
+ int (*sctp_bind_connect)(struct sock *, int, struct sockaddr *, int);
|
|
+ void (*sctp_sk_clone)(struct sctp_endpoint *, struct sock *, struct sock *);
|
|
+ int (*ib_pkey_access)(void *, u64, u16);
|
|
+ int (*ib_endport_manage_subnet)(void *, const char *, u8);
|
|
+ int (*ib_alloc_security)(void **);
|
|
+ void (*ib_free_security)(void *);
|
|
+ int (*xfrm_policy_alloc_security)(struct xfrm_sec_ctx **, struct xfrm_user_sec_ctx *, gfp_t);
|
|
+ int (*xfrm_policy_clone_security)(struct xfrm_sec_ctx *, struct xfrm_sec_ctx **);
|
|
+ void (*xfrm_policy_free_security)(struct xfrm_sec_ctx *);
|
|
+ int (*xfrm_policy_delete_security)(struct xfrm_sec_ctx *);
|
|
+ int (*xfrm_state_alloc)(struct xfrm_state *, struct xfrm_user_sec_ctx *);
|
|
+ int (*xfrm_state_alloc_acquire)(struct xfrm_state *, struct xfrm_sec_ctx *, u32);
|
|
+ void (*xfrm_state_free_security)(struct xfrm_state *);
|
|
+ int (*xfrm_state_delete_security)(struct xfrm_state *);
|
|
+ int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *, u32, u8);
|
|
+ int (*xfrm_state_pol_flow_match)(struct xfrm_state *, struct xfrm_policy *, const struct flowi *);
|
|
+ int (*xfrm_decode_session)(struct sk_buff *, u32 *, int);
|
|
+ int (*key_alloc)(struct key *, const struct cred *, long unsigned int);
|
|
+ void (*key_free)(struct key *);
|
|
+ int (*key_permission)(key_ref_t, const struct cred *, unsigned int);
|
|
+ int (*key_getsecurity)(struct key *, char **);
|
|
+ int (*audit_rule_init)(u32, u32, char *, void **);
|
|
+ int (*audit_rule_known)(struct audit_krule *);
|
|
+ int (*audit_rule_match)(u32, u32, u32, void *, struct audit_context *);
|
|
+ void (*audit_rule_free)(void *);
|
|
+ int (*bpf)(int, union bpf_attr *, unsigned int);
|
|
+ int (*bpf_map)(struct bpf_map *, fmode_t);
|
|
+ int (*bpf_prog)(struct bpf_prog *);
|
|
+ int (*bpf_map_alloc_security)(struct bpf_map *);
|
|
+ void (*bpf_map_free_security)(struct bpf_map *);
|
|
+ int (*bpf_prog_alloc_security)(struct bpf_prog_aux *);
|
|
+ void (*bpf_prog_free_security)(struct bpf_prog_aux *);
|
|
+};
|
|
+
|
|
+struct security_hook_heads {
|
|
+ struct hlist_head binder_set_context_mgr;
|
|
+ struct hlist_head binder_transaction;
|
|
+ struct hlist_head binder_transfer_binder;
|
|
+ struct hlist_head binder_transfer_file;
|
|
+ struct hlist_head ptrace_access_check;
|
|
+ struct hlist_head ptrace_traceme;
|
|
+ struct hlist_head capget;
|
|
+ struct hlist_head capset;
|
|
+ struct hlist_head capable;
|
|
+ struct hlist_head quotactl;
|
|
+ struct hlist_head quota_on;
|
|
+ struct hlist_head syslog;
|
|
+ struct hlist_head settime;
|
|
+ struct hlist_head vm_enough_memory;
|
|
+ struct hlist_head bprm_set_creds;
|
|
+ struct hlist_head bprm_check_security;
|
|
+ struct hlist_head bprm_committing_creds;
|
|
+ struct hlist_head bprm_committed_creds;
|
|
+ struct hlist_head sb_alloc_security;
|
|
+ struct hlist_head sb_free_security;
|
|
+ struct hlist_head sb_copy_data;
|
|
+ struct hlist_head sb_remount;
|
|
+ struct hlist_head sb_kern_mount;
|
|
+ struct hlist_head sb_show_options;
|
|
+ struct hlist_head sb_statfs;
|
|
+ struct hlist_head sb_mount;
|
|
+ struct hlist_head sb_umount;
|
|
+ struct hlist_head sb_pivotroot;
|
|
+ struct hlist_head sb_set_mnt_opts;
|
|
+ struct hlist_head sb_clone_mnt_opts;
|
|
+ struct hlist_head sb_parse_opts_str;
|
|
+ struct hlist_head dentry_init_security;
|
|
+ struct hlist_head dentry_create_files_as;
|
|
+ struct hlist_head inode_alloc_security;
|
|
+ struct hlist_head inode_free_security;
|
|
+ struct hlist_head inode_init_security;
|
|
+ struct hlist_head inode_create;
|
|
+ struct hlist_head inode_link;
|
|
+ struct hlist_head inode_unlink;
|
|
+ struct hlist_head inode_symlink;
|
|
+ struct hlist_head inode_mkdir;
|
|
+ struct hlist_head inode_rmdir;
|
|
+ struct hlist_head inode_mknod;
|
|
+ struct hlist_head inode_rename;
|
|
+ struct hlist_head inode_readlink;
|
|
+ struct hlist_head inode_follow_link;
|
|
+ struct hlist_head inode_permission;
|
|
+ struct hlist_head inode_setattr;
|
|
+ struct hlist_head inode_getattr;
|
|
+ struct hlist_head inode_setxattr;
|
|
+ struct hlist_head inode_post_setxattr;
|
|
+ struct hlist_head inode_getxattr;
|
|
+ struct hlist_head inode_listxattr;
|
|
+ struct hlist_head inode_removexattr;
|
|
+ struct hlist_head inode_need_killpriv;
|
|
+ struct hlist_head inode_killpriv;
|
|
+ struct hlist_head inode_getsecurity;
|
|
+ struct hlist_head inode_setsecurity;
|
|
+ struct hlist_head inode_listsecurity;
|
|
+ struct hlist_head inode_getsecid;
|
|
+ struct hlist_head inode_copy_up;
|
|
+ struct hlist_head inode_copy_up_xattr;
|
|
+ struct hlist_head file_permission;
|
|
+ struct hlist_head file_alloc_security;
|
|
+ struct hlist_head file_free_security;
|
|
+ struct hlist_head file_ioctl;
|
|
+ struct hlist_head mmap_addr;
|
|
+ struct hlist_head mmap_file;
|
|
+ struct hlist_head file_mprotect;
|
|
+ struct hlist_head file_lock;
|
|
+ struct hlist_head file_fcntl;
|
|
+ struct hlist_head file_set_fowner;
|
|
+ struct hlist_head file_send_sigiotask;
|
|
+ struct hlist_head file_receive;
|
|
+ struct hlist_head file_open;
|
|
+ struct hlist_head task_alloc;
|
|
+ struct hlist_head task_free;
|
|
+ struct hlist_head cred_alloc_blank;
|
|
+ struct hlist_head cred_free;
|
|
+ struct hlist_head cred_prepare;
|
|
+ struct hlist_head cred_transfer;
|
|
+ struct hlist_head cred_getsecid;
|
|
+ struct hlist_head kernel_act_as;
|
|
+ struct hlist_head kernel_create_files_as;
|
|
+ struct hlist_head kernel_load_data;
|
|
+ struct hlist_head kernel_read_file;
|
|
+ struct hlist_head kernel_post_read_file;
|
|
+ struct hlist_head kernel_module_request;
|
|
+ struct hlist_head task_fix_setuid;
|
|
+ struct hlist_head task_setpgid;
|
|
+ struct hlist_head task_getpgid;
|
|
+ struct hlist_head task_getsid;
|
|
+ struct hlist_head task_getsecid;
|
|
+ struct hlist_head task_setnice;
|
|
+ struct hlist_head task_setioprio;
|
|
+ struct hlist_head task_getioprio;
|
|
+ struct hlist_head task_prlimit;
|
|
+ struct hlist_head task_setrlimit;
|
|
+ struct hlist_head task_setscheduler;
|
|
+ struct hlist_head task_getscheduler;
|
|
+ struct hlist_head task_movememory;
|
|
+ struct hlist_head task_kill;
|
|
+ struct hlist_head task_prctl;
|
|
+ struct hlist_head task_to_inode;
|
|
+ struct hlist_head ipc_permission;
|
|
+ struct hlist_head ipc_getsecid;
|
|
+ struct hlist_head msg_msg_alloc_security;
|
|
+ struct hlist_head msg_msg_free_security;
|
|
+ struct hlist_head msg_queue_alloc_security;
|
|
+ struct hlist_head msg_queue_free_security;
|
|
+ struct hlist_head msg_queue_associate;
|
|
+ struct hlist_head msg_queue_msgctl;
|
|
+ struct hlist_head msg_queue_msgsnd;
|
|
+ struct hlist_head msg_queue_msgrcv;
|
|
+ struct hlist_head shm_alloc_security;
|
|
+ struct hlist_head shm_free_security;
|
|
+ struct hlist_head shm_associate;
|
|
+ struct hlist_head shm_shmctl;
|
|
+ struct hlist_head shm_shmat;
|
|
+ struct hlist_head sem_alloc_security;
|
|
+ struct hlist_head sem_free_security;
|
|
+ struct hlist_head sem_associate;
|
|
+ struct hlist_head sem_semctl;
|
|
+ struct hlist_head sem_semop;
|
|
+ struct hlist_head netlink_send;
|
|
+ struct hlist_head d_instantiate;
|
|
+ struct hlist_head getprocattr;
|
|
+ struct hlist_head setprocattr;
|
|
+ struct hlist_head ismaclabel;
|
|
+ struct hlist_head secid_to_secctx;
|
|
+ struct hlist_head secctx_to_secid;
|
|
+ struct hlist_head release_secctx;
|
|
+ struct hlist_head inode_invalidate_secctx;
|
|
+ struct hlist_head inode_notifysecctx;
|
|
+ struct hlist_head inode_setsecctx;
|
|
+ struct hlist_head inode_getsecctx;
|
|
+ struct hlist_head unix_stream_connect;
|
|
+ struct hlist_head unix_may_send;
|
|
+ struct hlist_head socket_create;
|
|
+ struct hlist_head socket_post_create;
|
|
+ struct hlist_head socket_socketpair;
|
|
+ struct hlist_head socket_bind;
|
|
+ struct hlist_head socket_connect;
|
|
+ struct hlist_head socket_listen;
|
|
+ struct hlist_head socket_accept;
|
|
+ struct hlist_head socket_sendmsg;
|
|
+ struct hlist_head socket_recvmsg;
|
|
+ struct hlist_head socket_getsockname;
|
|
+ struct hlist_head socket_getpeername;
|
|
+ struct hlist_head socket_getsockopt;
|
|
+ struct hlist_head socket_setsockopt;
|
|
+ struct hlist_head socket_shutdown;
|
|
+ struct hlist_head socket_sock_rcv_skb;
|
|
+ struct hlist_head socket_getpeersec_stream;
|
|
+ struct hlist_head socket_getpeersec_dgram;
|
|
+ struct hlist_head sk_alloc_security;
|
|
+ struct hlist_head sk_free_security;
|
|
+ struct hlist_head sk_clone_security;
|
|
+ struct hlist_head sk_getsecid;
|
|
+ struct hlist_head sock_graft;
|
|
+ struct hlist_head inet_conn_request;
|
|
+ struct hlist_head inet_csk_clone;
|
|
+ struct hlist_head inet_conn_established;
|
|
+ struct hlist_head secmark_relabel_packet;
|
|
+ struct hlist_head secmark_refcount_inc;
|
|
+ struct hlist_head secmark_refcount_dec;
|
|
+ struct hlist_head req_classify_flow;
|
|
+ struct hlist_head tun_dev_alloc_security;
|
|
+ struct hlist_head tun_dev_free_security;
|
|
+ struct hlist_head tun_dev_create;
|
|
+ struct hlist_head tun_dev_attach_queue;
|
|
+ struct hlist_head tun_dev_attach;
|
|
+ struct hlist_head tun_dev_open;
|
|
+ struct hlist_head sctp_assoc_request;
|
|
+ struct hlist_head sctp_bind_connect;
|
|
+ struct hlist_head sctp_sk_clone;
|
|
+ struct hlist_head ib_pkey_access;
|
|
+ struct hlist_head ib_endport_manage_subnet;
|
|
+ struct hlist_head ib_alloc_security;
|
|
+ struct hlist_head ib_free_security;
|
|
+ struct hlist_head xfrm_policy_alloc_security;
|
|
+ struct hlist_head xfrm_policy_clone_security;
|
|
+ struct hlist_head xfrm_policy_free_security;
|
|
+ struct hlist_head xfrm_policy_delete_security;
|
|
+ struct hlist_head xfrm_state_alloc;
|
|
+ struct hlist_head xfrm_state_alloc_acquire;
|
|
+ struct hlist_head xfrm_state_free_security;
|
|
+ struct hlist_head xfrm_state_delete_security;
|
|
+ struct hlist_head xfrm_policy_lookup;
|
|
+ struct hlist_head xfrm_state_pol_flow_match;
|
|
+ struct hlist_head xfrm_decode_session;
|
|
+ struct hlist_head key_alloc;
|
|
+ struct hlist_head key_free;
|
|
+ struct hlist_head key_permission;
|
|
+ struct hlist_head key_getsecurity;
|
|
+ struct hlist_head audit_rule_init;
|
|
+ struct hlist_head audit_rule_known;
|
|
+ struct hlist_head audit_rule_match;
|
|
+ struct hlist_head audit_rule_free;
|
|
+ struct hlist_head bpf;
|
|
+ struct hlist_head bpf_map;
|
|
+ struct hlist_head bpf_prog;
|
|
+ struct hlist_head bpf_map_alloc_security;
|
|
+ struct hlist_head bpf_map_free_security;
|
|
+ struct hlist_head bpf_prog_alloc_security;
|
|
+ struct hlist_head bpf_prog_free_security;
|
|
+};
|
|
+
|
|
+struct security_hook_list {
|
|
+ struct hlist_node list;
|
|
+ struct hlist_head *head;
|
|
+ union security_list_options hook;
|
|
+ char *lsm;
|
|
+};
|
|
+
|
|
+enum lsm_event {
|
|
+ LSM_POLICY_CHANGE = 0,
|
|
+};
|
|
+
|
|
+typedef int (*initxattrs)(struct inode *, const struct xattr *, void *);
|
|
+
|
|
+enum {
|
|
+ IB_USER_VERBS_CMD_GET_CONTEXT = 0,
|
|
+ IB_USER_VERBS_CMD_QUERY_DEVICE = 1,
|
|
+ IB_USER_VERBS_CMD_QUERY_PORT = 2,
|
|
+ IB_USER_VERBS_CMD_ALLOC_PD = 3,
|
|
+ IB_USER_VERBS_CMD_DEALLOC_PD = 4,
|
|
+ IB_USER_VERBS_CMD_CREATE_AH = 5,
|
|
+ IB_USER_VERBS_CMD_MODIFY_AH = 6,
|
|
+ IB_USER_VERBS_CMD_QUERY_AH = 7,
|
|
+ IB_USER_VERBS_CMD_DESTROY_AH = 8,
|
|
+ IB_USER_VERBS_CMD_REG_MR = 9,
|
|
+ IB_USER_VERBS_CMD_REG_SMR = 10,
|
|
+ IB_USER_VERBS_CMD_REREG_MR = 11,
|
|
+ IB_USER_VERBS_CMD_QUERY_MR = 12,
|
|
+ IB_USER_VERBS_CMD_DEREG_MR = 13,
|
|
+ IB_USER_VERBS_CMD_ALLOC_MW = 14,
|
|
+ IB_USER_VERBS_CMD_BIND_MW = 15,
|
|
+ IB_USER_VERBS_CMD_DEALLOC_MW = 16,
|
|
+ IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL = 17,
|
|
+ IB_USER_VERBS_CMD_CREATE_CQ = 18,
|
|
+ IB_USER_VERBS_CMD_RESIZE_CQ = 19,
|
|
+ IB_USER_VERBS_CMD_DESTROY_CQ = 20,
|
|
+ IB_USER_VERBS_CMD_POLL_CQ = 21,
|
|
+ IB_USER_VERBS_CMD_PEEK_CQ = 22,
|
|
+ IB_USER_VERBS_CMD_REQ_NOTIFY_CQ = 23,
|
|
+ IB_USER_VERBS_CMD_CREATE_QP = 24,
|
|
+ IB_USER_VERBS_CMD_QUERY_QP = 25,
|
|
+ IB_USER_VERBS_CMD_MODIFY_QP = 26,
|
|
+ IB_USER_VERBS_CMD_DESTROY_QP = 27,
|
|
+ IB_USER_VERBS_CMD_POST_SEND = 28,
|
|
+ IB_USER_VERBS_CMD_POST_RECV = 29,
|
|
+ IB_USER_VERBS_CMD_ATTACH_MCAST = 30,
|
|
+ IB_USER_VERBS_CMD_DETACH_MCAST = 31,
|
|
+ IB_USER_VERBS_CMD_CREATE_SRQ = 32,
|
|
+ IB_USER_VERBS_CMD_MODIFY_SRQ = 33,
|
|
+ IB_USER_VERBS_CMD_QUERY_SRQ = 34,
|
|
+ IB_USER_VERBS_CMD_DESTROY_SRQ = 35,
|
|
+ IB_USER_VERBS_CMD_POST_SRQ_RECV = 36,
|
|
+ IB_USER_VERBS_CMD_OPEN_XRCD = 37,
|
|
+ IB_USER_VERBS_CMD_CLOSE_XRCD = 38,
|
|
+ IB_USER_VERBS_CMD_CREATE_XSRQ = 39,
|
|
+ IB_USER_VERBS_CMD_OPEN_QP = 40,
|
|
+};
|
|
+
|
|
+enum ib_uverbs_create_qp_mask {
|
|
+ IB_UVERBS_CREATE_QP_MASK_IND_TABLE = 1,
|
|
+};
|
|
+
|
|
+enum ib_uverbs_wr_opcode {
|
|
+ IB_UVERBS_WR_RDMA_WRITE = 0,
|
|
+ IB_UVERBS_WR_RDMA_WRITE_WITH_IMM = 1,
|
|
+ IB_UVERBS_WR_SEND = 2,
|
|
+ IB_UVERBS_WR_SEND_WITH_IMM = 3,
|
|
+ IB_UVERBS_WR_RDMA_READ = 4,
|
|
+ IB_UVERBS_WR_ATOMIC_CMP_AND_SWP = 5,
|
|
+ IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD = 6,
|
|
+ IB_UVERBS_WR_LOCAL_INV = 7,
|
|
+ IB_UVERBS_WR_BIND_MW = 8,
|
|
+ IB_UVERBS_WR_SEND_WITH_INV = 9,
|
|
+ IB_UVERBS_WR_TSO = 10,
|
|
+ IB_UVERBS_WR_RDMA_READ_WITH_INV = 11,
|
|
+ IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP = 12,
|
|
+ IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13,
|
|
+};
|
|
+
|
|
+enum ib_uverbs_access_flags {
|
|
+ IB_UVERBS_ACCESS_LOCAL_WRITE = 1,
|
|
+ IB_UVERBS_ACCESS_REMOTE_WRITE = 2,
|
|
+ IB_UVERBS_ACCESS_REMOTE_READ = 4,
|
|
+ IB_UVERBS_ACCESS_REMOTE_ATOMIC = 8,
|
|
+ IB_UVERBS_ACCESS_MW_BIND = 16,
|
|
+ IB_UVERBS_ACCESS_ZERO_BASED = 32,
|
|
+ IB_UVERBS_ACCESS_ON_DEMAND = 64,
|
|
+ IB_UVERBS_ACCESS_HUGETLB = 128,
|
|
+};
|
|
+
|
|
+union ib_gid {
|
|
+ u8 raw[16];
|
|
+ struct {
|
|
+ __be64 subnet_prefix;
|
|
+ __be64 interface_id;
|
|
+ } global;
|
|
+};
|
|
+
|
|
+struct lsm_network_audit {
|
|
+ int netif;
|
|
+ struct sock *sk;
|
|
+ u16 family;
|
|
+ __be16 dport;
|
|
+ __be16 sport;
|
|
+ union {
|
|
+ struct {
|
|
+ __be32 daddr;
|
|
+ __be32 saddr;
|
|
+ } v4;
|
|
+ struct {
|
|
+ struct in6_addr daddr;
|
|
+ struct in6_addr saddr;
|
|
+ } v6;
|
|
+ } fam;
|
|
+};
|
|
+
|
|
+struct lsm_ioctlop_audit {
|
|
+ struct path path;
|
|
+ u16 cmd;
|
|
+};
|
|
+
|
|
+struct lsm_ibpkey_audit {
|
|
+ u64 subnet_prefix;
|
|
+ u16 pkey;
|
|
+};
|
|
+
|
|
+struct lsm_ibendport_audit {
|
|
+ char dev_name[64];
|
|
+ u8 port;
|
|
+};
|
|
+
|
|
+struct selinux_state;
|
|
+
|
|
+struct selinux_audit_data {
|
|
+ u32 ssid;
|
|
+ u32 tsid;
|
|
+ u16 tclass;
|
|
+ u32 requested;
|
|
+ u32 audited;
|
|
+ u32 denied;
|
|
+ int result;
|
|
+ struct selinux_state *state;
|
|
+};
|
|
+
|
|
+struct common_audit_data {
|
|
+ char type;
|
|
+ union {
|
|
+ struct path path;
|
|
+ struct dentry *dentry;
|
|
+ struct inode *inode;
|
|
+ struct lsm_network_audit *net;
|
|
+ int cap;
|
|
+ int ipc_id;
|
|
+ struct task_struct *tsk;
|
|
+ struct {
|
|
+ key_serial_t key;
|
|
+ char *key_desc;
|
|
+ } key_struct;
|
|
+ char *kmod_name;
|
|
+ struct lsm_ioctlop_audit *op;
|
|
+ struct file *file;
|
|
+ struct lsm_ibpkey_audit *ibpkey;
|
|
+ struct lsm_ibendport_audit *ibendport;
|
|
+ } u;
|
|
+ union {
|
|
+ struct selinux_audit_data *selinux_audit_data;
|
|
+ };
|
|
+};
|
|
+
|
|
+enum {
|
|
+ POLICYDB_CAPABILITY_NETPEER = 0,
|
|
+ POLICYDB_CAPABILITY_OPENPERM = 1,
|
|
+ POLICYDB_CAPABILITY_EXTSOCKCLASS = 2,
|
|
+ POLICYDB_CAPABILITY_ALWAYSNETWORK = 3,
|
|
+ POLICYDB_CAPABILITY_CGROUPSECLABEL = 4,
|
|
+ POLICYDB_CAPABILITY_NNP_NOSUID_TRANSITION = 5,
|
|
+ __POLICYDB_CAPABILITY_MAX = 6,
|
|
+};
|
|
+
|
|
+struct selinux_avc;
|
|
+
|
|
+struct selinux_ss;
|
|
+
|
|
+struct selinux_state {
|
|
+ bool disabled;
|
|
+ bool enforcing;
|
|
+ bool checkreqprot;
|
|
+ bool initialized;
|
|
+ bool policycap[6];
|
|
+ struct selinux_avc *avc;
|
|
+ struct selinux_ss *ss;
|
|
+};
|
|
+
|
|
+struct avc_cache {
|
|
+ struct hlist_head slots[512];
|
|
+ spinlock_t slots_lock[512];
|
|
+ atomic_t lru_hint;
|
|
+ atomic_t active_nodes;
|
|
+ u32 latest_notif;
|
|
+};
|
|
+
|
|
+struct selinux_avc {
|
|
+ unsigned int avc_cache_threshold;
|
|
+ struct avc_cache avc_cache;
|
|
+};
|
|
+
|
|
+struct av_decision {
|
|
+ u32 allowed;
|
|
+ u32 auditallow;
|
|
+ u32 auditdeny;
|
|
+ u32 seqno;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+struct extended_perms_data {
|
|
+ u32 p[8];
|
|
+};
|
|
+
|
|
+struct extended_perms_decision {
|
|
+ u8 used;
|
|
+ u8 driver;
|
|
+ struct extended_perms_data *allowed;
|
|
+ struct extended_perms_data *auditallow;
|
|
+ struct extended_perms_data *dontaudit;
|
|
+};
|
|
+
|
|
+struct extended_perms {
|
|
+ u16 len;
|
|
+ struct extended_perms_data drivers;
|
|
+};
|
|
+
|
|
+struct avc_cache_stats {
|
|
+ unsigned int lookups;
|
|
+ unsigned int misses;
|
|
+ unsigned int allocations;
|
|
+ unsigned int reclaims;
|
|
+ unsigned int frees;
|
|
+};
|
|
+
|
|
+struct security_class_mapping {
|
|
+ const char *name;
|
|
+ const char *perms[33];
|
|
+};
|
|
+
|
|
+struct avc_xperms_node;
|
|
+
|
|
+struct avc_entry {
|
|
+ u32 ssid;
|
|
+ u32 tsid;
|
|
+ u16 tclass;
|
|
+ struct av_decision avd;
|
|
+ struct avc_xperms_node *xp_node;
|
|
+};
|
|
+
|
|
+struct avc_xperms_node {
|
|
+ struct extended_perms xp;
|
|
+ struct list_head xpd_head;
|
|
+};
|
|
+
|
|
+struct avc_node {
|
|
+ struct avc_entry ae;
|
|
+ struct hlist_node list;
|
|
+ struct callback_head rhead;
|
|
+};
|
|
+
|
|
+struct avc_xperms_decision_node {
|
|
+ struct extended_perms_decision xpd;
|
|
+ struct list_head xpd_list;
|
|
+};
|
|
+
|
|
+struct avc_callback_node {
|
|
+ int (*callback)(u32);
|
|
+ u32 events;
|
|
+ struct avc_callback_node *next;
|
|
+};
|
|
+
|
|
+typedef __u16 __sum16;
|
|
+
|
|
+typedef u16 u_int16_t;
|
|
+
|
|
+struct rhltable {
|
|
+ struct rhashtable ht;
|
|
+};
|
|
+
|
|
+enum sctp_endpoint_type {
|
|
+ SCTP_EP_TYPE_SOCKET = 0,
|
|
+ SCTP_EP_TYPE_ASSOCIATION = 1,
|
|
+};
|
|
+
|
|
+struct sctp_chunk;
|
|
+
|
|
+struct sctp_inq {
|
|
+ struct list_head in_chunk_list;
|
|
+ struct sctp_chunk *in_progress;
|
|
+ struct work_struct immediate;
|
|
+};
|
|
+
|
|
+struct sctp_bind_addr {
|
|
+ __u16 port;
|
|
+ struct list_head address_list;
|
|
+};
|
|
+
|
|
+struct sctp_ep_common {
|
|
+ struct hlist_node node;
|
|
+ int hashent;
|
|
+ enum sctp_endpoint_type type;
|
|
+ refcount_t refcnt;
|
|
+ bool dead;
|
|
+ struct sock *sk;
|
|
+ struct net *net;
|
|
+ struct sctp_inq inqueue;
|
|
+ struct sctp_bind_addr bind_addr;
|
|
+};
|
|
+
|
|
+struct crypto_shash___2;
|
|
+
|
|
+struct sctp_hmac_algo_param;
|
|
+
|
|
+struct sctp_chunks_param;
|
|
+
|
|
+struct sctp_endpoint {
|
|
+ struct sctp_ep_common base;
|
|
+ struct list_head asocs;
|
|
+ __u8 secret_key[32];
|
|
+ __u8 *digest;
|
|
+ __u32 sndbuf_policy;
|
|
+ __u32 rcvbuf_policy;
|
|
+ struct crypto_shash___2 **auth_hmacs;
|
|
+ struct sctp_hmac_algo_param *auth_hmacs_list;
|
|
+ struct sctp_chunks_param *auth_chunk_list;
|
|
+ struct list_head endpoint_shared_keys;
|
|
+ __u16 active_key_id;
|
|
+ __u8 auth_enable: 1;
|
|
+ __u8 prsctp_enable: 1;
|
|
+ __u8 reconf_enable: 1;
|
|
+ __u8 strreset_enable;
|
|
+ u32 secid;
|
|
+ u32 peer_secid;
|
|
+};
|
|
+
|
|
+struct xfrm_sec_ctx {
|
|
+ __u8 ctx_doi;
|
|
+ __u8 ctx_alg;
|
|
+ __u16 ctx_len;
|
|
+ __u32 ctx_sid;
|
|
+ char ctx_str[0];
|
|
+};
|
|
+
|
|
+struct xfrm_user_sec_ctx {
|
|
+ __u16 len;
|
|
+ __u16 exttype;
|
|
+ __u8 ctx_alg;
|
|
+ __u8 ctx_doi;
|
|
+ __u16 ctx_len;
|
|
+};
|
|
+
|
|
+struct sockaddr_in6 {
|
|
+ short unsigned int sin6_family;
|
|
+ __be16 sin6_port;
|
|
+ __be32 sin6_flowinfo;
|
|
+ struct in6_addr sin6_addr;
|
|
+ __u32 sin6_scope_id;
|
|
+};
|
|
+
|
|
+struct nf_conntrack {
|
|
+ atomic_t use;
|
|
+};
|
|
+
|
|
+struct in_addr {
|
|
+ __be32 s_addr;
|
|
+};
|
|
+
|
|
+struct sockaddr_in {
|
|
+ __kernel_sa_family_t sin_family;
|
|
+ __be16 sin_port;
|
|
+ struct in_addr sin_addr;
|
|
+ unsigned char __pad[8];
|
|
+};
|
|
+
|
|
+struct nf_hook_state;
|
|
+
|
|
+typedef unsigned int nf_hookfn(void *, struct sk_buff *, const struct nf_hook_state *);
|
|
+
|
|
+struct nf_hook_entry {
|
|
+ nf_hookfn *hook;
|
|
+ void *priv;
|
|
+};
|
|
+
|
|
+struct nf_hook_entries {
|
|
+ u16 num_hook_entries;
|
|
+ struct nf_hook_entry hooks[0];
|
|
+};
|
|
+
|
|
+enum ip_conntrack_info {
|
|
+ IP_CT_ESTABLISHED = 0,
|
|
+ IP_CT_RELATED = 1,
|
|
+ IP_CT_NEW = 2,
|
|
+ IP_CT_IS_REPLY = 3,
|
|
+ IP_CT_ESTABLISHED_REPLY = 3,
|
|
+ IP_CT_RELATED_REPLY = 4,
|
|
+ IP_CT_NUMBER = 5,
|
|
+ IP_CT_UNTRACKED = 7,
|
|
+};
|
|
+
|
|
+struct nf_hook_state {
|
|
+ unsigned int hook;
|
|
+ u_int8_t pf;
|
|
+ struct net_device *in;
|
|
+ struct net_device *out;
|
|
+ struct sock *sk;
|
|
+ struct net *net;
|
|
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *);
|
|
+};
|
|
+
|
|
+struct nf_hook_ops {
|
|
+ nf_hookfn *hook;
|
|
+ struct net_device *dev;
|
|
+ void *priv;
|
|
+ u_int8_t pf;
|
|
+ unsigned int hooknum;
|
|
+ int priority;
|
|
+};
|
|
+
|
|
+enum nf_nat_manip_type;
|
|
+
|
|
+struct nf_conn;
|
|
+
|
|
+struct nf_nat_hook {
|
|
+ int (*parse_nat_setup)(struct nf_conn *, enum nf_nat_manip_type, const struct nlattr *);
|
|
+ void (*decode_session)(struct sk_buff *, struct flowi *);
|
|
+ unsigned int (*manip_pkt)(struct sk_buff *, struct nf_conn *, enum nf_nat_manip_type, enum ip_conntrack_dir);
|
|
+};
|
|
+
|
|
+struct nf_conntrack_zone {
|
|
+ u16 id;
|
|
+ u8 flags;
|
|
+ u8 dir;
|
|
+};
|
|
+
|
|
+union nf_inet_addr {
|
|
+ __u32 all[4];
|
|
+ __be32 ip;
|
|
+ __be32 ip6[4];
|
|
+ struct in_addr in;
|
|
+ struct in6_addr in6;
|
|
+};
|
|
+
|
|
+union nf_conntrack_man_proto {
|
|
+ __be16 all;
|
|
+ struct {
|
|
+ __be16 port;
|
|
+ } tcp;
|
|
+ struct {
|
|
+ __be16 port;
|
|
+ } udp;
|
|
+ struct {
|
|
+ __be16 id;
|
|
+ } icmp;
|
|
+ struct {
|
|
+ __be16 port;
|
|
+ } dccp;
|
|
+ struct {
|
|
+ __be16 port;
|
|
+ } sctp;
|
|
+ struct {
|
|
+ __be16 key;
|
|
+ } gre;
|
|
+};
|
|
+
|
|
+struct nf_conntrack_man {
|
|
+ union nf_inet_addr u3;
|
|
+ union nf_conntrack_man_proto u;
|
|
+ u_int16_t l3num;
|
|
+};
|
|
+
|
|
+struct nf_conntrack_tuple {
|
|
+ struct nf_conntrack_man src;
|
|
+ struct {
|
|
+ union nf_inet_addr u3;
|
|
+ union {
|
|
+ __be16 all;
|
|
+ struct {
|
|
+ __be16 port;
|
|
+ } tcp;
|
|
+ struct {
|
|
+ __be16 port;
|
|
+ } udp;
|
|
+ struct {
|
|
+ u_int8_t type;
|
|
+ u_int8_t code;
|
|
+ } icmp;
|
|
+ struct {
|
|
+ __be16 port;
|
|
+ } dccp;
|
|
+ struct {
|
|
+ __be16 port;
|
|
+ } sctp;
|
|
+ struct {
|
|
+ __be16 key;
|
|
+ } gre;
|
|
+ } u;
|
|
+ u_int8_t protonum;
|
|
+ u_int8_t dir;
|
|
+ } dst;
|
|
+};
|
|
+
|
|
+struct nf_conntrack_tuple_hash {
|
|
+ struct hlist_nulls_node hnnode;
|
|
+ struct nf_conntrack_tuple tuple;
|
|
+};
|
|
+
|
|
+typedef u32 u_int32_t;
|
|
+
|
|
+struct nf_ct_ext;
|
|
+
|
|
+typedef u64 u_int64_t;
|
|
+
|
|
+struct nf_ct_dccp {
|
|
+ u_int8_t role[2];
|
|
+ u_int8_t state;
|
|
+ u_int8_t last_pkt;
|
|
+ u_int8_t last_dir;
|
|
+ u_int64_t handshake_seq;
|
|
+};
|
|
+
|
|
+struct ip_ct_sctp {
|
|
+ enum sctp_conntrack state;
|
|
+ __be32 vtag[2];
|
|
+};
|
|
+
|
|
+struct ip_ct_tcp_state {
|
|
+ u_int32_t td_end;
|
|
+ u_int32_t td_maxend;
|
|
+ u_int32_t td_maxwin;
|
|
+ u_int32_t td_maxack;
|
|
+ u_int8_t td_scale;
|
|
+ u_int8_t flags;
|
|
+};
|
|
+
|
|
+struct ip_ct_tcp {
|
|
+ struct ip_ct_tcp_state seen[2];
|
|
+ u_int8_t state;
|
|
+ u_int8_t last_dir;
|
|
+ u_int8_t retrans;
|
|
+ u_int8_t last_index;
|
|
+ u_int32_t last_seq;
|
|
+ u_int32_t last_ack;
|
|
+ u_int32_t last_end;
|
|
+ u_int16_t last_win;
|
|
+ u_int8_t last_wscale;
|
|
+ u_int8_t last_flags;
|
|
+};
|
|
+
|
|
+struct nf_ct_gre {
|
|
+ unsigned int stream_timeout;
|
|
+ unsigned int timeout;
|
|
+};
|
|
+
|
|
+union nf_conntrack_proto {
|
|
+ struct nf_ct_dccp dccp;
|
|
+ struct ip_ct_sctp sctp;
|
|
+ struct ip_ct_tcp tcp;
|
|
+ struct nf_ct_gre gre;
|
|
+ unsigned int tmpl_padto;
|
|
+};
|
|
+
|
|
+struct nf_conn {
|
|
+ struct nf_conntrack ct_general;
|
|
+ spinlock_t lock;
|
|
+ u16 cpu;
|
|
+ struct nf_conntrack_zone zone;
|
|
+ struct nf_conntrack_tuple_hash tuplehash[2];
|
|
+ long unsigned int status;
|
|
+ u32 timeout;
|
|
+ possible_net_t ct_net;
|
|
+ struct hlist_node nat_bysource;
|
|
+ u8 __nfct_init_offset[0];
|
|
+ struct nf_conn *master;
|
|
+ u_int32_t mark;
|
|
+ u_int32_t secmark;
|
|
+ struct nf_ct_ext *ext;
|
|
+ union nf_conntrack_proto proto;
|
|
+};
|
|
+
|
|
+struct nf_ct_hook {
|
|
+ int (*update)(struct net *, struct sk_buff *);
|
|
+ void (*destroy)(struct nf_conntrack *);
|
|
+ bool (*get_tuple_skb)(struct nf_conntrack_tuple *, const struct sk_buff *);
|
|
+};
|
|
+
|
|
+struct nfnl_ct_hook {
|
|
+ struct nf_conn * (*get_ct)(const struct sk_buff *, enum ip_conntrack_info *);
|
|
+ size_t (*build_size)(const struct nf_conn *);
|
|
+ int (*build)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, u_int16_t, u_int16_t);
|
|
+ int (*parse)(const struct nlattr *, struct nf_conn *);
|
|
+ int (*attach_expect)(const struct nlattr *, struct nf_conn *, u32, u32);
|
|
+ void (*seq_adjust)(struct sk_buff *, struct nf_conn *, enum ip_conntrack_info, s32);
|
|
+};
|
|
+
|
|
+enum nf_ip_hook_priorities {
|
|
+ NF_IP_PRI_FIRST = -2147483648,
|
|
+ NF_IP_PRI_RAW_BEFORE_DEFRAG = -450,
|
|
+ NF_IP_PRI_CONNTRACK_DEFRAG = -400,
|
|
+ NF_IP_PRI_RAW = -300,
|
|
+ NF_IP_PRI_SELINUX_FIRST = -225,
|
|
+ NF_IP_PRI_CONNTRACK = -200,
|
|
+ NF_IP_PRI_MANGLE = -150,
|
|
+ NF_IP_PRI_NAT_DST = -100,
|
|
+ NF_IP_PRI_FILTER = 0,
|
|
+ NF_IP_PRI_SECURITY = 50,
|
|
+ NF_IP_PRI_NAT_SRC = 100,
|
|
+ NF_IP_PRI_SELINUX_LAST = 225,
|
|
+ NF_IP_PRI_CONNTRACK_HELPER = 300,
|
|
+ NF_IP_PRI_CONNTRACK_CONFIRM = 2147483647,
|
|
+ NF_IP_PRI_LAST = 2147483647,
|
|
+};
|
|
+
|
|
+enum nf_ip6_hook_priorities {
|
|
+ NF_IP6_PRI_FIRST = -2147483648,
|
|
+ NF_IP6_PRI_RAW_BEFORE_DEFRAG = -450,
|
|
+ NF_IP6_PRI_CONNTRACK_DEFRAG = -400,
|
|
+ NF_IP6_PRI_RAW = -300,
|
|
+ NF_IP6_PRI_SELINUX_FIRST = -225,
|
|
+ NF_IP6_PRI_CONNTRACK = -200,
|
|
+ NF_IP6_PRI_MANGLE = -150,
|
|
+ NF_IP6_PRI_NAT_DST = -100,
|
|
+ NF_IP6_PRI_FILTER = 0,
|
|
+ NF_IP6_PRI_SECURITY = 50,
|
|
+ NF_IP6_PRI_NAT_SRC = 100,
|
|
+ NF_IP6_PRI_SELINUX_LAST = 225,
|
|
+ NF_IP6_PRI_CONNTRACK_HELPER = 300,
|
|
+ NF_IP6_PRI_LAST = 2147483647,
|
|
+};
|
|
+
|
|
+struct nf_ipv6_ops {
|
|
+ int (*chk_addr)(struct net *, const struct in6_addr *, const struct net_device *, int);
|
|
+ void (*route_input)(struct sk_buff *);
|
|
+ int (*fragment)(struct net *, struct sock *, struct sk_buff *, int (*)(struct net *, struct sock *, struct sk_buff *));
|
|
+ int (*route)(struct net *, struct dst_entry **, struct flowi *, bool);
|
|
+ int (*reroute)(struct sk_buff *, const struct nf_queue_entry *);
|
|
+};
|
|
+
|
|
+struct nf_queue_entry {
|
|
+ struct list_head list;
|
|
+ struct sk_buff *skb;
|
|
+ unsigned int id;
|
|
+ unsigned int hook_index;
|
|
+ struct nf_hook_state state;
|
|
+ u16 size;
|
|
+};
|
|
+
|
|
+struct tty_file_private {
|
|
+ struct tty_struct *tty;
|
|
+ struct file *file;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct socket_alloc {
|
|
+ struct socket socket;
|
|
+ struct inode vfs_inode;
|
|
+};
|
|
+
|
|
+struct iphdr {
|
|
+ __u8 ihl: 4;
|
|
+ __u8 version: 4;
|
|
+ __u8 tos;
|
|
+ __be16 tot_len;
|
|
+ __be16 id;
|
|
+ __be16 frag_off;
|
|
+ __u8 ttl;
|
|
+ __u8 protocol;
|
|
+ __sum16 check;
|
|
+ __be32 saddr;
|
|
+ __be32 daddr;
|
|
+};
|
|
+
|
|
+struct ipv6hdr {
|
|
+ __u8 priority: 4;
|
|
+ __u8 version: 4;
|
|
+ __u8 flow_lbl[3];
|
|
+ __be16 payload_len;
|
|
+ __u8 nexthdr;
|
|
+ __u8 hop_limit;
|
|
+ struct in6_addr saddr;
|
|
+ struct in6_addr daddr;
|
|
+};
|
|
+
|
|
+struct tcphdr {
|
|
+ __be16 source;
|
|
+ __be16 dest;
|
|
+ __be32 seq;
|
|
+ __be32 ack_seq;
|
|
+ __u16 res1: 4;
|
|
+ __u16 doff: 4;
|
|
+ __u16 fin: 1;
|
|
+ __u16 syn: 1;
|
|
+ __u16 rst: 1;
|
|
+ __u16 psh: 1;
|
|
+ __u16 ack: 1;
|
|
+ __u16 urg: 1;
|
|
+ __u16 ece: 1;
|
|
+ __u16 cwr: 1;
|
|
+ __be16 window;
|
|
+ __sum16 check;
|
|
+ __be16 urg_ptr;
|
|
+};
|
|
+
|
|
+struct udphdr {
|
|
+ __be16 source;
|
|
+ __be16 dest;
|
|
+ __be16 len;
|
|
+ __sum16 check;
|
|
+};
|
|
+
|
|
+struct icmp_err {
|
|
+ int errno;
|
|
+ unsigned int fatal: 1;
|
|
+};
|
|
+
|
|
+struct netlbl_lsm_cache {
|
|
+ refcount_t refcount;
|
|
+ void (*free)(const void *);
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+struct netlbl_lsm_catmap {
|
|
+ u32 startbit;
|
|
+ u64 bitmap[4];
|
|
+ struct netlbl_lsm_catmap *next;
|
|
+};
|
|
+
|
|
+struct netlbl_lsm_secattr {
|
|
+ u32 flags;
|
|
+ u32 type;
|
|
+ char *domain;
|
|
+ struct netlbl_lsm_cache *cache;
|
|
+ struct {
|
|
+ struct {
|
|
+ struct netlbl_lsm_catmap *cat;
|
|
+ u32 lvl;
|
|
+ } mls;
|
|
+ u32 secid;
|
|
+ } attr;
|
|
+};
|
|
+
|
|
+struct dccp_hdr {
|
|
+ __be16 dccph_sport;
|
|
+ __be16 dccph_dport;
|
|
+ __u8 dccph_doff;
|
|
+ __u8 dccph_cscov: 4;
|
|
+ __u8 dccph_ccval: 4;
|
|
+ __sum16 dccph_checksum;
|
|
+ __u8 dccph_x: 1;
|
|
+ __u8 dccph_type: 4;
|
|
+ __u8 dccph_reserved: 3;
|
|
+ __u8 dccph_seq2;
|
|
+ __be16 dccph_seq;
|
|
+};
|
|
+
|
|
+enum dccp_state {
|
|
+ DCCP_OPEN = 1,
|
|
+ DCCP_REQUESTING = 2,
|
|
+ DCCP_LISTEN = 10,
|
|
+ DCCP_RESPOND = 3,
|
|
+ DCCP_ACTIVE_CLOSEREQ = 4,
|
|
+ DCCP_PASSIVE_CLOSE = 8,
|
|
+ DCCP_CLOSING = 11,
|
|
+ DCCP_TIME_WAIT = 6,
|
|
+ DCCP_CLOSED = 7,
|
|
+ DCCP_NEW_SYN_RECV = 12,
|
|
+ DCCP_PARTOPEN = 13,
|
|
+ DCCP_PASSIVE_CLOSEREQ = 14,
|
|
+ DCCP_MAX_STATES = 15,
|
|
+};
|
|
+
|
|
+typedef __s32 sctp_assoc_t;
|
|
+
|
|
+enum sctp_msg_flags {
|
|
+ MSG_NOTIFICATION = 32768,
|
|
+};
|
|
+
|
|
+struct sctp_initmsg {
|
|
+ __u16 sinit_num_ostreams;
|
|
+ __u16 sinit_max_instreams;
|
|
+ __u16 sinit_max_attempts;
|
|
+ __u16 sinit_max_init_timeo;
|
|
+};
|
|
+
|
|
+struct sctp_sndrcvinfo {
|
|
+ __u16 sinfo_stream;
|
|
+ __u16 sinfo_ssn;
|
|
+ __u16 sinfo_flags;
|
|
+ __u32 sinfo_ppid;
|
|
+ __u32 sinfo_context;
|
|
+ __u32 sinfo_timetolive;
|
|
+ __u32 sinfo_tsn;
|
|
+ __u32 sinfo_cumtsn;
|
|
+ sctp_assoc_t sinfo_assoc_id;
|
|
+};
|
|
+
|
|
+struct sctp_event_subscribe {
|
|
+ __u8 sctp_data_io_event;
|
|
+ __u8 sctp_association_event;
|
|
+ __u8 sctp_address_event;
|
|
+ __u8 sctp_send_failure_event;
|
|
+ __u8 sctp_peer_error_event;
|
|
+ __u8 sctp_shutdown_event;
|
|
+ __u8 sctp_partial_delivery_event;
|
|
+ __u8 sctp_adaptation_layer_event;
|
|
+ __u8 sctp_authentication_event;
|
|
+ __u8 sctp_sender_dry_event;
|
|
+ __u8 sctp_stream_reset_event;
|
|
+ __u8 sctp_assoc_reset_event;
|
|
+ __u8 sctp_stream_change_event;
|
|
+};
|
|
+
|
|
+struct sctp_rtoinfo {
|
|
+ sctp_assoc_t srto_assoc_id;
|
|
+ __u32 srto_initial;
|
|
+ __u32 srto_max;
|
|
+ __u32 srto_min;
|
|
+};
|
|
+
|
|
+struct sctp_assocparams {
|
|
+ sctp_assoc_t sasoc_assoc_id;
|
|
+ __u16 sasoc_asocmaxrxt;
|
|
+ __u16 sasoc_number_peer_destinations;
|
|
+ __u32 sasoc_peer_rwnd;
|
|
+ __u32 sasoc_local_rwnd;
|
|
+ __u32 sasoc_cookie_life;
|
|
+};
|
|
+
|
|
+struct sctp_paddrparams {
|
|
+ sctp_assoc_t spp_assoc_id;
|
|
+ struct __kernel_sockaddr_storage spp_address;
|
|
+ __u32 spp_hbinterval;
|
|
+ __u16 spp_pathmaxrxt;
|
|
+ __u32 spp_pathmtu;
|
|
+ __u32 spp_sackdelay;
|
|
+ __u32 spp_flags;
|
|
+ __u32 spp_ipv6_flowlabel;
|
|
+ __u8 spp_dscp;
|
|
+ char: 8;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct sctphdr {
|
|
+ __be16 source;
|
|
+ __be16 dest;
|
|
+ __be32 vtag;
|
|
+ __le32 checksum;
|
|
+};
|
|
+
|
|
+struct sctp_chunkhdr {
|
|
+ __u8 type;
|
|
+ __u8 flags;
|
|
+ __be16 length;
|
|
+};
|
|
+
|
|
+enum sctp_cid {
|
|
+ SCTP_CID_DATA = 0,
|
|
+ SCTP_CID_INIT = 1,
|
|
+ SCTP_CID_INIT_ACK = 2,
|
|
+ SCTP_CID_SACK = 3,
|
|
+ SCTP_CID_HEARTBEAT = 4,
|
|
+ SCTP_CID_HEARTBEAT_ACK = 5,
|
|
+ SCTP_CID_ABORT = 6,
|
|
+ SCTP_CID_SHUTDOWN = 7,
|
|
+ SCTP_CID_SHUTDOWN_ACK = 8,
|
|
+ SCTP_CID_ERROR = 9,
|
|
+ SCTP_CID_COOKIE_ECHO = 10,
|
|
+ SCTP_CID_COOKIE_ACK = 11,
|
|
+ SCTP_CID_ECN_ECNE = 12,
|
|
+ SCTP_CID_ECN_CWR = 13,
|
|
+ SCTP_CID_SHUTDOWN_COMPLETE = 14,
|
|
+ SCTP_CID_AUTH = 15,
|
|
+ SCTP_CID_I_DATA = 64,
|
|
+ SCTP_CID_FWD_TSN = 192,
|
|
+ SCTP_CID_ASCONF = 193,
|
|
+ SCTP_CID_I_FWD_TSN = 194,
|
|
+ SCTP_CID_ASCONF_ACK = 128,
|
|
+ SCTP_CID_RECONF = 130,
|
|
+};
|
|
+
|
|
+struct sctp_paramhdr {
|
|
+ __be16 type;
|
|
+ __be16 length;
|
|
+};
|
|
+
|
|
+enum sctp_param {
|
|
+ SCTP_PARAM_HEARTBEAT_INFO = 256,
|
|
+ SCTP_PARAM_IPV4_ADDRESS = 1280,
|
|
+ SCTP_PARAM_IPV6_ADDRESS = 1536,
|
|
+ SCTP_PARAM_STATE_COOKIE = 1792,
|
|
+ SCTP_PARAM_UNRECOGNIZED_PARAMETERS = 2048,
|
|
+ SCTP_PARAM_COOKIE_PRESERVATIVE = 2304,
|
|
+ SCTP_PARAM_HOST_NAME_ADDRESS = 2816,
|
|
+ SCTP_PARAM_SUPPORTED_ADDRESS_TYPES = 3072,
|
|
+ SCTP_PARAM_ECN_CAPABLE = 128,
|
|
+ SCTP_PARAM_RANDOM = 640,
|
|
+ SCTP_PARAM_CHUNKS = 896,
|
|
+ SCTP_PARAM_HMAC_ALGO = 1152,
|
|
+ SCTP_PARAM_SUPPORTED_EXT = 2176,
|
|
+ SCTP_PARAM_FWD_TSN_SUPPORT = 192,
|
|
+ SCTP_PARAM_ADD_IP = 448,
|
|
+ SCTP_PARAM_DEL_IP = 704,
|
|
+ SCTP_PARAM_ERR_CAUSE = 960,
|
|
+ SCTP_PARAM_SET_PRIMARY = 1216,
|
|
+ SCTP_PARAM_SUCCESS_REPORT = 1472,
|
|
+ SCTP_PARAM_ADAPTATION_LAYER_IND = 1728,
|
|
+ SCTP_PARAM_RESET_OUT_REQUEST = 3328,
|
|
+ SCTP_PARAM_RESET_IN_REQUEST = 3584,
|
|
+ SCTP_PARAM_RESET_TSN_REQUEST = 3840,
|
|
+ SCTP_PARAM_RESET_RESPONSE = 4096,
|
|
+ SCTP_PARAM_RESET_ADD_OUT_STREAMS = 4352,
|
|
+ SCTP_PARAM_RESET_ADD_IN_STREAMS = 4608,
|
|
+};
|
|
+
|
|
+struct sctp_datahdr {
|
|
+ __be32 tsn;
|
|
+ __be16 stream;
|
|
+ __be16 ssn;
|
|
+ __u32 ppid;
|
|
+ __u8 payload[0];
|
|
+};
|
|
+
|
|
+struct sctp_idatahdr {
|
|
+ __be32 tsn;
|
|
+ __be16 stream;
|
|
+ __be16 reserved;
|
|
+ __be32 mid;
|
|
+ union {
|
|
+ __u32 ppid;
|
|
+ __be32 fsn;
|
|
+ };
|
|
+ __u8 payload[0];
|
|
+};
|
|
+
|
|
+struct sctp_inithdr {
|
|
+ __be32 init_tag;
|
|
+ __be32 a_rwnd;
|
|
+ __be16 num_outbound_streams;
|
|
+ __be16 num_inbound_streams;
|
|
+ __be32 initial_tsn;
|
|
+ __u8 params[0];
|
|
+};
|
|
+
|
|
+struct sctp_init_chunk {
|
|
+ struct sctp_chunkhdr chunk_hdr;
|
|
+ struct sctp_inithdr init_hdr;
|
|
+};
|
|
+
|
|
+struct sctp_ipv4addr_param {
|
|
+ struct sctp_paramhdr param_hdr;
|
|
+ struct in_addr addr;
|
|
+};
|
|
+
|
|
+struct sctp_ipv6addr_param {
|
|
+ struct sctp_paramhdr param_hdr;
|
|
+ struct in6_addr addr;
|
|
+};
|
|
+
|
|
+struct sctp_cookie_preserve_param {
|
|
+ struct sctp_paramhdr param_hdr;
|
|
+ __be32 lifespan_increment;
|
|
+};
|
|
+
|
|
+struct sctp_hostname_param {
|
|
+ struct sctp_paramhdr param_hdr;
|
|
+ uint8_t hostname[0];
|
|
+};
|
|
+
|
|
+struct sctp_supported_addrs_param {
|
|
+ struct sctp_paramhdr param_hdr;
|
|
+ __be16 types[0];
|
|
+};
|
|
+
|
|
+struct sctp_adaptation_ind_param {
|
|
+ struct sctp_paramhdr param_hdr;
|
|
+ __be32 adaptation_ind;
|
|
+};
|
|
+
|
|
+struct sctp_supported_ext_param {
|
|
+ struct sctp_paramhdr param_hdr;
|
|
+ __u8 chunks[0];
|
|
+};
|
|
+
|
|
+struct sctp_random_param {
|
|
+ struct sctp_paramhdr param_hdr;
|
|
+ __u8 random_val[0];
|
|
+};
|
|
+
|
|
+struct sctp_chunks_param {
|
|
+ struct sctp_paramhdr param_hdr;
|
|
+ __u8 chunks[0];
|
|
+};
|
|
+
|
|
+struct sctp_hmac_algo_param {
|
|
+ struct sctp_paramhdr param_hdr;
|
|
+ __be16 hmac_ids[0];
|
|
+};
|
|
+
|
|
+struct sctp_cookie_param {
|
|
+ struct sctp_paramhdr p;
|
|
+ __u8 body[0];
|
|
+};
|
|
+
|
|
+struct sctp_gap_ack_block {
|
|
+ __be16 start;
|
|
+ __be16 end;
|
|
+};
|
|
+
|
|
+union sctp_sack_variable {
|
|
+ struct sctp_gap_ack_block gab;
|
|
+ __be32 dup;
|
|
+};
|
|
+
|
|
+struct sctp_sackhdr {
|
|
+ __be32 cum_tsn_ack;
|
|
+ __be32 a_rwnd;
|
|
+ __be16 num_gap_ack_blocks;
|
|
+ __be16 num_dup_tsns;
|
|
+ union sctp_sack_variable variable[0];
|
|
+};
|
|
+
|
|
+struct sctp_heartbeathdr {
|
|
+ struct sctp_paramhdr info;
|
|
+};
|
|
+
|
|
+struct sctp_shutdownhdr {
|
|
+ __be32 cum_tsn_ack;
|
|
+};
|
|
+
|
|
+struct sctp_errhdr {
|
|
+ __be16 cause;
|
|
+ __be16 length;
|
|
+ __u8 variable[0];
|
|
+};
|
|
+
|
|
+struct sctp_ecnehdr {
|
|
+ __be32 lowest_tsn;
|
|
+};
|
|
+
|
|
+struct sctp_cwrhdr {
|
|
+ __be32 lowest_tsn;
|
|
+};
|
|
+
|
|
+struct sctp_fwdtsn_skip {
|
|
+ __be16 stream;
|
|
+ __be16 ssn;
|
|
+};
|
|
+
|
|
+struct sctp_fwdtsn_hdr {
|
|
+ __be32 new_cum_tsn;
|
|
+ struct sctp_fwdtsn_skip skip[0];
|
|
+};
|
|
+
|
|
+struct sctp_ifwdtsn_skip {
|
|
+ __be16 stream;
|
|
+ __u8 reserved;
|
|
+ __u8 flags;
|
|
+ __be32 mid;
|
|
+};
|
|
+
|
|
+struct sctp_ifwdtsn_hdr {
|
|
+ __be32 new_cum_tsn;
|
|
+ struct sctp_ifwdtsn_skip skip[0];
|
|
+};
|
|
+
|
|
+struct sctp_addip_param {
|
|
+ struct sctp_paramhdr param_hdr;
|
|
+ __be32 crr_id;
|
|
+};
|
|
+
|
|
+struct sctp_addiphdr {
|
|
+ __be32 serial;
|
|
+ __u8 params[0];
|
|
+};
|
|
+
|
|
+struct sctp_authhdr {
|
|
+ __be16 shkey_id;
|
|
+ __be16 hmac_id;
|
|
+ __u8 hmac[0];
|
|
+};
|
|
+
|
|
+union sctp_addr {
|
|
+ struct sockaddr_in v4;
|
|
+ struct sockaddr_in6 v6;
|
|
+ struct sockaddr sa;
|
|
+};
|
|
+
|
|
+struct sctp_cookie {
|
|
+ __u32 my_vtag;
|
|
+ __u32 peer_vtag;
|
|
+ __u32 my_ttag;
|
|
+ __u32 peer_ttag;
|
|
+ ktime_t expiration;
|
|
+ __u16 sinit_num_ostreams;
|
|
+ __u16 sinit_max_instreams;
|
|
+ __u32 initial_tsn;
|
|
+ union sctp_addr peer_addr;
|
|
+ __u16 my_port;
|
|
+ __u8 prsctp_capable;
|
|
+ __u8 padding;
|
|
+ __u32 adaptation_ind;
|
|
+ __u8 auth_random[36];
|
|
+ __u8 auth_hmacs[10];
|
|
+ __u8 auth_chunks[20];
|
|
+ __u32 raw_addr_list_len;
|
|
+ struct sctp_init_chunk peer_init[0];
|
|
+};
|
|
+
|
|
+struct sctp_tsnmap {
|
|
+ long unsigned int *tsn_map;
|
|
+ __u32 base_tsn;
|
|
+ __u32 cumulative_tsn_ack_point;
|
|
+ __u32 max_tsn_seen;
|
|
+ __u16 len;
|
|
+ __u16 pending_data;
|
|
+ __u16 num_dup_tsns;
|
|
+ __be32 dup_tsns[16];
|
|
+};
|
|
+
|
|
+struct sctp_inithdr_host {
|
|
+ __u32 init_tag;
|
|
+ __u32 a_rwnd;
|
|
+ __u16 num_outbound_streams;
|
|
+ __u16 num_inbound_streams;
|
|
+ __u32 initial_tsn;
|
|
+};
|
|
+
|
|
+enum sctp_state {
|
|
+ SCTP_STATE_CLOSED = 0,
|
|
+ SCTP_STATE_COOKIE_WAIT = 1,
|
|
+ SCTP_STATE_COOKIE_ECHOED = 2,
|
|
+ SCTP_STATE_ESTABLISHED = 3,
|
|
+ SCTP_STATE_SHUTDOWN_PENDING = 4,
|
|
+ SCTP_STATE_SHUTDOWN_SENT = 5,
|
|
+ SCTP_STATE_SHUTDOWN_RECEIVED = 6,
|
|
+ SCTP_STATE_SHUTDOWN_ACK_SENT = 7,
|
|
+};
|
|
+
|
|
+struct sctp_stream_out;
|
|
+
|
|
+struct sctp_stream_out_ext;
|
|
+
|
|
+struct sctp_stream_interleave;
|
|
+
|
|
+struct sctp_stream {
|
|
+ struct flex_array *out;
|
|
+ struct flex_array *in;
|
|
+ __u16 outcnt;
|
|
+ __u16 incnt;
|
|
+ struct sctp_stream_out *out_curr;
|
|
+ union {
|
|
+ struct {
|
|
+ struct list_head prio_list;
|
|
+ };
|
|
+ struct {
|
|
+ struct list_head rr_list;
|
|
+ struct sctp_stream_out_ext *rr_next;
|
|
+ };
|
|
+ };
|
|
+ struct sctp_stream_interleave *si;
|
|
+};
|
|
+
|
|
+struct sctp_sched_ops;
|
|
+
|
|
+struct sctp_association;
|
|
+
|
|
+struct sctp_outq {
|
|
+ struct sctp_association *asoc;
|
|
+ struct list_head out_chunk_list;
|
|
+ struct sctp_sched_ops *sched;
|
|
+ unsigned int out_qlen;
|
|
+ unsigned int error;
|
|
+ struct list_head control_chunk_list;
|
|
+ struct list_head sacked;
|
|
+ struct list_head retransmit;
|
|
+ struct list_head abandoned;
|
|
+ __u32 outstanding_bytes;
|
|
+ char fast_rtx;
|
|
+ char cork;
|
|
+};
|
|
+
|
|
+struct sctp_ulpq {
|
|
+ char pd_mode;
|
|
+ struct sctp_association *asoc;
|
|
+ struct sk_buff_head reasm;
|
|
+ struct sk_buff_head reasm_uo;
|
|
+ struct sk_buff_head lobby;
|
|
+};
|
|
+
|
|
+struct sctp_priv_assoc_stats {
|
|
+ struct __kernel_sockaddr_storage obs_rto_ipaddr;
|
|
+ __u64 max_obs_rto;
|
|
+ __u64 isacks;
|
|
+ __u64 osacks;
|
|
+ __u64 opackets;
|
|
+ __u64 ipackets;
|
|
+ __u64 rtxchunks;
|
|
+ __u64 outofseqtsns;
|
|
+ __u64 idupchunks;
|
|
+ __u64 gapcnt;
|
|
+ __u64 ouodchunks;
|
|
+ __u64 iuodchunks;
|
|
+ __u64 oodchunks;
|
|
+ __u64 iodchunks;
|
|
+ __u64 octrlchunks;
|
|
+ __u64 ictrlchunks;
|
|
+};
|
|
+
|
|
+struct sctp_transport;
|
|
+
|
|
+struct sctp_auth_bytes;
|
|
+
|
|
+struct sctp_shared_key;
|
|
+
|
|
+struct sctp_association {
|
|
+ struct sctp_ep_common base;
|
|
+ struct list_head asocs;
|
|
+ sctp_assoc_t assoc_id;
|
|
+ struct sctp_endpoint *ep;
|
|
+ struct sctp_cookie c;
|
|
+ struct {
|
|
+ struct list_head transport_addr_list;
|
|
+ __u32 rwnd;
|
|
+ __u16 transport_count;
|
|
+ __u16 port;
|
|
+ struct sctp_transport *primary_path;
|
|
+ union sctp_addr primary_addr;
|
|
+ struct sctp_transport *active_path;
|
|
+ struct sctp_transport *retran_path;
|
|
+ struct sctp_transport *last_sent_to;
|
|
+ struct sctp_transport *last_data_from;
|
|
+ struct sctp_tsnmap tsn_map;
|
|
+ __be16 addip_disabled_mask;
|
|
+ __u8 ecn_capable: 1;
|
|
+ __u8 ipv4_address: 1;
|
|
+ __u8 ipv6_address: 1;
|
|
+ __u8 hostname_address: 1;
|
|
+ __u8 asconf_capable: 1;
|
|
+ __u8 prsctp_capable: 1;
|
|
+ __u8 reconf_capable: 1;
|
|
+ __u8 auth_capable: 1;
|
|
+ __u8 sack_needed: 1;
|
|
+ __u8 sack_generation: 1;
|
|
+ __u8 zero_window_announced: 1;
|
|
+ __u32 sack_cnt;
|
|
+ __u32 adaptation_ind;
|
|
+ struct sctp_inithdr_host i;
|
|
+ void *cookie;
|
|
+ int cookie_len;
|
|
+ __u32 addip_serial;
|
|
+ struct sctp_random_param *peer_random;
|
|
+ struct sctp_chunks_param *peer_chunks;
|
|
+ struct sctp_hmac_algo_param *peer_hmacs;
|
|
+ } peer;
|
|
+ enum sctp_state state;
|
|
+ int overall_error_count;
|
|
+ ktime_t cookie_life;
|
|
+ long unsigned int rto_initial;
|
|
+ long unsigned int rto_max;
|
|
+ long unsigned int rto_min;
|
|
+ int max_burst;
|
|
+ int max_retrans;
|
|
+ int pf_retrans;
|
|
+ __u16 max_init_attempts;
|
|
+ __u16 init_retries;
|
|
+ long unsigned int max_init_timeo;
|
|
+ long unsigned int hbinterval;
|
|
+ __u16 pathmaxrxt;
|
|
+ __u32 flowlabel;
|
|
+ __u8 dscp;
|
|
+ __u8 pmtu_pending;
|
|
+ __u32 pathmtu;
|
|
+ __u32 param_flags;
|
|
+ __u32 sackfreq;
|
|
+ long unsigned int sackdelay;
|
|
+ long unsigned int timeouts[11];
|
|
+ struct timer_list timers[11];
|
|
+ struct sctp_transport *shutdown_last_sent_to;
|
|
+ struct sctp_transport *init_last_sent_to;
|
|
+ int shutdown_retries;
|
|
+ __u32 next_tsn;
|
|
+ __u32 ctsn_ack_point;
|
|
+ __u32 adv_peer_ack_point;
|
|
+ __u32 highest_sacked;
|
|
+ __u32 fast_recovery_exit;
|
|
+ __u8 fast_recovery;
|
|
+ __u16 unack_data;
|
|
+ __u32 rtx_data_chunks;
|
|
+ __u32 rwnd;
|
|
+ __u32 a_rwnd;
|
|
+ __u32 rwnd_over;
|
|
+ __u32 rwnd_press;
|
|
+ int sndbuf_used;
|
|
+ atomic_t rmem_alloc;
|
|
+ wait_queue_head_t wait;
|
|
+ __u32 frag_point;
|
|
+ __u32 user_frag;
|
|
+ int init_err_counter;
|
|
+ int init_cycle;
|
|
+ __u16 default_stream;
|
|
+ __u16 default_flags;
|
|
+ __u32 default_ppid;
|
|
+ __u32 default_context;
|
|
+ __u32 default_timetolive;
|
|
+ __u32 default_rcv_context;
|
|
+ struct sctp_stream stream;
|
|
+ struct sctp_outq outqueue;
|
|
+ struct sctp_ulpq ulpq;
|
|
+ __u32 last_ecne_tsn;
|
|
+ __u32 last_cwr_tsn;
|
|
+ int numduptsns;
|
|
+ struct sctp_chunk *addip_last_asconf;
|
|
+ struct list_head asconf_ack_list;
|
|
+ struct list_head addip_chunk_list;
|
|
+ __u32 addip_serial;
|
|
+ int src_out_of_asoc_ok;
|
|
+ union sctp_addr *asconf_addr_del_pending;
|
|
+ struct sctp_transport *new_transport;
|
|
+ struct list_head endpoint_shared_keys;
|
|
+ struct sctp_auth_bytes *asoc_shared_key;
|
|
+ struct sctp_shared_key *shkey;
|
|
+ __u16 default_hmac_id;
|
|
+ __u16 active_key_id;
|
|
+ __u8 need_ecne: 1;
|
|
+ __u8 temp: 1;
|
|
+ __u8 force_delay: 1;
|
|
+ __u8 intl_enable: 1;
|
|
+ __u8 prsctp_enable: 1;
|
|
+ __u8 reconf_enable: 1;
|
|
+ __u8 strreset_enable;
|
|
+ __u8 strreset_outstanding;
|
|
+ __u32 strreset_outseq;
|
|
+ __u32 strreset_inseq;
|
|
+ __u32 strreset_result[2];
|
|
+ struct sctp_chunk *strreset_chunk;
|
|
+ struct sctp_priv_assoc_stats stats;
|
|
+ int sent_cnt_removable;
|
|
+ __u64 abandoned_unsent[3];
|
|
+ __u64 abandoned_sent[3];
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct sctp_auth_bytes {
|
|
+ refcount_t refcnt;
|
|
+ __u32 len;
|
|
+ __u8 data[0];
|
|
+};
|
|
+
|
|
+struct sctp_shared_key {
|
|
+ struct list_head key_list;
|
|
+ struct sctp_auth_bytes *key;
|
|
+ refcount_t refcnt;
|
|
+ __u16 key_id;
|
|
+ __u8 deactivated;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SCTP_MAX_STREAM = 65535,
|
|
+};
|
|
+
|
|
+enum sctp_event_timeout {
|
|
+ SCTP_EVENT_TIMEOUT_NONE = 0,
|
|
+ SCTP_EVENT_TIMEOUT_T1_COOKIE = 1,
|
|
+ SCTP_EVENT_TIMEOUT_T1_INIT = 2,
|
|
+ SCTP_EVENT_TIMEOUT_T2_SHUTDOWN = 3,
|
|
+ SCTP_EVENT_TIMEOUT_T3_RTX = 4,
|
|
+ SCTP_EVENT_TIMEOUT_T4_RTO = 5,
|
|
+ SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD = 6,
|
|
+ SCTP_EVENT_TIMEOUT_HEARTBEAT = 7,
|
|
+ SCTP_EVENT_TIMEOUT_RECONF = 8,
|
|
+ SCTP_EVENT_TIMEOUT_SACK = 9,
|
|
+ SCTP_EVENT_TIMEOUT_AUTOCLOSE = 10,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SCTP_MAX_DUP_TSNS = 16,
|
|
+};
|
|
+
|
|
+enum sctp_scope {
|
|
+ SCTP_SCOPE_GLOBAL = 0,
|
|
+ SCTP_SCOPE_PRIVATE = 1,
|
|
+ SCTP_SCOPE_LINK = 2,
|
|
+ SCTP_SCOPE_LOOPBACK = 3,
|
|
+ SCTP_SCOPE_UNUSABLE = 4,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SCTP_AUTH_HMAC_ID_RESERVED_0 = 0,
|
|
+ SCTP_AUTH_HMAC_ID_SHA1 = 1,
|
|
+ SCTP_AUTH_HMAC_ID_RESERVED_2 = 2,
|
|
+ SCTP_AUTH_HMAC_ID_SHA256 = 3,
|
|
+ __SCTP_AUTH_HMAC_MAX = 4,
|
|
+};
|
|
+
|
|
+struct sctp_ulpevent {
|
|
+ struct sctp_association *asoc;
|
|
+ struct sctp_chunk *chunk;
|
|
+ unsigned int rmem_len;
|
|
+ union {
|
|
+ __u32 mid;
|
|
+ __u16 ssn;
|
|
+ };
|
|
+ union {
|
|
+ __u32 ppid;
|
|
+ __u32 fsn;
|
|
+ };
|
|
+ __u32 tsn;
|
|
+ __u32 cumtsn;
|
|
+ __u16 stream;
|
|
+ __u16 flags;
|
|
+ __u16 msg_flags;
|
|
+} __attribute__((packed));
|
|
+
|
|
+union sctp_addr_param;
|
|
+
|
|
+union sctp_params {
|
|
+ void *v;
|
|
+ struct sctp_paramhdr *p;
|
|
+ struct sctp_cookie_preserve_param *life;
|
|
+ struct sctp_hostname_param *dns;
|
|
+ struct sctp_cookie_param *cookie;
|
|
+ struct sctp_supported_addrs_param *sat;
|
|
+ struct sctp_ipv4addr_param *v4;
|
|
+ struct sctp_ipv6addr_param *v6;
|
|
+ union sctp_addr_param *addr;
|
|
+ struct sctp_adaptation_ind_param *aind;
|
|
+ struct sctp_supported_ext_param *ext;
|
|
+ struct sctp_random_param *random;
|
|
+ struct sctp_chunks_param *chunks;
|
|
+ struct sctp_hmac_algo_param *hmac_algo;
|
|
+ struct sctp_addip_param *addip;
|
|
+};
|
|
+
|
|
+struct sctp_sender_hb_info;
|
|
+
|
|
+struct sctp_signed_cookie;
|
|
+
|
|
+struct sctp_datamsg;
|
|
+
|
|
+struct sctp_chunk {
|
|
+ struct list_head list;
|
|
+ refcount_t refcnt;
|
|
+ int sent_count;
|
|
+ union {
|
|
+ struct list_head transmitted_list;
|
|
+ struct list_head stream_list;
|
|
+ };
|
|
+ struct list_head frag_list;
|
|
+ struct sk_buff *skb;
|
|
+ union {
|
|
+ struct sk_buff *head_skb;
|
|
+ struct sctp_shared_key *shkey;
|
|
+ };
|
|
+ union sctp_params param_hdr;
|
|
+ union {
|
|
+ __u8 *v;
|
|
+ struct sctp_datahdr *data_hdr;
|
|
+ struct sctp_inithdr *init_hdr;
|
|
+ struct sctp_sackhdr *sack_hdr;
|
|
+ struct sctp_heartbeathdr *hb_hdr;
|
|
+ struct sctp_sender_hb_info *hbs_hdr;
|
|
+ struct sctp_shutdownhdr *shutdown_hdr;
|
|
+ struct sctp_signed_cookie *cookie_hdr;
|
|
+ struct sctp_ecnehdr *ecne_hdr;
|
|
+ struct sctp_cwrhdr *ecn_cwr_hdr;
|
|
+ struct sctp_errhdr *err_hdr;
|
|
+ struct sctp_addiphdr *addip_hdr;
|
|
+ struct sctp_fwdtsn_hdr *fwdtsn_hdr;
|
|
+ struct sctp_authhdr *auth_hdr;
|
|
+ struct sctp_idatahdr *idata_hdr;
|
|
+ struct sctp_ifwdtsn_hdr *ifwdtsn_hdr;
|
|
+ } subh;
|
|
+ __u8 *chunk_end;
|
|
+ struct sctp_chunkhdr *chunk_hdr;
|
|
+ struct sctphdr *sctp_hdr;
|
|
+ struct sctp_sndrcvinfo sinfo;
|
|
+ struct sctp_association *asoc;
|
|
+ struct sctp_ep_common *rcvr;
|
|
+ long unsigned int sent_at;
|
|
+ union sctp_addr source;
|
|
+ union sctp_addr dest;
|
|
+ struct sctp_datamsg *msg;
|
|
+ struct sctp_transport *transport;
|
|
+ struct sk_buff *auth_chunk;
|
|
+ __u16 rtt_in_progress: 1;
|
|
+ __u16 has_tsn: 1;
|
|
+ __u16 has_ssn: 1;
|
|
+ __u16 singleton: 1;
|
|
+ __u16 end_of_packet: 1;
|
|
+ __u16 ecn_ce_done: 1;
|
|
+ __u16 pdiscard: 1;
|
|
+ __u16 tsn_gap_acked: 1;
|
|
+ __u16 data_accepted: 1;
|
|
+ __u16 auth: 1;
|
|
+ __u16 has_asconf: 1;
|
|
+ __u16 tsn_missing_report: 2;
|
|
+ __u16 fast_retransmit: 2;
|
|
+};
|
|
+
|
|
+struct sctp_stream_interleave {
|
|
+ __u16 data_chunk_len;
|
|
+ __u16 ftsn_chunk_len;
|
|
+ struct sctp_chunk * (*make_datafrag)(const struct sctp_association *, const struct sctp_sndrcvinfo *, int, __u8, gfp_t);
|
|
+ void (*assign_number)(struct sctp_chunk *);
|
|
+ bool (*validate_data)(struct sctp_chunk *);
|
|
+ int (*ulpevent_data)(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
|
|
+ int (*enqueue_event)(struct sctp_ulpq *, struct sctp_ulpevent *);
|
|
+ void (*renege_events)(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
|
|
+ void (*start_pd)(struct sctp_ulpq *, gfp_t);
|
|
+ void (*abort_pd)(struct sctp_ulpq *, gfp_t);
|
|
+ void (*generate_ftsn)(struct sctp_outq *, __u32);
|
|
+ bool (*validate_ftsn)(struct sctp_chunk *);
|
|
+ void (*report_ftsn)(struct sctp_ulpq *, __u32);
|
|
+ void (*handle_ftsn)(struct sctp_ulpq *, struct sctp_chunk *);
|
|
+};
|
|
+
|
|
+struct sctp_bind_bucket {
|
|
+ short unsigned int port;
|
|
+ short unsigned int fastreuse;
|
|
+ struct hlist_node node;
|
|
+ struct hlist_head owner;
|
|
+ struct net *net;
|
|
+};
|
|
+
|
|
+struct sctp_bind_hashbucket {
|
|
+ spinlock_t lock;
|
|
+ struct hlist_head chain;
|
|
+};
|
|
+
|
|
+struct sctp_hashbucket {
|
|
+ rwlock_t lock;
|
|
+ struct hlist_head chain;
|
|
+};
|
|
+
|
|
+struct sctp_globals {
|
|
+ struct list_head address_families;
|
|
+ struct sctp_hashbucket *ep_hashtable;
|
|
+ struct sctp_bind_hashbucket *port_hashtable;
|
|
+ struct rhltable transport_hashtable;
|
|
+ int ep_hashsize;
|
|
+ int port_hashsize;
|
|
+ __u16 max_instreams;
|
|
+ __u16 max_outstreams;
|
|
+ bool checksum_disable;
|
|
+};
|
|
+
|
|
+enum sctp_socket_type {
|
|
+ SCTP_SOCKET_UDP = 0,
|
|
+ SCTP_SOCKET_UDP_HIGH_BANDWIDTH = 1,
|
|
+ SCTP_SOCKET_TCP = 2,
|
|
+};
|
|
+
|
|
+struct sctp_pf;
|
|
+
|
|
+struct sctp_sock {
|
|
+ struct inet_sock inet;
|
|
+ enum sctp_socket_type type;
|
|
+ struct sctp_pf *pf;
|
|
+ struct crypto_shash___2 *hmac;
|
|
+ char *sctp_hmac_alg;
|
|
+ struct sctp_endpoint *ep;
|
|
+ struct sctp_bind_bucket *bind_hash;
|
|
+ __u16 default_stream;
|
|
+ __u32 default_ppid;
|
|
+ __u16 default_flags;
|
|
+ __u32 default_context;
|
|
+ __u32 default_timetolive;
|
|
+ __u32 default_rcv_context;
|
|
+ int max_burst;
|
|
+ __u32 hbinterval;
|
|
+ __u16 pathmaxrxt;
|
|
+ __u32 flowlabel;
|
|
+ __u8 dscp;
|
|
+ __u32 pathmtu;
|
|
+ __u32 sackdelay;
|
|
+ __u32 sackfreq;
|
|
+ __u32 param_flags;
|
|
+ struct sctp_rtoinfo rtoinfo;
|
|
+ struct sctp_paddrparams paddrparam;
|
|
+ struct sctp_assocparams assocparams;
|
|
+ struct sctp_event_subscribe subscribe;
|
|
+ struct sctp_initmsg initmsg;
|
|
+ int user_frag;
|
|
+ __u32 autoclose;
|
|
+ __u32 adaptation_ind;
|
|
+ __u32 pd_point;
|
|
+ __u16 nodelay: 1;
|
|
+ __u16 reuse: 1;
|
|
+ __u16 disable_fragments: 1;
|
|
+ __u16 v4mapped: 1;
|
|
+ __u16 frag_interleave: 1;
|
|
+ __u16 strm_interleave: 1;
|
|
+ __u16 recvrcvinfo: 1;
|
|
+ __u16 recvnxtinfo: 1;
|
|
+ __u16 data_ready_signalled: 1;
|
|
+ atomic_t pd_mode;
|
|
+ struct sk_buff_head pd_lobby;
|
|
+ struct list_head auto_asconf_list;
|
|
+ int do_auto_asconf;
|
|
+};
|
|
+
|
|
+struct sctp_af;
|
|
+
|
|
+struct sctp_pf {
|
|
+ void (*event_msgname)(struct sctp_ulpevent *, char *, int *);
|
|
+ void (*skb_msgname)(struct sk_buff *, char *, int *);
|
|
+ int (*af_supported)(sa_family_t, struct sctp_sock *);
|
|
+ int (*cmp_addr)(const union sctp_addr *, const union sctp_addr *, struct sctp_sock *);
|
|
+ int (*bind_verify)(struct sctp_sock *, union sctp_addr *);
|
|
+ int (*send_verify)(struct sctp_sock *, union sctp_addr *);
|
|
+ int (*supported_addrs)(const struct sctp_sock *, __be16 *);
|
|
+ struct sock * (*create_accept_sk)(struct sock *, struct sctp_association *, bool);
|
|
+ int (*addr_to_user)(struct sctp_sock *, union sctp_addr *);
|
|
+ void (*to_sk_saddr)(union sctp_addr *, struct sock *);
|
|
+ void (*to_sk_daddr)(union sctp_addr *, struct sock *);
|
|
+ void (*copy_ip_options)(struct sock *, struct sock *);
|
|
+ struct sctp_af *af;
|
|
+};
|
|
+
|
|
+struct sctp_signed_cookie {
|
|
+ __u8 signature[32];
|
|
+ __u32 __pad;
|
|
+ struct sctp_cookie c;
|
|
+} __attribute__((packed));
|
|
+
|
|
+union sctp_addr_param {
|
|
+ struct sctp_paramhdr p;
|
|
+ struct sctp_ipv4addr_param v4;
|
|
+ struct sctp_ipv6addr_param v6;
|
|
+};
|
|
+
|
|
+struct sctp_sender_hb_info {
|
|
+ struct sctp_paramhdr param_hdr;
|
|
+ union sctp_addr daddr;
|
|
+ long unsigned int sent_at;
|
|
+ __u64 hb_nonce;
|
|
+};
|
|
+
|
|
+struct sctp_af {
|
|
+ int (*sctp_xmit)(struct sk_buff *, struct sctp_transport *);
|
|
+ int (*setsockopt)(struct sock *, int, int, char *, unsigned int);
|
|
+ int (*getsockopt)(struct sock *, int, int, char *, int *);
|
|
+ int (*compat_setsockopt)(struct sock *, int, int, char *, unsigned int);
|
|
+ int (*compat_getsockopt)(struct sock *, int, int, char *, int *);
|
|
+ void (*get_dst)(struct sctp_transport *, union sctp_addr *, struct flowi *, struct sock *);
|
|
+ void (*get_saddr)(struct sctp_sock *, struct sctp_transport *, struct flowi *);
|
|
+ void (*copy_addrlist)(struct list_head *, struct net_device *);
|
|
+ int (*cmp_addr)(const union sctp_addr *, const union sctp_addr *);
|
|
+ void (*addr_copy)(union sctp_addr *, union sctp_addr *);
|
|
+ void (*from_skb)(union sctp_addr *, struct sk_buff *, int);
|
|
+ void (*from_sk)(union sctp_addr *, struct sock *);
|
|
+ bool (*from_addr_param)(union sctp_addr *, union sctp_addr_param *, __be16, int);
|
|
+ int (*to_addr_param)(const union sctp_addr *, union sctp_addr_param *);
|
|
+ int (*addr_valid)(union sctp_addr *, struct sctp_sock *, const struct sk_buff *);
|
|
+ enum sctp_scope (*scope)(union sctp_addr *);
|
|
+ void (*inaddr_any)(union sctp_addr *, __be16);
|
|
+ int (*is_any)(const union sctp_addr *);
|
|
+ int (*available)(union sctp_addr *, struct sctp_sock *);
|
|
+ int (*skb_iif)(const struct sk_buff *);
|
|
+ int (*is_ce)(const struct sk_buff *);
|
|
+ void (*seq_dump_addr)(struct seq_file *, union sctp_addr *);
|
|
+ void (*ecn_capable)(struct sock *);
|
|
+ __u16 net_header_len;
|
|
+ int sockaddr_len;
|
|
+ int (*ip_options_len)(struct sock *);
|
|
+ sa_family_t sa_family;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct sctp_packet {
|
|
+ __u16 source_port;
|
|
+ __u16 destination_port;
|
|
+ __u32 vtag;
|
|
+ struct list_head chunk_list;
|
|
+ size_t overhead;
|
|
+ size_t size;
|
|
+ size_t max_size;
|
|
+ struct sctp_transport *transport;
|
|
+ struct sctp_chunk *auth;
|
|
+ u8 has_cookie_echo: 1;
|
|
+ u8 has_sack: 1;
|
|
+ u8 has_auth: 1;
|
|
+ u8 has_data: 1;
|
|
+ u8 ipfragok: 1;
|
|
+};
|
|
+
|
|
+struct sctp_transport {
|
|
+ struct list_head transports;
|
|
+ struct rhlist_head node;
|
|
+ refcount_t refcnt;
|
|
+ __u32 rto_pending: 1;
|
|
+ __u32 hb_sent: 1;
|
|
+ __u32 pmtu_pending: 1;
|
|
+ __u32 dst_pending_confirm: 1;
|
|
+ __u32 sack_generation: 1;
|
|
+ u32 dst_cookie;
|
|
+ struct flowi fl;
|
|
+ union sctp_addr ipaddr;
|
|
+ struct sctp_af *af_specific;
|
|
+ struct sctp_association *asoc;
|
|
+ long unsigned int rto;
|
|
+ __u32 rtt;
|
|
+ __u32 rttvar;
|
|
+ __u32 srtt;
|
|
+ __u32 cwnd;
|
|
+ __u32 ssthresh;
|
|
+ __u32 partial_bytes_acked;
|
|
+ __u32 flight_size;
|
|
+ __u32 burst_limited;
|
|
+ struct dst_entry *dst;
|
|
+ union sctp_addr saddr;
|
|
+ long unsigned int hbinterval;
|
|
+ long unsigned int sackdelay;
|
|
+ __u32 sackfreq;
|
|
+ atomic_t mtu_info;
|
|
+ ktime_t last_time_heard;
|
|
+ long unsigned int last_time_sent;
|
|
+ long unsigned int last_time_ecne_reduced;
|
|
+ __u16 pathmaxrxt;
|
|
+ __u32 flowlabel;
|
|
+ __u8 dscp;
|
|
+ int pf_retrans;
|
|
+ __u32 pathmtu;
|
|
+ __u32 param_flags;
|
|
+ int init_sent_count;
|
|
+ int state;
|
|
+ short unsigned int error_count;
|
|
+ struct timer_list T3_rtx_timer;
|
|
+ struct timer_list hb_timer;
|
|
+ struct timer_list proto_unreach_timer;
|
|
+ struct timer_list reconf_timer;
|
|
+ struct list_head transmitted;
|
|
+ struct sctp_packet packet;
|
|
+ struct list_head send_ready;
|
|
+ struct {
|
|
+ __u32 next_tsn_at_change;
|
|
+ char changeover_active;
|
|
+ char cycling_changeover;
|
|
+ char cacc_saw_newack;
|
|
+ } cacc;
|
|
+ __u64 hb_nonce;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct sctp_datamsg {
|
|
+ struct list_head chunks;
|
|
+ refcount_t refcnt;
|
|
+ long unsigned int expires_at;
|
|
+ int send_error;
|
|
+ u8 send_failed: 1;
|
|
+ u8 can_delay: 1;
|
|
+ u8 abandoned: 1;
|
|
+};
|
|
+
|
|
+struct sctp_stream_priorities {
|
|
+ struct list_head prio_sched;
|
|
+ struct list_head active;
|
|
+ struct sctp_stream_out_ext *next;
|
|
+ __u16 prio;
|
|
+};
|
|
+
|
|
+struct sctp_stream_out_ext {
|
|
+ __u64 abandoned_unsent[3];
|
|
+ __u64 abandoned_sent[3];
|
|
+ struct list_head outq;
|
|
+ union {
|
|
+ struct {
|
|
+ struct list_head prio_list;
|
|
+ struct sctp_stream_priorities *prio_head;
|
|
+ };
|
|
+ struct {
|
|
+ struct list_head rr_list;
|
|
+ };
|
|
+ };
|
|
+};
|
|
+
|
|
+struct sctp_stream_out {
|
|
+ union {
|
|
+ __u32 mid;
|
|
+ __u16 ssn;
|
|
+ };
|
|
+ __u32 mid_uo;
|
|
+ struct sctp_stream_out_ext *ext;
|
|
+ __u8 state;
|
|
+};
|
|
+
|
|
+struct task_security_struct {
|
|
+ u32 osid;
|
|
+ u32 sid;
|
|
+ u32 exec_sid;
|
|
+ u32 create_sid;
|
|
+ u32 keycreate_sid;
|
|
+ u32 sockcreate_sid;
|
|
+};
|
|
+
|
|
+enum label_initialized {
|
|
+ LABEL_INVALID = 0,
|
|
+ LABEL_INITIALIZED = 1,
|
|
+ LABEL_PENDING = 2,
|
|
+};
|
|
+
|
|
+struct inode_security_struct {
|
|
+ struct inode *inode;
|
|
+ union {
|
|
+ struct list_head list;
|
|
+ struct callback_head rcu;
|
|
+ };
|
|
+ u32 task_sid;
|
|
+ u32 sid;
|
|
+ u16 sclass;
|
|
+ unsigned char initialized;
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct file_security_struct {
|
|
+ u32 sid;
|
|
+ u32 fown_sid;
|
|
+ u32 isid;
|
|
+ u32 pseqno;
|
|
+};
|
|
+
|
|
+struct superblock_security_struct {
|
|
+ struct super_block *sb;
|
|
+ u32 sid;
|
|
+ u32 def_sid;
|
|
+ u32 mntpoint_sid;
|
|
+ short unsigned int behavior;
|
|
+ short unsigned int flags;
|
|
+ struct mutex lock;
|
|
+ struct list_head isec_head;
|
|
+ spinlock_t isec_lock;
|
|
+};
|
|
+
|
|
+struct msg_security_struct {
|
|
+ u32 sid;
|
|
+};
|
|
+
|
|
+struct ipc_security_struct {
|
|
+ u16 sclass;
|
|
+ u32 sid;
|
|
+};
|
|
+
|
|
+struct sk_security_struct {
|
|
+ enum {
|
|
+ NLBL_UNSET = 0,
|
|
+ NLBL_REQUIRE = 1,
|
|
+ NLBL_LABELED = 2,
|
|
+ NLBL_REQSKB = 3,
|
|
+ NLBL_CONNLABELED = 4,
|
|
+ } nlbl_state;
|
|
+ struct netlbl_lsm_secattr *nlbl_secattr;
|
|
+ u32 sid;
|
|
+ u32 peer_sid;
|
|
+ u16 sclass;
|
|
+ enum {
|
|
+ SCTP_ASSOC_UNSET = 0,
|
|
+ SCTP_ASSOC_SET = 1,
|
|
+ } sctp_assoc_state;
|
|
+};
|
|
+
|
|
+struct tun_security_struct {
|
|
+ u32 sid;
|
|
+};
|
|
+
|
|
+struct key_security_struct {
|
|
+ u32 sid;
|
|
+};
|
|
+
|
|
+struct ib_security_struct {
|
|
+ u32 sid;
|
|
+};
|
|
+
|
|
+struct bpf_security_struct {
|
|
+ u32 sid;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ Opt_error___2 = -1,
|
|
+ Opt_context = 1,
|
|
+ Opt_fscontext = 2,
|
|
+ Opt_defcontext = 3,
|
|
+ Opt_rootcontext = 4,
|
|
+ Opt_labelsupport = 5,
|
|
+ Opt_nextmntopt = 6,
|
|
+};
|
|
+
|
|
+enum sel_inos {
|
|
+ SEL_ROOT_INO = 2,
|
|
+ SEL_LOAD = 3,
|
|
+ SEL_ENFORCE = 4,
|
|
+ SEL_CONTEXT = 5,
|
|
+ SEL_ACCESS = 6,
|
|
+ SEL_CREATE = 7,
|
|
+ SEL_RELABEL = 8,
|
|
+ SEL_USER = 9,
|
|
+ SEL_POLICYVERS = 10,
|
|
+ SEL_COMMIT_BOOLS = 11,
|
|
+ SEL_MLS = 12,
|
|
+ SEL_DISABLE = 13,
|
|
+ SEL_MEMBER = 14,
|
|
+ SEL_CHECKREQPROT = 15,
|
|
+ SEL_COMPAT_NET = 16,
|
|
+ SEL_REJECT_UNKNOWN = 17,
|
|
+ SEL_DENY_UNKNOWN = 18,
|
|
+ SEL_STATUS = 19,
|
|
+ SEL_POLICY = 20,
|
|
+ SEL_VALIDATE_TRANS = 21,
|
|
+ SEL_INO_NEXT = 22,
|
|
+};
|
|
+
|
|
+struct selinux_fs_info {
|
|
+ struct dentry *bool_dir;
|
|
+ unsigned int bool_num;
|
|
+ char **bool_pending_names;
|
|
+ unsigned int *bool_pending_values;
|
|
+ struct dentry *class_dir;
|
|
+ long unsigned int last_class_ino;
|
|
+ bool policy_opened;
|
|
+ struct dentry *policycap_dir;
|
|
+ struct mutex mutex;
|
|
+ long unsigned int last_ino;
|
|
+ struct selinux_state *state;
|
|
+ struct super_block *sb;
|
|
+};
|
|
+
|
|
+struct policy_load_memory {
|
|
+ size_t len;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SELNL_MSG_SETENFORCE = 16,
|
|
+ SELNL_MSG_POLICYLOAD = 17,
|
|
+ SELNL_MSG_MAX = 18,
|
|
+};
|
|
+
|
|
+enum selinux_nlgroups {
|
|
+ SELNLGRP_NONE = 0,
|
|
+ SELNLGRP_AVC = 1,
|
|
+ __SELNLGRP_MAX = 2,
|
|
+};
|
|
+
|
|
+struct selnl_msg_setenforce {
|
|
+ __s32 val;
|
|
+};
|
|
+
|
|
+struct selnl_msg_policyload {
|
|
+ __u32 seqno;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ XFRM_MSG_BASE = 16,
|
|
+ XFRM_MSG_NEWSA = 16,
|
|
+ XFRM_MSG_DELSA = 17,
|
|
+ XFRM_MSG_GETSA = 18,
|
|
+ XFRM_MSG_NEWPOLICY = 19,
|
|
+ XFRM_MSG_DELPOLICY = 20,
|
|
+ XFRM_MSG_GETPOLICY = 21,
|
|
+ XFRM_MSG_ALLOCSPI = 22,
|
|
+ XFRM_MSG_ACQUIRE = 23,
|
|
+ XFRM_MSG_EXPIRE = 24,
|
|
+ XFRM_MSG_UPDPOLICY = 25,
|
|
+ XFRM_MSG_UPDSA = 26,
|
|
+ XFRM_MSG_POLEXPIRE = 27,
|
|
+ XFRM_MSG_FLUSHSA = 28,
|
|
+ XFRM_MSG_FLUSHPOLICY = 29,
|
|
+ XFRM_MSG_NEWAE = 30,
|
|
+ XFRM_MSG_GETAE = 31,
|
|
+ XFRM_MSG_REPORT = 32,
|
|
+ XFRM_MSG_MIGRATE = 33,
|
|
+ XFRM_MSG_NEWSADINFO = 34,
|
|
+ XFRM_MSG_GETSADINFO = 35,
|
|
+ XFRM_MSG_NEWSPDINFO = 36,
|
|
+ XFRM_MSG_GETSPDINFO = 37,
|
|
+ XFRM_MSG_MAPPING = 38,
|
|
+ __XFRM_MSG_MAX = 39,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ RTM_BASE = 16,
|
|
+ RTM_NEWLINK = 16,
|
|
+ RTM_DELLINK = 17,
|
|
+ RTM_GETLINK = 18,
|
|
+ RTM_SETLINK = 19,
|
|
+ RTM_NEWADDR = 20,
|
|
+ RTM_DELADDR = 21,
|
|
+ RTM_GETADDR = 22,
|
|
+ RTM_NEWROUTE = 24,
|
|
+ RTM_DELROUTE = 25,
|
|
+ RTM_GETROUTE = 26,
|
|
+ RTM_NEWNEIGH = 28,
|
|
+ RTM_DELNEIGH = 29,
|
|
+ RTM_GETNEIGH = 30,
|
|
+ RTM_NEWRULE = 32,
|
|
+ RTM_DELRULE = 33,
|
|
+ RTM_GETRULE = 34,
|
|
+ RTM_NEWQDISC = 36,
|
|
+ RTM_DELQDISC = 37,
|
|
+ RTM_GETQDISC = 38,
|
|
+ RTM_NEWTCLASS = 40,
|
|
+ RTM_DELTCLASS = 41,
|
|
+ RTM_GETTCLASS = 42,
|
|
+ RTM_NEWTFILTER = 44,
|
|
+ RTM_DELTFILTER = 45,
|
|
+ RTM_GETTFILTER = 46,
|
|
+ RTM_NEWACTION = 48,
|
|
+ RTM_DELACTION = 49,
|
|
+ RTM_GETACTION = 50,
|
|
+ RTM_NEWPREFIX = 52,
|
|
+ RTM_GETMULTICAST = 58,
|
|
+ RTM_GETANYCAST = 62,
|
|
+ RTM_NEWNEIGHTBL = 64,
|
|
+ RTM_GETNEIGHTBL = 66,
|
|
+ RTM_SETNEIGHTBL = 67,
|
|
+ RTM_NEWNDUSEROPT = 68,
|
|
+ RTM_NEWADDRLABEL = 72,
|
|
+ RTM_DELADDRLABEL = 73,
|
|
+ RTM_GETADDRLABEL = 74,
|
|
+ RTM_GETDCB = 78,
|
|
+ RTM_SETDCB = 79,
|
|
+ RTM_NEWNETCONF = 80,
|
|
+ RTM_DELNETCONF = 81,
|
|
+ RTM_GETNETCONF = 82,
|
|
+ RTM_NEWMDB = 84,
|
|
+ RTM_DELMDB = 85,
|
|
+ RTM_GETMDB = 86,
|
|
+ RTM_NEWNSID = 88,
|
|
+ RTM_DELNSID = 89,
|
|
+ RTM_GETNSID = 90,
|
|
+ RTM_NEWSTATS = 92,
|
|
+ RTM_GETSTATS = 94,
|
|
+ RTM_NEWCACHEREPORT = 96,
|
|
+ RTM_NEWCHAIN = 100,
|
|
+ RTM_DELCHAIN = 101,
|
|
+ RTM_GETCHAIN = 102,
|
|
+ __RTM_MAX = 103,
|
|
+};
|
|
+
|
|
+struct nlmsg_perm {
|
|
+ u16 nlmsg_type;
|
|
+ u32 perm;
|
|
+};
|
|
+
|
|
+struct netif_security_struct {
|
|
+ struct net *ns;
|
|
+ int ifindex;
|
|
+ u32 sid;
|
|
+};
|
|
+
|
|
+struct sel_netif {
|
|
+ struct list_head list;
|
|
+ struct netif_security_struct nsec;
|
|
+ struct callback_head callback_head;
|
|
+};
|
|
+
|
|
+struct netnode_security_struct {
|
|
+ union {
|
|
+ __be32 ipv4;
|
|
+ struct in6_addr ipv6;
|
|
+ } addr;
|
|
+ u32 sid;
|
|
+ u16 family;
|
|
+};
|
|
+
|
|
+struct sel_netnode_bkt {
|
|
+ unsigned int size;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct sel_netnode {
|
|
+ struct netnode_security_struct nsec;
|
|
+ struct list_head list;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct netport_security_struct {
|
|
+ u32 sid;
|
|
+ u16 port;
|
|
+ u8 protocol;
|
|
+};
|
|
+
|
|
+struct sel_netport_bkt {
|
|
+ int size;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct sel_netport {
|
|
+ struct netport_security_struct psec;
|
|
+ struct list_head list;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct pkey_security_struct {
|
|
+ u64 subnet_prefix;
|
|
+ u16 pkey;
|
|
+ u32 sid;
|
|
+};
|
|
+
|
|
+struct sel_ib_pkey_bkt {
|
|
+ int size;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct sel_ib_pkey {
|
|
+ struct pkey_security_struct psec;
|
|
+ struct list_head list;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct path___3;
|
|
+
|
|
+struct ebitmap_node {
|
|
+ struct ebitmap_node *next;
|
|
+ long unsigned int maps[6];
|
|
+ u32 startbit;
|
|
+};
|
|
+
|
|
+struct ebitmap {
|
|
+ struct ebitmap_node *node;
|
|
+ u32 highbit;
|
|
+};
|
|
+
|
|
+struct policy_file {
|
|
+ char *data;
|
|
+ size_t len;
|
|
+};
|
|
+
|
|
+struct hashtab_node {
|
|
+ void *key;
|
|
+ void *datum;
|
|
+ struct hashtab_node *next;
|
|
+};
|
|
+
|
|
+struct hashtab {
|
|
+ struct hashtab_node **htable;
|
|
+ u32 size;
|
|
+ u32 nel;
|
|
+ u32 (*hash_value)(struct hashtab *, const void *);
|
|
+ int (*keycmp)(struct hashtab *, const void *, const void *);
|
|
+};
|
|
+
|
|
+struct hashtab_info {
|
|
+ u32 slots_used;
|
|
+ u32 max_chain_len;
|
|
+};
|
|
+
|
|
+struct symtab {
|
|
+ struct hashtab *table;
|
|
+ u32 nprim;
|
|
+};
|
|
+
|
|
+struct mls_level {
|
|
+ u32 sens;
|
|
+ struct ebitmap cat;
|
|
+};
|
|
+
|
|
+struct mls_range {
|
|
+ struct mls_level level[2];
|
|
+};
|
|
+
|
|
+struct context___2 {
|
|
+ u32 user;
|
|
+ u32 role;
|
|
+ u32 type;
|
|
+ u32 len;
|
|
+ struct mls_range range;
|
|
+ char *str;
|
|
+};
|
|
+
|
|
+struct sidtab_node {
|
|
+ u32 sid;
|
|
+ struct context___2 context;
|
|
+ struct sidtab_node *next;
|
|
+};
|
|
+
|
|
+struct sidtab {
|
|
+ struct sidtab_node **htable;
|
|
+ unsigned int nel;
|
|
+ unsigned int next_sid;
|
|
+ unsigned char shutdown;
|
|
+ struct sidtab_node *cache[3];
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct avtab_key {
|
|
+ u16 source_type;
|
|
+ u16 target_type;
|
|
+ u16 target_class;
|
|
+ u16 specified;
|
|
+};
|
|
+
|
|
+struct avtab_extended_perms {
|
|
+ u8 specified;
|
|
+ u8 driver;
|
|
+ struct extended_perms_data perms;
|
|
+};
|
|
+
|
|
+struct avtab_datum {
|
|
+ union {
|
|
+ u32 data;
|
|
+ struct avtab_extended_perms *xperms;
|
|
+ } u;
|
|
+};
|
|
+
|
|
+struct avtab_node {
|
|
+ struct avtab_key key;
|
|
+ struct avtab_datum datum;
|
|
+ struct avtab_node *next;
|
|
+};
|
|
+
|
|
+struct avtab {
|
|
+ struct flex_array *htable;
|
|
+ u32 nel;
|
|
+ u32 nslot;
|
|
+ u32 mask;
|
|
+};
|
|
+
|
|
+struct type_set;
|
|
+
|
|
+struct constraint_expr {
|
|
+ u32 expr_type;
|
|
+ u32 attr;
|
|
+ u32 op;
|
|
+ struct ebitmap names;
|
|
+ struct type_set *type_names;
|
|
+ struct constraint_expr *next;
|
|
+};
|
|
+
|
|
+struct type_set {
|
|
+ struct ebitmap types;
|
|
+ struct ebitmap negset;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+struct constraint_node {
|
|
+ u32 permissions;
|
|
+ struct constraint_expr *expr;
|
|
+ struct constraint_node *next;
|
|
+};
|
|
+
|
|
+struct common_datum {
|
|
+ u32 value;
|
|
+ struct symtab permissions;
|
|
+};
|
|
+
|
|
+struct class_datum {
|
|
+ u32 value;
|
|
+ char *comkey;
|
|
+ struct common_datum *comdatum;
|
|
+ struct symtab permissions;
|
|
+ struct constraint_node *constraints;
|
|
+ struct constraint_node *validatetrans;
|
|
+ char default_user;
|
|
+ char default_role;
|
|
+ char default_type;
|
|
+ char default_range;
|
|
+};
|
|
+
|
|
+struct role_datum {
|
|
+ u32 value;
|
|
+ u32 bounds;
|
|
+ struct ebitmap dominates;
|
|
+ struct ebitmap types;
|
|
+};
|
|
+
|
|
+struct role_trans {
|
|
+ u32 role;
|
|
+ u32 type;
|
|
+ u32 tclass;
|
|
+ u32 new_role;
|
|
+ struct role_trans *next;
|
|
+};
|
|
+
|
|
+struct role_allow {
|
|
+ u32 role;
|
|
+ u32 new_role;
|
|
+ struct role_allow *next;
|
|
+};
|
|
+
|
|
+struct user_datum {
|
|
+ u32 value;
|
|
+ u32 bounds;
|
|
+ struct ebitmap roles;
|
|
+ struct mls_range range;
|
|
+ struct mls_level dfltlevel;
|
|
+};
|
|
+
|
|
+struct cond_bool_datum {
|
|
+ __u32 value;
|
|
+ int state;
|
|
+};
|
|
+
|
|
+struct ocontext {
|
|
+ union {
|
|
+ char *name;
|
|
+ struct {
|
|
+ u8 protocol;
|
|
+ u16 low_port;
|
|
+ u16 high_port;
|
|
+ } port;
|
|
+ struct {
|
|
+ u32 addr;
|
|
+ u32 mask;
|
|
+ } node;
|
|
+ struct {
|
|
+ u32 addr[4];
|
|
+ u32 mask[4];
|
|
+ } node6;
|
|
+ struct {
|
|
+ u64 subnet_prefix;
|
|
+ u16 low_pkey;
|
|
+ u16 high_pkey;
|
|
+ } ibpkey;
|
|
+ struct {
|
|
+ char *dev_name;
|
|
+ u8 port;
|
|
+ } ibendport;
|
|
+ } u;
|
|
+ union {
|
|
+ u32 sclass;
|
|
+ u32 behavior;
|
|
+ } v;
|
|
+ struct context___2 context[2];
|
|
+ u32 sid[2];
|
|
+ struct ocontext *next;
|
|
+};
|
|
+
|
|
+struct genfs {
|
|
+ char *fstype;
|
|
+ struct ocontext *head;
|
|
+ struct genfs *next;
|
|
+};
|
|
+
|
|
+struct cond_node;
|
|
+
|
|
+struct policydb {
|
|
+ int mls_enabled;
|
|
+ struct symtab symtab[8];
|
|
+ struct flex_array *sym_val_to_name[8];
|
|
+ struct class_datum **class_val_to_struct;
|
|
+ struct role_datum **role_val_to_struct;
|
|
+ struct user_datum **user_val_to_struct;
|
|
+ struct flex_array *type_val_to_struct_array;
|
|
+ struct avtab te_avtab;
|
|
+ struct role_trans *role_tr;
|
|
+ struct ebitmap filename_trans_ttypes;
|
|
+ struct hashtab *filename_trans;
|
|
+ struct cond_bool_datum **bool_val_to_struct;
|
|
+ struct avtab te_cond_avtab;
|
|
+ struct cond_node *cond_list;
|
|
+ struct role_allow *role_allow;
|
|
+ struct ocontext *ocontexts[9];
|
|
+ struct genfs *genfs;
|
|
+ struct hashtab *range_tr;
|
|
+ struct flex_array *type_attr_map_array;
|
|
+ struct ebitmap policycaps;
|
|
+ struct ebitmap permissive_map;
|
|
+ size_t len;
|
|
+ unsigned int policyvers;
|
|
+ unsigned int reject_unknown: 1;
|
|
+ unsigned int allow_unknown: 1;
|
|
+ u16 process_class;
|
|
+ u32 process_trans_perms;
|
|
+};
|
|
+
|
|
+struct selinux_mapping;
|
|
+
|
|
+struct selinux_map {
|
|
+ struct selinux_mapping *mapping;
|
|
+ u16 size;
|
|
+};
|
|
+
|
|
+struct selinux_ss {
|
|
+ struct sidtab sidtab;
|
|
+ struct policydb policydb;
|
|
+ rwlock_t policy_rwlock;
|
|
+ u32 latest_granting;
|
|
+ struct selinux_map map;
|
|
+ struct page *status_page;
|
|
+ struct mutex status_lock;
|
|
+};
|
|
+
|
|
+struct perm_datum {
|
|
+ u32 value;
|
|
+};
|
|
+
|
|
+struct filename_trans {
|
|
+ u32 stype;
|
|
+ u32 ttype;
|
|
+ u16 tclass;
|
|
+ const char *name;
|
|
+};
|
|
+
|
|
+struct filename_trans_datum {
|
|
+ u32 otype;
|
|
+};
|
|
+
|
|
+struct type_datum {
|
|
+ u32 value;
|
|
+ u32 bounds;
|
|
+ unsigned char primary;
|
|
+ unsigned char attribute;
|
|
+};
|
|
+
|
|
+struct level_datum {
|
|
+ struct mls_level *level;
|
|
+ unsigned char isalias;
|
|
+};
|
|
+
|
|
+struct cat_datum {
|
|
+ u32 value;
|
|
+ unsigned char isalias;
|
|
+};
|
|
+
|
|
+struct range_trans {
|
|
+ u32 source_type;
|
|
+ u32 target_type;
|
|
+ u32 target_class;
|
|
+};
|
|
+
|
|
+struct cond_expr;
|
|
+
|
|
+struct cond_av_list;
|
|
+
|
|
+struct cond_node {
|
|
+ int cur_state;
|
|
+ struct cond_expr *expr;
|
|
+ struct cond_av_list *true_list;
|
|
+ struct cond_av_list *false_list;
|
|
+ struct cond_node *next;
|
|
+};
|
|
+
|
|
+struct policy_data {
|
|
+ struct policydb *p;
|
|
+ void *fp;
|
|
+};
|
|
+
|
|
+struct cond_expr {
|
|
+ __u32 expr_type;
|
|
+ __u32 bool;
|
|
+ struct cond_expr *next;
|
|
+};
|
|
+
|
|
+struct cond_av_list {
|
|
+ struct avtab_node *node;
|
|
+ struct cond_av_list *next;
|
|
+};
|
|
+
|
|
+struct selinux_mapping {
|
|
+ u16 value;
|
|
+ unsigned int num_perms;
|
|
+ u32 perms[32];
|
|
+};
|
|
+
|
|
+struct policydb_compat_info {
|
|
+ int version;
|
|
+ int sym_num;
|
|
+ int ocon_num;
|
|
+};
|
|
+
|
|
+struct convert_context_args {
|
|
+ struct selinux_state *state;
|
|
+ struct policydb *oldp;
|
|
+ struct policydb *newp;
|
|
+};
|
|
+
|
|
+struct selinux_audit_rule {
|
|
+ u32 au_seqno;
|
|
+ struct context___2 au_ctxt;
|
|
+};
|
|
+
|
|
+struct cond_insertf_data {
|
|
+ struct policydb *p;
|
|
+ struct cond_av_list *other;
|
|
+ struct cond_av_list *head;
|
|
+ struct cond_av_list *tail;
|
|
+};
|
|
+
|
|
+struct selinux_kernel_status {
|
|
+ u32 version;
|
|
+ u32 sequence;
|
|
+ u32 enforcing;
|
|
+ u32 policyload;
|
|
+ u32 deny_unknown;
|
|
+};
|
|
+
|
|
+struct xfrm_offload {
|
|
+ struct {
|
|
+ __u32 low;
|
|
+ __u32 hi;
|
|
+ } seq;
|
|
+ __u32 flags;
|
|
+ __u32 status;
|
|
+ __u8 proto;
|
|
+};
|
|
+
|
|
+struct sec_path {
|
|
+ refcount_t refcnt;
|
|
+ int len;
|
|
+ int olen;
|
|
+ struct xfrm_state *xvec[6];
|
|
+ struct xfrm_offload ovec[1];
|
|
+};
|
|
+
|
|
+struct rt6key {
|
|
+ struct in6_addr addr;
|
|
+ int plen;
|
|
+};
|
|
+
|
|
+struct fib6_nh {
|
|
+ struct in6_addr nh_gw;
|
|
+ struct net_device *nh_dev;
|
|
+ struct lwtunnel_state *nh_lwtstate;
|
|
+ unsigned int nh_flags;
|
|
+ atomic_t nh_upper_bound;
|
|
+ int nh_weight;
|
|
+};
|
|
+
|
|
+struct fib6_node;
|
|
+
|
|
+struct rt6_exception_bucket;
|
|
+
|
|
+struct fib6_info {
|
|
+ struct fib6_table *fib6_table;
|
|
+ struct fib6_info *fib6_next;
|
|
+ struct fib6_node *fib6_node;
|
|
+ struct list_head fib6_siblings;
|
|
+ unsigned int fib6_nsiblings;
|
|
+ atomic_t fib6_ref;
|
|
+ long unsigned int expires;
|
|
+ struct dst_metrics *fib6_metrics;
|
|
+ struct rt6key fib6_dst;
|
|
+ u32 fib6_flags;
|
|
+ struct rt6key fib6_src;
|
|
+ struct rt6key fib6_prefsrc;
|
|
+ struct rt6_info **rt6i_pcpu;
|
|
+ struct rt6_exception_bucket *rt6i_exception_bucket;
|
|
+ long unsigned int last_probe;
|
|
+ u32 fib6_metric;
|
|
+ u8 fib6_protocol;
|
|
+ u8 fib6_type;
|
|
+ u8 exception_bucket_flushed: 1;
|
|
+ u8 should_flush: 1;
|
|
+ u8 dst_nocount: 1;
|
|
+ u8 dst_nopolicy: 1;
|
|
+ u8 dst_host: 1;
|
|
+ u8 fib6_destroying: 1;
|
|
+ u8 unused: 2;
|
|
+ struct fib6_nh fib6_nh;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct uncached_list;
|
|
+
|
|
+struct rt6_info {
|
|
+ struct dst_entry dst;
|
|
+ struct fib6_info *from;
|
|
+ struct rt6key rt6i_dst;
|
|
+ struct rt6key rt6i_src;
|
|
+ struct in6_addr rt6i_gateway;
|
|
+ struct inet6_dev *rt6i_idev;
|
|
+ u32 rt6i_flags;
|
|
+ struct rt6key rt6i_prefsrc;
|
|
+ struct list_head rt6i_uncached;
|
|
+ struct uncached_list *rt6i_uncached_list;
|
|
+ short unsigned int rt6i_nfheader_len;
|
|
+};
|
|
+
|
|
+struct rt6_statistics {
|
|
+ __u32 fib_nodes;
|
|
+ __u32 fib_route_nodes;
|
|
+ __u32 fib_rt_entries;
|
|
+ __u32 fib_rt_cache;
|
|
+ __u32 fib_discarded_routes;
|
|
+ atomic_t fib_rt_alloc;
|
|
+ atomic_t fib_rt_uncache;
|
|
+};
|
|
+
|
|
+struct fib6_node {
|
|
+ struct fib6_node *parent;
|
|
+ struct fib6_node *left;
|
|
+ struct fib6_node *right;
|
|
+ struct fib6_info *leaf;
|
|
+ __u16 fn_bit;
|
|
+ __u16 fn_flags;
|
|
+ int fn_sernum;
|
|
+ struct fib6_info *rr_ptr;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct fib6_table {
|
|
+ struct hlist_node tb6_hlist;
|
|
+ u32 tb6_id;
|
|
+ spinlock_t tb6_lock;
|
|
+ struct fib6_node tb6_root;
|
|
+ struct inet_peer_base tb6_peers;
|
|
+ unsigned int flags;
|
|
+ unsigned int fib_seq;
|
|
+};
|
|
+
|
|
+typedef union {
|
|
+ __be32 a4;
|
|
+ __be32 a6[4];
|
|
+ struct in6_addr in6;
|
|
+} xfrm_address_t;
|
|
+
|
|
+struct xfrm_id {
|
|
+ xfrm_address_t daddr;
|
|
+ __be32 spi;
|
|
+ __u8 proto;
|
|
+};
|
|
+
|
|
+struct xfrm_selector {
|
|
+ xfrm_address_t daddr;
|
|
+ xfrm_address_t saddr;
|
|
+ __be16 dport;
|
|
+ __be16 dport_mask;
|
|
+ __be16 sport;
|
|
+ __be16 sport_mask;
|
|
+ __u16 family;
|
|
+ __u8 prefixlen_d;
|
|
+ __u8 prefixlen_s;
|
|
+ __u8 proto;
|
|
+ int ifindex;
|
|
+ __kernel_uid32_t user;
|
|
+};
|
|
+
|
|
+struct xfrm_lifetime_cfg {
|
|
+ __u64 soft_byte_limit;
|
|
+ __u64 hard_byte_limit;
|
|
+ __u64 soft_packet_limit;
|
|
+ __u64 hard_packet_limit;
|
|
+ __u64 soft_add_expires_seconds;
|
|
+ __u64 hard_add_expires_seconds;
|
|
+ __u64 soft_use_expires_seconds;
|
|
+ __u64 hard_use_expires_seconds;
|
|
+};
|
|
+
|
|
+struct xfrm_lifetime_cur {
|
|
+ __u64 bytes;
|
|
+ __u64 packets;
|
|
+ __u64 add_time;
|
|
+ __u64 use_time;
|
|
+};
|
|
+
|
|
+struct xfrm_replay_state {
|
|
+ __u32 oseq;
|
|
+ __u32 seq;
|
|
+ __u32 bitmap;
|
|
+};
|
|
+
|
|
+struct xfrm_replay_state_esn {
|
|
+ unsigned int bmp_len;
|
|
+ __u32 oseq;
|
|
+ __u32 seq;
|
|
+ __u32 oseq_hi;
|
|
+ __u32 seq_hi;
|
|
+ __u32 replay_window;
|
|
+ __u32 bmp[0];
|
|
+};
|
|
+
|
|
+struct xfrm_algo {
|
|
+ char alg_name[64];
|
|
+ unsigned int alg_key_len;
|
|
+ char alg_key[0];
|
|
+};
|
|
+
|
|
+struct xfrm_algo_auth {
|
|
+ char alg_name[64];
|
|
+ unsigned int alg_key_len;
|
|
+ unsigned int alg_trunc_len;
|
|
+ char alg_key[0];
|
|
+};
|
|
+
|
|
+struct xfrm_algo_aead {
|
|
+ char alg_name[64];
|
|
+ unsigned int alg_key_len;
|
|
+ unsigned int alg_icv_len;
|
|
+ char alg_key[0];
|
|
+};
|
|
+
|
|
+struct xfrm_stats {
|
|
+ __u32 replay_window;
|
|
+ __u32 replay;
|
|
+ __u32 integrity_failed;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ XFRM_POLICY_TYPE_MAIN = 0,
|
|
+ XFRM_POLICY_TYPE_SUB = 1,
|
|
+ XFRM_POLICY_TYPE_MAX = 2,
|
|
+ XFRM_POLICY_TYPE_ANY = 255,
|
|
+};
|
|
+
|
|
+struct xfrm_encap_tmpl {
|
|
+ __u16 encap_type;
|
|
+ __be16 encap_sport;
|
|
+ __be16 encap_dport;
|
|
+ xfrm_address_t encap_oa;
|
|
+};
|
|
+
|
|
+struct xfrm_mark {
|
|
+ __u32 v;
|
|
+ __u32 m;
|
|
+};
|
|
+
|
|
+struct xfrm_address_filter {
|
|
+ xfrm_address_t saddr;
|
|
+ xfrm_address_t daddr;
|
|
+ __u16 family;
|
|
+ __u8 splen;
|
|
+ __u8 dplen;
|
|
+};
|
|
+
|
|
+struct xfrm_state_walk {
|
|
+ struct list_head all;
|
|
+ u8 state;
|
|
+ u8 dying;
|
|
+ u8 proto;
|
|
+ u32 seq;
|
|
+ struct xfrm_address_filter *filter;
|
|
+ long unsigned int kabi_reserved1;
|
|
+};
|
|
+
|
|
+struct xfrm_state_offload {
|
|
+ struct net_device *dev;
|
|
+ long unsigned int offload_handle;
|
|
+ unsigned int num_exthdrs;
|
|
+ u8 flags;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct xfrm_replay;
|
|
+
|
|
+struct xfrm_type;
|
|
+
|
|
+struct xfrm_mode;
|
|
+
|
|
+struct xfrm_type_offload;
|
|
+
|
|
+struct xfrm_state {
|
|
+ possible_net_t xs_net;
|
|
+ union {
|
|
+ struct hlist_node gclist;
|
|
+ struct hlist_node bydst;
|
|
+ };
|
|
+ struct hlist_node bysrc;
|
|
+ struct hlist_node byspi;
|
|
+ refcount_t refcnt;
|
|
+ spinlock_t lock;
|
|
+ struct xfrm_id id;
|
|
+ struct xfrm_selector sel;
|
|
+ struct xfrm_mark mark;
|
|
+ u32 if_id;
|
|
+ u32 tfcpad;
|
|
+ u32 genid;
|
|
+ struct xfrm_state_walk km;
|
|
+ struct {
|
|
+ u32 reqid;
|
|
+ u8 mode;
|
|
+ u8 replay_window;
|
|
+ u8 aalgo;
|
|
+ u8 ealgo;
|
|
+ u8 calgo;
|
|
+ u8 flags;
|
|
+ u16 family;
|
|
+ xfrm_address_t saddr;
|
|
+ int header_len;
|
|
+ int trailer_len;
|
|
+ u32 extra_flags;
|
|
+ struct xfrm_mark smark;
|
|
+ } props;
|
|
+ struct xfrm_lifetime_cfg lft;
|
|
+ struct xfrm_algo_auth *aalg;
|
|
+ struct xfrm_algo *ealg;
|
|
+ struct xfrm_algo *calg;
|
|
+ struct xfrm_algo_aead *aead;
|
|
+ const char *geniv;
|
|
+ struct xfrm_encap_tmpl *encap;
|
|
+ xfrm_address_t *coaddr;
|
|
+ struct xfrm_state *tunnel;
|
|
+ atomic_t tunnel_users;
|
|
+ struct xfrm_replay_state replay;
|
|
+ struct xfrm_replay_state_esn *replay_esn;
|
|
+ struct xfrm_replay_state preplay;
|
|
+ struct xfrm_replay_state_esn *preplay_esn;
|
|
+ const struct xfrm_replay *repl;
|
|
+ u32 xflags;
|
|
+ u32 replay_maxage;
|
|
+ u32 replay_maxdiff;
|
|
+ struct timer_list rtimer;
|
|
+ struct xfrm_stats stats;
|
|
+ struct xfrm_lifetime_cur curlft;
|
|
+ struct tasklet_hrtimer mtimer;
|
|
+ struct xfrm_state_offload xso;
|
|
+ long int saved_tmo;
|
|
+ time64_t lastused;
|
|
+ struct page_frag xfrag;
|
|
+ const struct xfrm_type *type;
|
|
+ struct xfrm_mode *inner_mode;
|
|
+ struct xfrm_mode *inner_mode_iaf;
|
|
+ struct xfrm_mode *outer_mode;
|
|
+ const struct xfrm_type_offload *type_offload;
|
|
+ struct xfrm_sec_ctx *security;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+struct xfrm_policy_walk_entry {
|
|
+ struct list_head all;
|
|
+ u8 dead;
|
|
+};
|
|
+
|
|
+struct xfrm_policy_queue {
|
|
+ struct sk_buff_head hold_queue;
|
|
+ struct timer_list hold_timer;
|
|
+ long unsigned int timeout;
|
|
+};
|
|
+
|
|
+struct xfrm_tmpl {
|
|
+ struct xfrm_id id;
|
|
+ xfrm_address_t saddr;
|
|
+ short unsigned int encap_family;
|
|
+ u32 reqid;
|
|
+ u8 mode;
|
|
+ u8 share;
|
|
+ u8 optional;
|
|
+ u8 allalgs;
|
|
+ u32 aalgos;
|
|
+ u32 ealgos;
|
|
+ u32 calgos;
|
|
+};
|
|
+
|
|
+struct xfrm_policy {
|
|
+ possible_net_t xp_net;
|
|
+ struct hlist_node bydst;
|
|
+ struct hlist_node byidx;
|
|
+ rwlock_t lock;
|
|
+ refcount_t refcnt;
|
|
+ struct timer_list timer;
|
|
+ atomic_t genid;
|
|
+ u32 priority;
|
|
+ u32 index;
|
|
+ u32 if_id;
|
|
+ struct xfrm_mark mark;
|
|
+ struct xfrm_selector selector;
|
|
+ struct xfrm_lifetime_cfg lft;
|
|
+ struct xfrm_lifetime_cur curlft;
|
|
+ struct xfrm_policy_walk_entry walk;
|
|
+ struct xfrm_policy_queue polq;
|
|
+ u8 type;
|
|
+ u8 action;
|
|
+ u8 flags;
|
|
+ u8 xfrm_nr;
|
|
+ u16 family;
|
|
+ struct xfrm_sec_ctx *security;
|
|
+ struct xfrm_tmpl xfrm_vec[6];
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct udp_hslot;
|
|
+
|
|
+struct udp_table {
|
|
+ struct udp_hslot *hash;
|
|
+ struct udp_hslot *hash2;
|
|
+ unsigned int mask;
|
|
+ unsigned int log;
|
|
+};
|
|
+
|
|
+struct rtable {
|
|
+ struct dst_entry dst;
|
|
+ int rt_genid;
|
|
+ unsigned int rt_flags;
|
|
+ __u16 rt_type;
|
|
+ __u8 rt_is_input;
|
|
+ __u8 rt_uses_gateway;
|
|
+ int rt_iif;
|
|
+ __be32 rt_gateway;
|
|
+ u32 rt_mtu_locked: 1;
|
|
+ u32 rt_pmtu: 31;
|
|
+ struct list_head rt_uncached;
|
|
+ struct uncached_list *rt_uncached_list;
|
|
+};
|
|
+
|
|
+struct rt6_exception_bucket {
|
|
+ struct hlist_head chain;
|
|
+ int depth;
|
|
+};
|
|
+
|
|
+struct xfrm_replay {
|
|
+ void (*advance)(struct xfrm_state *, __be32);
|
|
+ int (*check)(struct xfrm_state *, struct sk_buff *, __be32);
|
|
+ int (*recheck)(struct xfrm_state *, struct sk_buff *, __be32);
|
|
+ void (*notify)(struct xfrm_state *, int);
|
|
+ int (*overflow)(struct xfrm_state *, struct sk_buff *);
|
|
+};
|
|
+
|
|
+struct xfrm_type {
|
|
+ char *description;
|
|
+ struct module *owner;
|
|
+ u8 proto;
|
|
+ u8 flags;
|
|
+ int (*init_state)(struct xfrm_state *);
|
|
+ void (*destructor)(struct xfrm_state *);
|
|
+ int (*input)(struct xfrm_state *, struct sk_buff *);
|
|
+ int (*output)(struct xfrm_state *, struct sk_buff *);
|
|
+ int (*reject)(struct xfrm_state *, struct sk_buff *, const struct flowi *);
|
|
+ int (*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
|
|
+ u32 (*get_mtu)(struct xfrm_state *, int);
|
|
+};
|
|
+
|
|
+struct xfrm_state_afinfo;
|
|
+
|
|
+struct xfrm_mode {
|
|
+ int (*input2)(struct xfrm_state *, struct sk_buff *);
|
|
+ int (*input)(struct xfrm_state *, struct sk_buff *);
|
|
+ int (*output2)(struct xfrm_state *, struct sk_buff *);
|
|
+ int (*output)(struct xfrm_state *, struct sk_buff *);
|
|
+ struct sk_buff * (*gso_segment)(struct xfrm_state *, struct sk_buff *, netdev_features_t);
|
|
+ void (*xmit)(struct xfrm_state *, struct sk_buff *);
|
|
+ struct xfrm_state_afinfo *afinfo;
|
|
+ struct module *owner;
|
|
+ unsigned int encap;
|
|
+ int flags;
|
|
+};
|
|
+
|
|
+struct xfrm_type_offload {
|
|
+ char *description;
|
|
+ struct module *owner;
|
|
+ u8 proto;
|
|
+ void (*encap)(struct xfrm_state *, struct sk_buff *);
|
|
+ int (*input_tail)(struct xfrm_state *, struct sk_buff *);
|
|
+ int (*xmit)(struct xfrm_state *, struct sk_buff *, netdev_features_t);
|
|
+};
|
|
+
|
|
+struct xfrm_dst {
|
|
+ union {
|
|
+ struct dst_entry dst;
|
|
+ struct rtable rt;
|
|
+ struct rt6_info rt6;
|
|
+ } u;
|
|
+ struct dst_entry *route;
|
|
+ struct dst_entry *child;
|
|
+ struct dst_entry *path;
|
|
+ struct xfrm_policy *pols[2];
|
|
+ int num_pols;
|
|
+ int num_xfrms;
|
|
+ u32 xfrm_genid;
|
|
+ u32 policy_genid;
|
|
+ u32 route_mtu_cached;
|
|
+ u32 child_mtu_cached;
|
|
+ u32 route_cookie;
|
|
+ u32 path_cookie;
|
|
+};
|
|
+
|
|
+struct xfrm_state_afinfo {
|
|
+ unsigned int family;
|
|
+ unsigned int proto;
|
|
+ __be16 eth_proto;
|
|
+ struct module *owner;
|
|
+ const struct xfrm_type *type_map[256];
|
|
+ const struct xfrm_type_offload *type_offload_map[256];
|
|
+ struct xfrm_mode *mode_map[5];
|
|
+ int (*init_flags)(struct xfrm_state *);
|
|
+ void (*init_tempsel)(struct xfrm_selector *, const struct flowi *);
|
|
+ void (*init_temprop)(struct xfrm_state *, const struct xfrm_tmpl *, const xfrm_address_t *, const xfrm_address_t *);
|
|
+ int (*tmpl_sort)(struct xfrm_tmpl **, struct xfrm_tmpl **, int);
|
|
+ int (*state_sort)(struct xfrm_state **, struct xfrm_state **, int);
|
|
+ int (*output)(struct net *, struct sock *, struct sk_buff *);
|
|
+ int (*output_finish)(struct sock *, struct sk_buff *);
|
|
+ int (*extract_input)(struct xfrm_state *, struct sk_buff *);
|
|
+ int (*extract_output)(struct xfrm_state *, struct sk_buff *);
|
|
+ int (*transport_finish)(struct sk_buff *, int);
|
|
+ void (*local_error)(struct sk_buff *, u32);
|
|
+};
|
|
+
|
|
+struct udp_hslot {
|
|
+ struct hlist_head head;
|
|
+ int count;
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct sockaddr_un {
|
|
+ __kernel_sa_family_t sun_family;
|
|
+ char sun_path[108];
|
|
+};
|
|
+
|
|
+struct unix_address {
|
|
+ refcount_t refcnt;
|
|
+ int len;
|
|
+ unsigned int hash;
|
|
+ struct sockaddr_un name[0];
|
|
+};
|
|
+
|
|
+struct unix_sock {
|
|
+ struct sock sk;
|
|
+ struct unix_address *addr;
|
|
+ struct path path;
|
|
+ struct mutex iolock;
|
|
+ struct mutex bindlock;
|
|
+ struct sock *peer;
|
|
+ struct list_head link;
|
|
+ atomic_long_t inflight;
|
|
+ spinlock_t lock;
|
|
+ long unsigned int gc_flags;
|
|
+ struct socket_wq peer_wq;
|
|
+ wait_queue_entry_t peer_wake;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct ptrace_relation {
|
|
+ struct task_struct *tracer;
|
|
+ struct task_struct *tracee;
|
|
+ bool invalid;
|
|
+ struct list_head node;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct access_report_info {
|
|
+ struct callback_head work;
|
|
+ const char *access;
|
|
+ struct task_struct *target;
|
|
+ struct task_struct *agent;
|
|
+};
|
|
+
|
|
+enum devcg_behavior {
|
|
+ DEVCG_DEFAULT_NONE = 0,
|
|
+ DEVCG_DEFAULT_ALLOW = 1,
|
|
+ DEVCG_DEFAULT_DENY = 2,
|
|
+};
|
|
+
|
|
+struct dev_exception_item {
|
|
+ u32 major;
|
|
+ u32 minor;
|
|
+ short int type;
|
|
+ short int access;
|
|
+ struct list_head list;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct dev_cgroup {
|
|
+ struct cgroup_subsys_state css;
|
|
+ struct list_head exceptions;
|
|
+ enum devcg_behavior behavior;
|
|
+};
|
|
+
|
|
+enum integrity_status {
|
|
+ INTEGRITY_PASS = 0,
|
|
+ INTEGRITY_PASS_IMMUTABLE = 1,
|
|
+ INTEGRITY_FAIL = 2,
|
|
+ INTEGRITY_NOLABEL = 3,
|
|
+ INTEGRITY_NOXATTRS = 4,
|
|
+ INTEGRITY_UNKNOWN = 5,
|
|
+};
|
|
+
|
|
+struct ima_digest_data {
|
|
+ u8 algo;
|
|
+ u8 length;
|
|
+ union {
|
|
+ struct {
|
|
+ u8 unused;
|
|
+ u8 type;
|
|
+ } sha1;
|
|
+ struct {
|
|
+ u8 type;
|
|
+ u8 algo;
|
|
+ } ng;
|
|
+ u8 data[2];
|
|
+ } xattr;
|
|
+ u8 digest[0];
|
|
+};
|
|
+
|
|
+struct integrity_iint_cache {
|
|
+ struct rb_node rb_node;
|
|
+ struct mutex mutex;
|
|
+ struct inode *inode;
|
|
+ u64 version;
|
|
+ long unsigned int flags;
|
|
+ long unsigned int measured_pcrs;
|
|
+ long unsigned int atomic_flags;
|
|
+ enum integrity_status ima_file_status: 4;
|
|
+ enum integrity_status ima_mmap_status: 4;
|
|
+ enum integrity_status ima_bprm_status: 4;
|
|
+ enum integrity_status ima_read_status: 4;
|
|
+ enum integrity_status ima_creds_status: 4;
|
|
+ enum integrity_status evm_status: 4;
|
|
+ struct ima_digest_data *ima_hash;
|
|
+};
|
|
+
|
|
+struct asymmetric_key_id;
|
|
+
|
|
+struct public_key_signature {
|
|
+ struct asymmetric_key_id *auth_ids[2];
|
|
+ u8 *s;
|
|
+ u32 s_size;
|
|
+ u8 *digest;
|
|
+ u8 digest_size;
|
|
+ const char *pkey_algo;
|
|
+ const char *hash_algo;
|
|
+};
|
|
+
|
|
+struct asymmetric_key_id {
|
|
+ short unsigned int len;
|
|
+ unsigned char data[0];
|
|
+};
|
|
+
|
|
+struct signature_v2_hdr {
|
|
+ uint8_t type;
|
|
+ uint8_t version;
|
|
+ uint8_t hash_algo;
|
|
+ __be32 keyid;
|
|
+ __be16 sig_size;
|
|
+ uint8_t sig[0];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct tpm_chip;
|
|
+
|
|
+struct evm_ima_xattr_data {
|
|
+ u8 type;
|
|
+ u8 digest[32];
|
|
+};
|
|
+
|
|
+enum ima_show_type {
|
|
+ IMA_SHOW_BINARY = 0,
|
|
+ IMA_SHOW_BINARY_NO_FIELD_LEN = 1,
|
|
+ IMA_SHOW_BINARY_OLD_STRING_FMT = 2,
|
|
+ IMA_SHOW_ASCII = 3,
|
|
+};
|
|
+
|
|
+struct ima_event_data {
|
|
+ struct integrity_iint_cache *iint;
|
|
+ struct file *file;
|
|
+ const unsigned char *filename;
|
|
+ struct evm_ima_xattr_data *xattr_value;
|
|
+ int xattr_len;
|
|
+ const char *violation;
|
|
+};
|
|
+
|
|
+struct ima_field_data {
|
|
+ u8 *data;
|
|
+ u32 len;
|
|
+};
|
|
+
|
|
+struct ima_template_field {
|
|
+ const char field_id[16];
|
|
+ int (*field_init)(struct ima_event_data *, struct ima_field_data *);
|
|
+ void (*field_show)(struct seq_file *, enum ima_show_type, struct ima_field_data *);
|
|
+};
|
|
+
|
|
+struct ima_template_desc {
|
|
+ struct list_head list;
|
|
+ char *name;
|
|
+ char *fmt;
|
|
+ int num_fields;
|
|
+ struct ima_template_field **fields;
|
|
+};
|
|
+
|
|
+struct ima_template_entry {
|
|
+ int pcr;
|
|
+ u8 digest[20];
|
|
+ struct ima_template_desc *template_desc;
|
|
+ u32 template_data_len;
|
|
+ struct ima_field_data template_data[0];
|
|
+};
|
|
+
|
|
+struct ima_queue_entry {
|
|
+ struct hlist_node hnext;
|
|
+ struct list_head later;
|
|
+ struct ima_template_entry *entry;
|
|
+};
|
|
+
|
|
+struct ima_h_table {
|
|
+ atomic_long_t len;
|
|
+ atomic_long_t violations;
|
|
+ struct hlist_head queue[512];
|
|
+};
|
|
+
|
|
+enum ima_fs_flags {
|
|
+ IMA_FS_BUSY = 0,
|
|
+};
|
|
+
|
|
+enum evm_ima_xattr_type {
|
|
+ IMA_XATTR_DIGEST = 1,
|
|
+ EVM_XATTR_HMAC = 2,
|
|
+ EVM_IMA_XATTR_DIGSIG = 3,
|
|
+ IMA_XATTR_DIGEST_NG = 4,
|
|
+ EVM_XATTR_PORTABLE_DIGSIG = 5,
|
|
+ IMA_XATTR_LAST = 6,
|
|
+};
|
|
+
|
|
+enum ima_hooks {
|
|
+ NONE___2 = 0,
|
|
+ FILE_CHECK = 1,
|
|
+ MMAP_CHECK = 2,
|
|
+ BPRM_CHECK = 3,
|
|
+ CREDS_CHECK = 4,
|
|
+ POST_SETATTR = 5,
|
|
+ MODULE_CHECK = 6,
|
|
+ FIRMWARE_CHECK = 7,
|
|
+ KEXEC_KERNEL_CHECK = 8,
|
|
+ KEXEC_INITRAMFS_CHECK = 9,
|
|
+ POLICY_CHECK = 10,
|
|
+ MAX_CHECK = 11,
|
|
+};
|
|
+
|
|
+struct crypto_wait {
|
|
+ struct completion completion;
|
|
+ int err;
|
|
+};
|
|
+
|
|
+struct hash_alg_common {
|
|
+ unsigned int digestsize;
|
|
+ unsigned int statesize;
|
|
+ struct crypto_alg base;
|
|
+};
|
|
+
|
|
+struct ahash_request {
|
|
+ struct crypto_async_request base;
|
|
+ unsigned int nbytes;
|
|
+ struct scatterlist *src;
|
|
+ u8 *result;
|
|
+ void *priv;
|
|
+ void *__ctx[0];
|
|
+};
|
|
+
|
|
+struct crypto_ahash {
|
|
+ int (*init)(struct ahash_request *);
|
|
+ int (*update)(struct ahash_request *);
|
|
+ int (*final)(struct ahash_request *);
|
|
+ int (*finup)(struct ahash_request *);
|
|
+ int (*digest)(struct ahash_request *);
|
|
+ int (*export)(struct ahash_request *, void *);
|
|
+ int (*import)(struct ahash_request *, const void *);
|
|
+ int (*setkey)(struct crypto_ahash *, const u8 *, unsigned int);
|
|
+ unsigned int reqsize;
|
|
+ struct crypto_tfm base;
|
|
+};
|
|
+
|
|
+enum tpm_pcrs {
|
|
+ TPM_PCR0 = 0,
|
|
+ TPM_PCR8 = 8,
|
|
+};
|
|
+
|
|
+enum lsm_rule_types {
|
|
+ LSM_OBJ_USER = 0,
|
|
+ LSM_OBJ_ROLE = 1,
|
|
+ LSM_OBJ_TYPE = 2,
|
|
+ LSM_SUBJ_USER = 3,
|
|
+ LSM_SUBJ_ROLE = 4,
|
|
+ LSM_SUBJ_TYPE = 5,
|
|
+};
|
|
+
|
|
+enum policy_types {
|
|
+ ORIGINAL_TCB = 1,
|
|
+ DEFAULT_TCB = 2,
|
|
+};
|
|
+
|
|
+struct ima_rule_entry {
|
|
+ struct list_head list;
|
|
+ int action;
|
|
+ unsigned int flags;
|
|
+ enum ima_hooks func;
|
|
+ int mask;
|
|
+ long unsigned int fsmagic;
|
|
+ uuid_t fsuuid;
|
|
+ kuid_t uid;
|
|
+ kuid_t fowner;
|
|
+ bool (*uid_op)(kuid_t, kuid_t);
|
|
+ bool (*fowner_op)(kuid_t, kuid_t);
|
|
+ int pcr;
|
|
+ struct {
|
|
+ void *rule;
|
|
+ void *args_p;
|
|
+ int type;
|
|
+ } lsm[6];
|
|
+ char *fsname;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ Opt_err___10 = -1,
|
|
+ Opt_measure = 1,
|
|
+ Opt_dont_measure = 2,
|
|
+ Opt_appraise = 3,
|
|
+ Opt_dont_appraise = 4,
|
|
+ Opt_audit = 5,
|
|
+ Opt_hash___2 = 6,
|
|
+ Opt_dont_hash = 7,
|
|
+ Opt_obj_user = 8,
|
|
+ Opt_obj_role = 9,
|
|
+ Opt_obj_type = 10,
|
|
+ Opt_subj_user = 11,
|
|
+ Opt_subj_role = 12,
|
|
+ Opt_subj_type = 13,
|
|
+ Opt_func = 14,
|
|
+ Opt_mask = 15,
|
|
+ Opt_fsmagic = 16,
|
|
+ Opt_fsname = 17,
|
|
+ Opt_fsuuid = 18,
|
|
+ Opt_uid_eq = 19,
|
|
+ Opt_euid_eq = 20,
|
|
+ Opt_fowner_eq = 21,
|
|
+ Opt_uid_gt = 22,
|
|
+ Opt_euid_gt = 23,
|
|
+ Opt_fowner_gt = 24,
|
|
+ Opt_uid_lt = 25,
|
|
+ Opt_euid_lt = 26,
|
|
+ Opt_fowner_lt = 27,
|
|
+ Opt_appraise_type = 28,
|
|
+ Opt_permit_directio = 29,
|
|
+ Opt_pcr = 30,
|
|
+};
|
|
+
|
|
+struct ima_kexec_hdr {
|
|
+ u16 version;
|
|
+ u16 _reserved0;
|
|
+ u32 _reserved1;
|
|
+ u64 buffer_size;
|
|
+ u64 count;
|
|
+};
|
|
+
|
|
+enum header_fields {
|
|
+ HDR_PCR = 0,
|
|
+ HDR_DIGEST = 1,
|
|
+ HDR_TEMPLATE_NAME = 2,
|
|
+ HDR_TEMPLATE_DATA = 3,
|
|
+ HDR__LAST = 4,
|
|
+};
|
|
+
|
|
+enum data_formats {
|
|
+ DATA_FMT_DIGEST = 0,
|
|
+ DATA_FMT_DIGEST_WITH_ALGO = 1,
|
|
+ DATA_FMT_STRING = 2,
|
|
+ DATA_FMT_HEX = 3,
|
|
+};
|
|
+
|
|
+struct xattr_list {
|
|
+ struct list_head list;
|
|
+ char *name;
|
|
+};
|
|
+
|
|
+struct evm_digest {
|
|
+ struct ima_digest_data hdr;
|
|
+ char digest[64];
|
|
+};
|
|
+
|
|
+struct h_misc {
|
|
+ long unsigned int ino;
|
|
+ __u32 generation;
|
|
+ uid_t uid;
|
|
+ gid_t gid;
|
|
+ umode_t mode;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CRYPTO_MSG_ALG_REQUEST = 0,
|
|
+ CRYPTO_MSG_ALG_REGISTER = 1,
|
|
+};
|
|
+
|
|
+struct crypto_larval {
|
|
+ struct crypto_alg alg;
|
|
+ struct crypto_alg *adult;
|
|
+ struct completion completion;
|
|
+ u32 mask;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CRYPTOA_UNSPEC = 0,
|
|
+ CRYPTOA_ALG = 1,
|
|
+ CRYPTOA_TYPE = 2,
|
|
+ CRYPTOA_U32 = 3,
|
|
+ __CRYPTOA_MAX = 4,
|
|
+};
|
|
+
|
|
+struct crypto_attr_alg {
|
|
+ char name[128];
|
|
+};
|
|
+
|
|
+struct crypto_attr_u32 {
|
|
+ u32 num;
|
|
+};
|
|
+
|
|
+struct rtattr {
|
|
+ short unsigned int rta_len;
|
|
+ short unsigned int rta_type;
|
|
+};
|
|
+
|
|
+struct crypto_queue {
|
|
+ struct list_head list;
|
|
+ struct list_head *backlog;
|
|
+ unsigned int qlen;
|
|
+ unsigned int max_qlen;
|
|
+};
|
|
+
|
|
+struct aead_instance {
|
|
+ void (*free)(struct aead_instance *);
|
|
+ union {
|
|
+ struct {
|
|
+ char head[72];
|
|
+ struct crypto_instance base;
|
|
+ } s;
|
|
+ struct aead_alg alg;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct crypto_aead_spawn {
|
|
+ struct crypto_spawn base;
|
|
+};
|
|
+
|
|
+struct aead_geniv_ctx {
|
|
+ spinlock_t lock;
|
|
+ struct crypto_aead *child;
|
|
+ struct crypto_skcipher *sknull;
|
|
+ u8 salt[0];
|
|
+};
|
|
+
|
|
+struct crypto_rng;
|
|
+
|
|
+struct rng_alg {
|
|
+ int (*generate)(struct crypto_rng *, const u8 *, unsigned int, u8 *, unsigned int);
|
|
+ int (*seed)(struct crypto_rng *, const u8 *, unsigned int);
|
|
+ void (*set_ent)(struct crypto_rng *, const u8 *, unsigned int);
|
|
+ unsigned int seedsize;
|
|
+ struct crypto_alg base;
|
|
+};
|
|
+
|
|
+struct crypto_rng {
|
|
+ struct crypto_tfm base;
|
|
+};
|
|
+
|
|
+enum crypto_attr_type_t {
|
|
+ CRYPTOCFGA_UNSPEC = 0,
|
|
+ CRYPTOCFGA_PRIORITY_VAL = 1,
|
|
+ CRYPTOCFGA_REPORT_LARVAL = 2,
|
|
+ CRYPTOCFGA_REPORT_HASH = 3,
|
|
+ CRYPTOCFGA_REPORT_BLKCIPHER = 4,
|
|
+ CRYPTOCFGA_REPORT_AEAD = 5,
|
|
+ CRYPTOCFGA_REPORT_COMPRESS = 6,
|
|
+ CRYPTOCFGA_REPORT_RNG = 7,
|
|
+ CRYPTOCFGA_REPORT_CIPHER = 8,
|
|
+ CRYPTOCFGA_REPORT_AKCIPHER = 9,
|
|
+ CRYPTOCFGA_REPORT_KPP = 10,
|
|
+ CRYPTOCFGA_REPORT_ACOMP = 11,
|
|
+ __CRYPTOCFGA_MAX = 12,
|
|
+};
|
|
+
|
|
+struct crypto_report_aead {
|
|
+ char type[64];
|
|
+ char geniv[64];
|
|
+ unsigned int blocksize;
|
|
+ unsigned int maxauthsize;
|
|
+ unsigned int ivsize;
|
|
+};
|
|
+
|
|
+struct ablkcipher_walk {
|
|
+ struct {
|
|
+ struct page *page;
|
|
+ unsigned int offset;
|
|
+ } src;
|
|
+ struct {
|
|
+ struct page *page;
|
|
+ unsigned int offset;
|
|
+ } dst;
|
|
+ struct scatter_walk in;
|
|
+ unsigned int nbytes;
|
|
+ struct scatter_walk out;
|
|
+ unsigned int total;
|
|
+ struct list_head buffers;
|
|
+ u8 *iv_buffer;
|
|
+ u8 *iv;
|
|
+ int flags;
|
|
+ unsigned int blocksize;
|
|
+};
|
|
+
|
|
+struct crypto_report_blkcipher {
|
|
+ char type[64];
|
|
+ char geniv[64];
|
|
+ unsigned int blocksize;
|
|
+ unsigned int min_keysize;
|
|
+ unsigned int max_keysize;
|
|
+ unsigned int ivsize;
|
|
+};
|
|
+
|
|
+struct ablkcipher_buffer {
|
|
+ struct list_head entry;
|
|
+ struct scatter_walk dst;
|
|
+ unsigned int len;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ ABLKCIPHER_WALK_SLOW = 1,
|
|
+};
|
|
+
|
|
+struct blkcipher_walk {
|
|
+ union {
|
|
+ struct {
|
|
+ struct page *page;
|
|
+ long unsigned int offset;
|
|
+ } phys;
|
|
+ struct {
|
|
+ u8 *page;
|
|
+ u8 *addr;
|
|
+ } virt;
|
|
+ } src;
|
|
+ union {
|
|
+ struct {
|
|
+ struct page *page;
|
|
+ long unsigned int offset;
|
|
+ } phys;
|
|
+ struct {
|
|
+ u8 *page;
|
|
+ u8 *addr;
|
|
+ } virt;
|
|
+ } dst;
|
|
+ struct scatter_walk in;
|
|
+ unsigned int nbytes;
|
|
+ struct scatter_walk out;
|
|
+ unsigned int total;
|
|
+ void *page;
|
|
+ u8 *buffer;
|
|
+ u8 *iv;
|
|
+ unsigned int ivsize;
|
|
+ int flags;
|
|
+ unsigned int walk_blocksize;
|
|
+ unsigned int cipher_blocksize;
|
|
+ unsigned int alignmask;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ BLKCIPHER_WALK_PHYS = 1,
|
|
+ BLKCIPHER_WALK_SLOW = 2,
|
|
+ BLKCIPHER_WALK_COPY = 4,
|
|
+ BLKCIPHER_WALK_DIFF = 8,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SKCIPHER_WALK_PHYS = 1,
|
|
+ SKCIPHER_WALK_SLOW = 2,
|
|
+ SKCIPHER_WALK_COPY = 4,
|
|
+ SKCIPHER_WALK_DIFF = 8,
|
|
+ SKCIPHER_WALK_SLEEP = 16,
|
|
+};
|
|
+
|
|
+struct skcipher_walk_buffer {
|
|
+ struct list_head entry;
|
|
+ struct scatter_walk dst;
|
|
+ unsigned int len;
|
|
+ u8 *data;
|
|
+ u8 buffer[0];
|
|
+};
|
|
+
|
|
+struct ahash_alg {
|
|
+ int (*init)(struct ahash_request *);
|
|
+ int (*update)(struct ahash_request *);
|
|
+ int (*final)(struct ahash_request *);
|
|
+ int (*finup)(struct ahash_request *);
|
|
+ int (*digest)(struct ahash_request *);
|
|
+ int (*export)(struct ahash_request *, void *);
|
|
+ int (*import)(struct ahash_request *, const void *);
|
|
+ int (*setkey)(struct crypto_ahash *, const u8 *, unsigned int);
|
|
+ struct hash_alg_common halg;
|
|
+};
|
|
+
|
|
+struct crypto_hash_walk {
|
|
+ char *data;
|
|
+ unsigned int offset;
|
|
+ unsigned int alignmask;
|
|
+ struct page *pg;
|
|
+ unsigned int entrylen;
|
|
+ unsigned int total;
|
|
+ struct scatterlist *sg;
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+struct ahash_instance {
|
|
+ struct ahash_alg alg;
|
|
+};
|
|
+
|
|
+struct crypto_ahash_spawn {
|
|
+ struct crypto_spawn base;
|
|
+};
|
|
+
|
|
+struct crypto_report_hash {
|
|
+ char type[64];
|
|
+ unsigned int blocksize;
|
|
+ unsigned int digestsize;
|
|
+};
|
|
+
|
|
+struct ahash_request_priv {
|
|
+ crypto_completion_t complete;
|
|
+ void *data;
|
|
+ u8 *result;
|
|
+ u32 flags;
|
|
+ void *ubuf[0];
|
|
+};
|
|
+
|
|
+struct shash_instance {
|
|
+ struct shash_alg alg;
|
|
+};
|
|
+
|
|
+struct crypto_shash_spawn {
|
|
+ struct crypto_spawn base;
|
|
+};
|
|
+
|
|
+struct crypto_report_akcipher {
|
|
+ char type[64];
|
|
+};
|
|
+
|
|
+struct akcipher_request {
|
|
+ struct crypto_async_request base;
|
|
+ struct scatterlist *src;
|
|
+ struct scatterlist *dst;
|
|
+ unsigned int src_len;
|
|
+ unsigned int dst_len;
|
|
+ void *__ctx[0];
|
|
+};
|
|
+
|
|
+struct crypto_akcipher {
|
|
+ struct crypto_tfm base;
|
|
+};
|
|
+
|
|
+struct akcipher_alg {
|
|
+ int (*sign)(struct akcipher_request *);
|
|
+ int (*verify)(struct akcipher_request *);
|
|
+ int (*encrypt)(struct akcipher_request *);
|
|
+ int (*decrypt)(struct akcipher_request *);
|
|
+ int (*set_pub_key)(struct crypto_akcipher *, const void *, unsigned int);
|
|
+ int (*set_priv_key)(struct crypto_akcipher *, const void *, unsigned int);
|
|
+ unsigned int (*max_size)(struct crypto_akcipher *);
|
|
+ int (*init)(struct crypto_akcipher *);
|
|
+ void (*exit)(struct crypto_akcipher *);
|
|
+ unsigned int reqsize;
|
|
+ struct crypto_alg base;
|
|
+};
|
|
+
|
|
+struct akcipher_instance {
|
|
+ void (*free)(struct akcipher_instance *);
|
|
+ union {
|
|
+ struct {
|
|
+ char head[80];
|
|
+ struct crypto_instance base;
|
|
+ } s;
|
|
+ struct akcipher_alg alg;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct crypto_akcipher_spawn {
|
|
+ struct crypto_spawn base;
|
|
+};
|
|
+
|
|
+struct crypto_report_kpp {
|
|
+ char type[64];
|
|
+};
|
|
+
|
|
+struct kpp_request {
|
|
+ struct crypto_async_request base;
|
|
+ struct scatterlist *src;
|
|
+ struct scatterlist *dst;
|
|
+ unsigned int src_len;
|
|
+ unsigned int dst_len;
|
|
+ void *__ctx[0];
|
|
+};
|
|
+
|
|
+struct crypto_kpp {
|
|
+ struct crypto_tfm base;
|
|
+};
|
|
+
|
|
+struct kpp_alg {
|
|
+ int (*set_secret)(struct crypto_kpp *, const void *, unsigned int);
|
|
+ int (*generate_public_key)(struct kpp_request *);
|
|
+ int (*compute_shared_secret)(struct kpp_request *);
|
|
+ unsigned int (*max_size)(struct crypto_kpp *);
|
|
+ int (*init)(struct crypto_kpp *);
|
|
+ void (*exit)(struct crypto_kpp *);
|
|
+ unsigned int reqsize;
|
|
+ struct crypto_alg base;
|
|
+};
|
|
+
|
|
+enum asn1_class {
|
|
+ ASN1_UNIV = 0,
|
|
+ ASN1_APPL = 1,
|
|
+ ASN1_CONT = 2,
|
|
+ ASN1_PRIV = 3,
|
|
+};
|
|
+
|
|
+enum asn1_method {
|
|
+ ASN1_PRIM = 0,
|
|
+ ASN1_CONS = 1,
|
|
+};
|
|
+
|
|
+enum asn1_tag {
|
|
+ ASN1_EOC = 0,
|
|
+ ASN1_BOOL = 1,
|
|
+ ASN1_INT = 2,
|
|
+ ASN1_BTS = 3,
|
|
+ ASN1_OTS = 4,
|
|
+ ASN1_NULL = 5,
|
|
+ ASN1_OID = 6,
|
|
+ ASN1_ODE = 7,
|
|
+ ASN1_EXT = 8,
|
|
+ ASN1_REAL = 9,
|
|
+ ASN1_ENUM = 10,
|
|
+ ASN1_EPDV = 11,
|
|
+ ASN1_UTF8STR = 12,
|
|
+ ASN1_RELOID = 13,
|
|
+ ASN1_SEQ = 16,
|
|
+ ASN1_SET = 17,
|
|
+ ASN1_NUMSTR = 18,
|
|
+ ASN1_PRNSTR = 19,
|
|
+ ASN1_TEXSTR = 20,
|
|
+ ASN1_VIDSTR = 21,
|
|
+ ASN1_IA5STR = 22,
|
|
+ ASN1_UNITIM = 23,
|
|
+ ASN1_GENTIM = 24,
|
|
+ ASN1_GRASTR = 25,
|
|
+ ASN1_VISSTR = 26,
|
|
+ ASN1_GENSTR = 27,
|
|
+ ASN1_UNISTR = 28,
|
|
+ ASN1_CHRSTR = 29,
|
|
+ ASN1_BMPSTR = 30,
|
|
+ ASN1_LONG_TAG = 31,
|
|
+};
|
|
+
|
|
+typedef int (*asn1_action_t)(void *, size_t, unsigned char, const void *, size_t);
|
|
+
|
|
+struct asn1_decoder {
|
|
+ const unsigned char *machine;
|
|
+ size_t machlen;
|
|
+ const asn1_action_t *actions;
|
|
+};
|
|
+
|
|
+enum asn1_opcode {
|
|
+ ASN1_OP_MATCH = 0,
|
|
+ ASN1_OP_MATCH_OR_SKIP = 1,
|
|
+ ASN1_OP_MATCH_ACT = 2,
|
|
+ ASN1_OP_MATCH_ACT_OR_SKIP = 3,
|
|
+ ASN1_OP_MATCH_JUMP = 4,
|
|
+ ASN1_OP_MATCH_JUMP_OR_SKIP = 5,
|
|
+ ASN1_OP_MATCH_ANY = 8,
|
|
+ ASN1_OP_MATCH_ANY_OR_SKIP = 9,
|
|
+ ASN1_OP_MATCH_ANY_ACT = 10,
|
|
+ ASN1_OP_MATCH_ANY_ACT_OR_SKIP = 11,
|
|
+ ASN1_OP_COND_MATCH_OR_SKIP = 17,
|
|
+ ASN1_OP_COND_MATCH_ACT_OR_SKIP = 19,
|
|
+ ASN1_OP_COND_MATCH_JUMP_OR_SKIP = 21,
|
|
+ ASN1_OP_COND_MATCH_ANY = 24,
|
|
+ ASN1_OP_COND_MATCH_ANY_OR_SKIP = 25,
|
|
+ ASN1_OP_COND_MATCH_ANY_ACT = 26,
|
|
+ ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP = 27,
|
|
+ ASN1_OP_COND_FAIL = 28,
|
|
+ ASN1_OP_COMPLETE = 29,
|
|
+ ASN1_OP_ACT = 30,
|
|
+ ASN1_OP_MAYBE_ACT = 31,
|
|
+ ASN1_OP_END_SEQ = 32,
|
|
+ ASN1_OP_END_SET = 33,
|
|
+ ASN1_OP_END_SEQ_OF = 34,
|
|
+ ASN1_OP_END_SET_OF = 35,
|
|
+ ASN1_OP_END_SEQ_ACT = 36,
|
|
+ ASN1_OP_END_SET_ACT = 37,
|
|
+ ASN1_OP_END_SEQ_OF_ACT = 38,
|
|
+ ASN1_OP_END_SET_OF_ACT = 39,
|
|
+ ASN1_OP_RETURN = 40,
|
|
+ ASN1_OP__NR = 41,
|
|
+};
|
|
+
|
|
+enum rsapubkey_actions {
|
|
+ ACT_rsa_get_e = 0,
|
|
+ ACT_rsa_get_n = 1,
|
|
+ NR__rsapubkey_actions = 2,
|
|
+};
|
|
+
|
|
+enum rsaprivkey_actions {
|
|
+ ACT_rsa_get_d = 0,
|
|
+ ACT_rsa_get_dp = 1,
|
|
+ ACT_rsa_get_dq = 2,
|
|
+ ACT_rsa_get_e___2 = 3,
|
|
+ ACT_rsa_get_n___2 = 4,
|
|
+ ACT_rsa_get_p = 5,
|
|
+ ACT_rsa_get_q = 6,
|
|
+ ACT_rsa_get_qinv = 7,
|
|
+ NR__rsaprivkey_actions = 8,
|
|
+};
|
|
+
|
|
+typedef long unsigned int mpi_limb_t;
|
|
+
|
|
+struct gcry_mpi {
|
|
+ int alloced;
|
|
+ int nlimbs;
|
|
+ int nbits;
|
|
+ int sign;
|
|
+ unsigned int flags;
|
|
+ mpi_limb_t *d;
|
|
+};
|
|
+
|
|
+typedef struct gcry_mpi *MPI;
|
|
+
|
|
+struct rsa_key {
|
|
+ const u8 *n;
|
|
+ const u8 *e;
|
|
+ const u8 *d;
|
|
+ const u8 *p;
|
|
+ const u8 *q;
|
|
+ const u8 *dp;
|
|
+ const u8 *dq;
|
|
+ const u8 *qinv;
|
|
+ size_t n_sz;
|
|
+ size_t e_sz;
|
|
+ size_t d_sz;
|
|
+ size_t p_sz;
|
|
+ size_t q_sz;
|
|
+ size_t dp_sz;
|
|
+ size_t dq_sz;
|
|
+ size_t qinv_sz;
|
|
+};
|
|
+
|
|
+struct rsa_mpi_key {
|
|
+ MPI n;
|
|
+ MPI e;
|
|
+ MPI d;
|
|
+};
|
|
+
|
|
+struct crypto_template___2;
|
|
+
|
|
+struct asn1_decoder___2;
|
|
+
|
|
+struct rsa_asn1_template {
|
|
+ const char *name;
|
|
+ const u8 *data;
|
|
+ size_t size;
|
|
+};
|
|
+
|
|
+struct pkcs1pad_ctx {
|
|
+ struct crypto_akcipher *child;
|
|
+ unsigned int key_size;
|
|
+};
|
|
+
|
|
+struct pkcs1pad_inst_ctx {
|
|
+ struct crypto_akcipher_spawn spawn;
|
|
+ const struct rsa_asn1_template *digest_info;
|
|
+};
|
|
+
|
|
+struct pkcs1pad_request {
|
|
+ struct scatterlist in_sg[2];
|
|
+ struct scatterlist out_sg[1];
|
|
+ uint8_t *in_buf;
|
|
+ uint8_t *out_buf;
|
|
+ struct akcipher_request child_req;
|
|
+};
|
|
+
|
|
+struct crypto_report_acomp {
|
|
+ char type[64];
|
|
+};
|
|
+
|
|
+struct acomp_req {
|
|
+ struct crypto_async_request base;
|
|
+ struct scatterlist *src;
|
|
+ struct scatterlist *dst;
|
|
+ unsigned int slen;
|
|
+ unsigned int dlen;
|
|
+ u32 flags;
|
|
+ void *__ctx[0];
|
|
+};
|
|
+
|
|
+struct crypto_acomp {
|
|
+ int (*compress)(struct acomp_req *);
|
|
+ int (*decompress)(struct acomp_req *);
|
|
+ void (*dst_free)(struct scatterlist *);
|
|
+ unsigned int reqsize;
|
|
+ struct crypto_tfm base;
|
|
+};
|
|
+
|
|
+struct acomp_alg {
|
|
+ int (*compress)(struct acomp_req *);
|
|
+ int (*decompress)(struct acomp_req *);
|
|
+ void (*dst_free)(struct scatterlist *);
|
|
+ int (*init)(struct crypto_acomp *);
|
|
+ void (*exit)(struct crypto_acomp *);
|
|
+ unsigned int reqsize;
|
|
+ struct crypto_alg base;
|
|
+};
|
|
+
|
|
+struct crypto_report_comp {
|
|
+ char type[64];
|
|
+};
|
|
+
|
|
+struct crypto_scomp {
|
|
+ struct crypto_tfm base;
|
|
+};
|
|
+
|
|
+struct scomp_alg {
|
|
+ void * (*alloc_ctx)(struct crypto_scomp *);
|
|
+ void (*free_ctx)(struct crypto_scomp *, void *);
|
|
+ int (*compress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *);
|
|
+ int (*decompress)(struct crypto_scomp *, const u8 *, unsigned int, u8 *, unsigned int *, void *);
|
|
+ struct crypto_alg base;
|
|
+};
|
|
+
|
|
+struct cryptomgr_param {
|
|
+ struct rtattr *tb[34];
|
|
+ struct {
|
|
+ struct rtattr attr;
|
|
+ struct crypto_attr_type data;
|
|
+ } type;
|
|
+ union {
|
|
+ struct rtattr attr;
|
|
+ struct {
|
|
+ struct rtattr attr;
|
|
+ struct crypto_attr_alg data;
|
|
+ } alg;
|
|
+ struct {
|
|
+ struct rtattr attr;
|
|
+ struct crypto_attr_u32 data;
|
|
+ } nu32;
|
|
+ } attrs[32];
|
|
+ char template[128];
|
|
+ struct crypto_larval *larval;
|
|
+ u32 otype;
|
|
+ u32 omask;
|
|
+};
|
|
+
|
|
+struct crypto_test_param {
|
|
+ char driver[128];
|
|
+ char alg[128];
|
|
+ u32 type;
|
|
+};
|
|
+
|
|
+struct drbg_string {
|
|
+ const unsigned char *buf;
|
|
+ size_t len;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct drbg_test_data {
|
|
+ struct drbg_string *testentropy;
|
|
+};
|
|
+
|
|
+struct hash_testvec {
|
|
+ const char *key;
|
|
+ const char *plaintext;
|
|
+ const char *digest;
|
|
+ unsigned char tap[8];
|
|
+ short unsigned int psize;
|
|
+ unsigned char np;
|
|
+ unsigned char ksize;
|
|
+};
|
|
+
|
|
+struct cipher_testvec {
|
|
+ const char *key;
|
|
+ const char *iv;
|
|
+ const char *ptext;
|
|
+ const char *ctext;
|
|
+ short unsigned int tap[8];
|
|
+ int np;
|
|
+ unsigned char also_non_np;
|
|
+ bool fail;
|
|
+ unsigned char wk;
|
|
+ unsigned char klen;
|
|
+ short unsigned int len;
|
|
+ bool fips_skip;
|
|
+ bool generates_iv;
|
|
+};
|
|
+
|
|
+struct aead_testvec {
|
|
+ const char *key;
|
|
+ const char *iv;
|
|
+ const char *input;
|
|
+ const char *assoc;
|
|
+ const char *result;
|
|
+ unsigned char tap[8];
|
|
+ unsigned char atap[8];
|
|
+ int np;
|
|
+ int anp;
|
|
+ bool fail;
|
|
+ unsigned char novrfy;
|
|
+ unsigned char wk;
|
|
+ unsigned char klen;
|
|
+ short unsigned int ilen;
|
|
+ short unsigned int alen;
|
|
+ short unsigned int rlen;
|
|
+};
|
|
+
|
|
+struct cprng_testvec {
|
|
+ const char *key;
|
|
+ const char *dt;
|
|
+ const char *v;
|
|
+ const char *result;
|
|
+ unsigned char klen;
|
|
+ short unsigned int dtlen;
|
|
+ short unsigned int vlen;
|
|
+ short unsigned int rlen;
|
|
+ short unsigned int loops;
|
|
+};
|
|
+
|
|
+struct drbg_testvec {
|
|
+ const unsigned char *entropy;
|
|
+ size_t entropylen;
|
|
+ const unsigned char *entpra;
|
|
+ const unsigned char *entprb;
|
|
+ size_t entprlen;
|
|
+ const unsigned char *addtla;
|
|
+ const unsigned char *addtlb;
|
|
+ size_t addtllen;
|
|
+ const unsigned char *pers;
|
|
+ size_t perslen;
|
|
+ const unsigned char *expected;
|
|
+ size_t expectedlen;
|
|
+};
|
|
+
|
|
+struct akcipher_testvec {
|
|
+ const unsigned char *key;
|
|
+ const unsigned char *m;
|
|
+ const unsigned char *c;
|
|
+ unsigned int key_len;
|
|
+ unsigned int m_size;
|
|
+ unsigned int c_size;
|
|
+ bool public_key_vec;
|
|
+ bool siggen_sigver_test;
|
|
+};
|
|
+
|
|
+struct kpp_testvec {
|
|
+ const unsigned char *secret;
|
|
+ const unsigned char *b_secret;
|
|
+ const unsigned char *b_public;
|
|
+ const unsigned char *expected_a_public;
|
|
+ const unsigned char *expected_ss;
|
|
+ short unsigned int secret_size;
|
|
+ short unsigned int b_secret_size;
|
|
+ short unsigned int b_public_size;
|
|
+ short unsigned int expected_a_public_size;
|
|
+ short unsigned int expected_ss_size;
|
|
+ bool genkey;
|
|
+};
|
|
+
|
|
+struct comp_testvec {
|
|
+ int inlen;
|
|
+ int outlen;
|
|
+ char input[512];
|
|
+ char output[512];
|
|
+};
|
|
+
|
|
+struct aead_test_suite {
|
|
+ struct {
|
|
+ const struct aead_testvec *vecs;
|
|
+ unsigned int count;
|
|
+ } enc;
|
|
+ struct {
|
|
+ const struct aead_testvec *vecs;
|
|
+ unsigned int count;
|
|
+ } dec;
|
|
+};
|
|
+
|
|
+struct cipher_test_suite {
|
|
+ const struct cipher_testvec *vecs;
|
|
+ unsigned int count;
|
|
+};
|
|
+
|
|
+struct comp_test_suite {
|
|
+ struct {
|
|
+ const struct comp_testvec *vecs;
|
|
+ unsigned int count;
|
|
+ } comp;
|
|
+ struct {
|
|
+ const struct comp_testvec *vecs;
|
|
+ unsigned int count;
|
|
+ } decomp;
|
|
+};
|
|
+
|
|
+struct hash_test_suite {
|
|
+ const struct hash_testvec *vecs;
|
|
+ unsigned int count;
|
|
+};
|
|
+
|
|
+struct cprng_test_suite {
|
|
+ const struct cprng_testvec *vecs;
|
|
+ unsigned int count;
|
|
+};
|
|
+
|
|
+struct drbg_test_suite {
|
|
+ const struct drbg_testvec *vecs;
|
|
+ unsigned int count;
|
|
+};
|
|
+
|
|
+struct akcipher_test_suite {
|
|
+ const struct akcipher_testvec *vecs;
|
|
+ unsigned int count;
|
|
+};
|
|
+
|
|
+struct kpp_test_suite {
|
|
+ const struct kpp_testvec *vecs;
|
|
+ unsigned int count;
|
|
+};
|
|
+
|
|
+struct alg_test_desc {
|
|
+ const char *alg;
|
|
+ int (*test)(const struct alg_test_desc *, const char *, u32, u32);
|
|
+ int fips_allowed;
|
|
+ union {
|
|
+ struct aead_test_suite aead;
|
|
+ struct cipher_test_suite cipher;
|
|
+ struct comp_test_suite comp;
|
|
+ struct hash_test_suite hash;
|
|
+ struct cprng_test_suite cprng;
|
|
+ struct drbg_test_suite drbg;
|
|
+ struct akcipher_test_suite akcipher;
|
|
+ struct kpp_test_suite kpp;
|
|
+ } suite;
|
|
+};
|
|
+
|
|
+enum hash_test {
|
|
+ HASH_TEST_DIGEST = 0,
|
|
+ HASH_TEST_FINAL = 1,
|
|
+ HASH_TEST_FINUP = 2,
|
|
+};
|
|
+
|
|
+struct hmac_ctx {
|
|
+ struct crypto_shash *hash;
|
|
+};
|
|
+
|
|
+struct md5_state {
|
|
+ u32 hash[4];
|
|
+ u32 block[16];
|
|
+ u64 byte_count;
|
|
+};
|
|
+
|
|
+struct gf128mul_4k {
|
|
+ be128 t[256];
|
|
+};
|
|
+
|
|
+struct gf128mul_64k {
|
|
+ struct gf128mul_4k *t[16];
|
|
+};
|
|
+
|
|
+struct crypto_ecb_ctx {
|
|
+ struct crypto_cipher *child;
|
|
+};
|
|
+
|
|
+struct crypto_cbc_ctx {
|
|
+ struct crypto_cipher *child;
|
|
+};
|
|
+
|
|
+struct crypto_cfb_ctx {
|
|
+ struct crypto_cipher *child;
|
|
+};
|
|
+
|
|
+struct crypto_ctr_ctx {
|
|
+ struct crypto_cipher *child;
|
|
+};
|
|
+
|
|
+struct crypto_rfc3686_ctx {
|
|
+ struct crypto_skcipher *child;
|
|
+ u8 nonce[4];
|
|
+};
|
|
+
|
|
+struct crypto_rfc3686_req_ctx {
|
|
+ u8 iv[16];
|
|
+ struct skcipher_request subreq;
|
|
+};
|
|
+
|
|
+struct gcm_instance_ctx {
|
|
+ struct crypto_skcipher_spawn ctr;
|
|
+ struct crypto_ahash_spawn ghash;
|
|
+};
|
|
+
|
|
+struct crypto_gcm_ctx {
|
|
+ struct crypto_skcipher *ctr;
|
|
+ struct crypto_ahash *ghash;
|
|
+};
|
|
+
|
|
+struct crypto_rfc4106_ctx {
|
|
+ struct crypto_aead *child;
|
|
+ u8 nonce[4];
|
|
+};
|
|
+
|
|
+struct crypto_rfc4106_req_ctx {
|
|
+ struct scatterlist src[3];
|
|
+ struct scatterlist dst[3];
|
|
+ struct aead_request subreq;
|
|
+};
|
|
+
|
|
+struct crypto_rfc4543_instance_ctx {
|
|
+ struct crypto_aead_spawn aead;
|
|
+};
|
|
+
|
|
+struct crypto_rfc4543_ctx {
|
|
+ struct crypto_aead *child;
|
|
+ struct crypto_skcipher *null;
|
|
+ u8 nonce[4];
|
|
+};
|
|
+
|
|
+struct crypto_rfc4543_req_ctx {
|
|
+ struct aead_request subreq;
|
|
+};
|
|
+
|
|
+struct crypto_gcm_ghash_ctx {
|
|
+ unsigned int cryptlen;
|
|
+ struct scatterlist *src;
|
|
+ int (*complete)(struct aead_request *, u32);
|
|
+};
|
|
+
|
|
+struct crypto_gcm_req_priv_ctx {
|
|
+ u8 iv[16];
|
|
+ u8 auth_tag[16];
|
|
+ u8 iauth_tag[16];
|
|
+ struct scatterlist src[3];
|
|
+ struct scatterlist dst[3];
|
|
+ struct scatterlist sg;
|
|
+ struct crypto_gcm_ghash_ctx ghash_ctx;
|
|
+ union {
|
|
+ struct ahash_request ahreq;
|
|
+ struct skcipher_request skreq;
|
|
+ } u;
|
|
+};
|
|
+
|
|
+struct cryptd_ablkcipher {
|
|
+ struct crypto_ablkcipher base;
|
|
+};
|
|
+
|
|
+struct cryptd_skcipher {
|
|
+ struct crypto_skcipher base;
|
|
+};
|
|
+
|
|
+struct cryptd_ahash {
|
|
+ struct crypto_ahash base;
|
|
+};
|
|
+
|
|
+struct cryptd_cpu_queue {
|
|
+ struct crypto_queue queue;
|
|
+ struct work_struct work;
|
|
+};
|
|
+
|
|
+struct cryptd_queue {
|
|
+ struct cryptd_cpu_queue *cpu_queue;
|
|
+};
|
|
+
|
|
+struct cryptd_instance_ctx {
|
|
+ struct crypto_spawn spawn;
|
|
+ struct cryptd_queue *queue;
|
|
+};
|
|
+
|
|
+struct skcipherd_instance_ctx {
|
|
+ struct crypto_skcipher_spawn spawn;
|
|
+ struct cryptd_queue *queue;
|
|
+};
|
|
+
|
|
+struct hashd_instance_ctx {
|
|
+ struct crypto_shash_spawn spawn;
|
|
+ struct cryptd_queue *queue;
|
|
+};
|
|
+
|
|
+struct aead_instance_ctx {
|
|
+ struct crypto_aead_spawn aead_spawn;
|
|
+ struct cryptd_queue *queue;
|
|
+};
|
|
+
|
|
+struct cryptd_blkcipher_ctx {
|
|
+ atomic_t refcnt;
|
|
+ struct crypto_blkcipher *child;
|
|
+};
|
|
+
|
|
+struct cryptd_blkcipher_request_ctx {
|
|
+ crypto_completion_t complete;
|
|
+};
|
|
+
|
|
+struct cryptd_skcipher_ctx {
|
|
+ atomic_t refcnt;
|
|
+ struct crypto_skcipher *child;
|
|
+};
|
|
+
|
|
+struct cryptd_skcipher_request_ctx {
|
|
+ crypto_completion_t complete;
|
|
+};
|
|
+
|
|
+struct cryptd_hash_ctx {
|
|
+ atomic_t refcnt;
|
|
+ struct crypto_shash *child;
|
|
+};
|
|
+
|
|
+struct cryptd_hash_request_ctx {
|
|
+ crypto_completion_t complete;
|
|
+ struct shash_desc desc;
|
|
+};
|
|
+
|
|
+struct cryptd_aead_ctx {
|
|
+ atomic_t refcnt;
|
|
+ struct crypto_aead *child;
|
|
+};
|
|
+
|
|
+struct cryptd_aead_request_ctx {
|
|
+ crypto_completion_t complete;
|
|
+};
|
|
+
|
|
+typedef unsigned char Byte;
|
|
+
|
|
+typedef long unsigned int uLong;
|
|
+
|
|
+struct internal_state;
|
|
+
|
|
+struct z_stream_s {
|
|
+ const Byte *next_in;
|
|
+ uLong avail_in;
|
|
+ uLong total_in;
|
|
+ Byte *next_out;
|
|
+ uLong avail_out;
|
|
+ uLong total_out;
|
|
+ char *msg;
|
|
+ struct internal_state *state;
|
|
+ void *workspace;
|
|
+ int data_type;
|
|
+ uLong adler;
|
|
+ uLong reserved;
|
|
+};
|
|
+
|
|
+struct internal_state {
|
|
+ int dummy;
|
|
+};
|
|
+
|
|
+struct deflate_ctx {
|
|
+ struct z_stream_s comp_stream;
|
|
+ struct z_stream_s decomp_stream;
|
|
+};
|
|
+
|
|
+struct chksum_ctx {
|
|
+ u32 key;
|
|
+};
|
|
+
|
|
+struct chksum_desc_ctx {
|
|
+ u32 crc;
|
|
+};
|
|
+
|
|
+struct chksum_desc_ctx___2 {
|
|
+ __u16 crc;
|
|
+};
|
|
+
|
|
+struct lzo_ctx {
|
|
+ void *lzo_comp_mem;
|
|
+};
|
|
+
|
|
+struct crypto_report_rng {
|
|
+ char type[64];
|
|
+ unsigned int seedsize;
|
|
+};
|
|
+
|
|
+struct random_ready_callback {
|
|
+ struct list_head list;
|
|
+ void (*func)(struct random_ready_callback *);
|
|
+ struct module *owner;
|
|
+};
|
|
+
|
|
+typedef uint32_t drbg_flag_t;
|
|
+
|
|
+struct drbg_core {
|
|
+ drbg_flag_t flags;
|
|
+ __u8 statelen;
|
|
+ __u8 blocklen_bytes;
|
|
+ char cra_name[128];
|
|
+ char backend_cra_name[128];
|
|
+};
|
|
+
|
|
+struct drbg_state;
|
|
+
|
|
+struct drbg_state_ops {
|
|
+ int (*update)(struct drbg_state *, struct list_head *, int);
|
|
+ int (*generate)(struct drbg_state *, unsigned char *, unsigned int, struct list_head *);
|
|
+ int (*crypto_init)(struct drbg_state *);
|
|
+ int (*crypto_fini)(struct drbg_state *);
|
|
+};
|
|
+
|
|
+struct drbg_state {
|
|
+ struct mutex drbg_mutex;
|
|
+ unsigned char *V;
|
|
+ unsigned char *Vbuf;
|
|
+ unsigned char *C;
|
|
+ unsigned char *Cbuf;
|
|
+ size_t reseed_ctr;
|
|
+ size_t reseed_threshold;
|
|
+ unsigned char *scratchpad;
|
|
+ unsigned char *scratchpadbuf;
|
|
+ void *priv_data;
|
|
+ struct crypto_skcipher *ctr_handle;
|
|
+ struct skcipher_request *ctr_req;
|
|
+ __u8 *outscratchpadbuf;
|
|
+ __u8 *outscratchpad;
|
|
+ struct crypto_wait ctr_wait;
|
|
+ struct scatterlist sg_in;
|
|
+ struct scatterlist sg_out;
|
|
+ bool seeded;
|
|
+ bool pr;
|
|
+ struct work_struct seed_work;
|
|
+ struct crypto_rng *jent;
|
|
+ const struct drbg_state_ops *d_ops;
|
|
+ const struct drbg_core *core;
|
|
+ struct drbg_string test_data;
|
|
+ struct random_ready_callback random_ready;
|
|
+};
|
|
+
|
|
+enum drbg_prefixes {
|
|
+ DRBG_PREFIX0 = 0,
|
|
+ DRBG_PREFIX1 = 1,
|
|
+ DRBG_PREFIX2 = 2,
|
|
+ DRBG_PREFIX3 = 3,
|
|
+};
|
|
+
|
|
+struct s {
|
|
+ __be32 conv;
|
|
+};
|
|
+
|
|
+struct rand_data {
|
|
+ __u64 data;
|
|
+ __u64 old_data;
|
|
+ __u64 prev_time;
|
|
+ __u64 last_delta;
|
|
+ __s64 last_delta2;
|
|
+ unsigned int stuck: 1;
|
|
+ unsigned int osr;
|
|
+ unsigned int stir: 1;
|
|
+ unsigned int disable_unbias: 1;
|
|
+ unsigned char *mem;
|
|
+ unsigned int memlocation;
|
|
+ unsigned int memblocks;
|
|
+ unsigned int memblocksize;
|
|
+ unsigned int memaccessloops;
|
|
+};
|
|
+
|
|
+union c {
|
|
+ __u64 u64;
|
|
+ __u32 u32[2];
|
|
+};
|
|
+
|
|
+struct rand_data___2;
|
|
+
|
|
+struct jitterentropy {
|
|
+ spinlock_t jent_lock;
|
|
+ struct rand_data___2 *entropy_collector;
|
|
+};
|
|
+
|
|
+struct ghash_ctx {
|
|
+ struct gf128mul_4k *gf128;
|
|
+};
|
|
+
|
|
+struct ghash_desc_ctx {
|
|
+ u8 buffer[16];
|
|
+ u32 bytes;
|
|
+};
|
|
+
|
|
+struct sockaddr_alg {
|
|
+ __u16 salg_family;
|
|
+ __u8 salg_type[14];
|
|
+ __u32 salg_feat;
|
|
+ __u32 salg_mask;
|
|
+ __u8 salg_name[64];
|
|
+};
|
|
+
|
|
+struct af_alg_iv {
|
|
+ __u32 ivlen;
|
|
+ __u8 iv[0];
|
|
+};
|
|
+
|
|
+struct cmsghdr {
|
|
+ __kernel_size_t cmsg_len;
|
|
+ int cmsg_level;
|
|
+ int cmsg_type;
|
|
+};
|
|
+
|
|
+struct net_proto_family {
|
|
+ int family;
|
|
+ int (*create)(struct net *, struct socket *, int, int);
|
|
+ struct module *owner;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SOCK_WAKE_IO = 0,
|
|
+ SOCK_WAKE_WAITD = 1,
|
|
+ SOCK_WAKE_SPACE = 2,
|
|
+ SOCK_WAKE_URG = 3,
|
|
+};
|
|
+
|
|
+struct af_alg_type;
|
|
+
|
|
+struct alg_sock {
|
|
+ struct sock sk;
|
|
+ struct sock *parent;
|
|
+ unsigned int refcnt;
|
|
+ unsigned int nokey_refcnt;
|
|
+ const struct af_alg_type *type;
|
|
+ void *private;
|
|
+};
|
|
+
|
|
+struct af_alg_type {
|
|
+ void * (*bind)(const char *, u32, u32);
|
|
+ void (*release)(void *);
|
|
+ int (*setkey)(void *, const u8 *, unsigned int);
|
|
+ int (*accept)(void *, struct sock *);
|
|
+ int (*accept_nokey)(void *, struct sock *);
|
|
+ int (*setauthsize)(void *, unsigned int);
|
|
+ struct proto_ops *ops;
|
|
+ struct proto_ops *ops_nokey;
|
|
+ struct module *owner;
|
|
+ char name[14];
|
|
+};
|
|
+
|
|
+struct af_alg_control {
|
|
+ struct af_alg_iv *iv;
|
|
+ int op;
|
|
+ unsigned int aead_assoclen;
|
|
+};
|
|
+
|
|
+struct af_alg_sgl {
|
|
+ struct scatterlist sg[17];
|
|
+ struct page *pages[16];
|
|
+ unsigned int npages;
|
|
+};
|
|
+
|
|
+struct af_alg_tsgl {
|
|
+ struct list_head list;
|
|
+ unsigned int cur;
|
|
+ struct scatterlist sg[0];
|
|
+};
|
|
+
|
|
+struct af_alg_rsgl {
|
|
+ struct af_alg_sgl sgl;
|
|
+ struct list_head list;
|
|
+ size_t sg_num_bytes;
|
|
+};
|
|
+
|
|
+struct af_alg_async_req {
|
|
+ struct kiocb *iocb;
|
|
+ struct sock *sk;
|
|
+ struct af_alg_rsgl first_rsgl;
|
|
+ struct af_alg_rsgl *last_rsgl;
|
|
+ struct list_head rsgl_list;
|
|
+ struct scatterlist *tsgl;
|
|
+ unsigned int tsgl_entries;
|
|
+ unsigned int outlen;
|
|
+ unsigned int areqlen;
|
|
+ union {
|
|
+ struct aead_request aead_req;
|
|
+ struct skcipher_request skcipher_req;
|
|
+ } cra_u;
|
|
+};
|
|
+
|
|
+struct af_alg_ctx {
|
|
+ struct list_head tsgl_list;
|
|
+ void *iv;
|
|
+ size_t aead_assoclen;
|
|
+ struct crypto_wait wait;
|
|
+ size_t used;
|
|
+ atomic_t rcvused;
|
|
+ bool more;
|
|
+ bool merge;
|
|
+ bool enc;
|
|
+ unsigned int len;
|
|
+};
|
|
+
|
|
+struct alg_type_list {
|
|
+ const struct af_alg_type *type;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct hash_ctx {
|
|
+ struct af_alg_sgl sgl;
|
|
+ u8 *result;
|
|
+ struct crypto_wait wait;
|
|
+ unsigned int len;
|
|
+ bool more;
|
|
+ struct ahash_request req;
|
|
+};
|
|
+
|
|
+struct rng_ctx {
|
|
+ unsigned int len;
|
|
+ struct crypto_rng *drng;
|
|
+};
|
|
+
|
|
+struct aead_tfm {
|
|
+ struct crypto_aead *aead;
|
|
+ struct crypto_skcipher *null_tfm;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ ZSTD_error_no_error = 0,
|
|
+ ZSTD_error_GENERIC = 1,
|
|
+ ZSTD_error_prefix_unknown = 2,
|
|
+ ZSTD_error_version_unsupported = 3,
|
|
+ ZSTD_error_parameter_unknown = 4,
|
|
+ ZSTD_error_frameParameter_unsupported = 5,
|
|
+ ZSTD_error_frameParameter_unsupportedBy32bits = 6,
|
|
+ ZSTD_error_frameParameter_windowTooLarge = 7,
|
|
+ ZSTD_error_compressionParameter_unsupported = 8,
|
|
+ ZSTD_error_init_missing = 9,
|
|
+ ZSTD_error_memory_allocation = 10,
|
|
+ ZSTD_error_stage_wrong = 11,
|
|
+ ZSTD_error_dstSize_tooSmall = 12,
|
|
+ ZSTD_error_srcSize_wrong = 13,
|
|
+ ZSTD_error_corruption_detected = 14,
|
|
+ ZSTD_error_checksum_wrong = 15,
|
|
+ ZSTD_error_tableLog_tooLarge = 16,
|
|
+ ZSTD_error_maxSymbolValue_tooLarge = 17,
|
|
+ ZSTD_error_maxSymbolValue_tooSmall = 18,
|
|
+ ZSTD_error_dictionary_corrupted = 19,
|
|
+ ZSTD_error_dictionary_wrong = 20,
|
|
+ ZSTD_error_dictionaryCreation_failed = 21,
|
|
+ ZSTD_error_maxCode = 22,
|
|
+};
|
|
+
|
|
+typedef enum {
|
|
+ ZSTD_fast = 0,
|
|
+ ZSTD_dfast = 1,
|
|
+ ZSTD_greedy = 2,
|
|
+ ZSTD_lazy = 3,
|
|
+ ZSTD_lazy2 = 4,
|
|
+ ZSTD_btlazy2 = 5,
|
|
+ ZSTD_btopt = 6,
|
|
+ ZSTD_btopt2 = 7,
|
|
+} ZSTD_strategy;
|
|
+
|
|
+typedef struct {
|
|
+ unsigned int windowLog;
|
|
+ unsigned int chainLog;
|
|
+ unsigned int hashLog;
|
|
+ unsigned int searchLog;
|
|
+ unsigned int searchLength;
|
|
+ unsigned int targetLength;
|
|
+ ZSTD_strategy strategy;
|
|
+} ZSTD_compressionParameters;
|
|
+
|
|
+typedef struct {
|
|
+ unsigned int contentSizeFlag;
|
|
+ unsigned int checksumFlag;
|
|
+ unsigned int noDictIDFlag;
|
|
+} ZSTD_frameParameters;
|
|
+
|
|
+typedef struct {
|
|
+ ZSTD_compressionParameters cParams;
|
|
+ ZSTD_frameParameters fParams;
|
|
+} ZSTD_parameters;
|
|
+
|
|
+struct ZSTD_CCtx_s;
|
|
+
|
|
+typedef struct ZSTD_CCtx_s ZSTD_CCtx;
|
|
+
|
|
+struct ZSTD_DCtx_s;
|
|
+
|
|
+typedef struct ZSTD_DCtx_s ZSTD_DCtx;
|
|
+
|
|
+struct zstd_ctx {
|
|
+ ZSTD_CCtx *cctx;
|
|
+ ZSTD_DCtx *dctx;
|
|
+ void *cwksp;
|
|
+ void *dwksp;
|
|
+};
|
|
+
|
|
+enum asymmetric_payload_bits {
|
|
+ asym_crypto = 0,
|
|
+ asym_subtype = 1,
|
|
+ asym_key_ids = 2,
|
|
+ asym_auth = 3,
|
|
+};
|
|
+
|
|
+struct asymmetric_key_ids {
|
|
+ void *id[2];
|
|
+};
|
|
+
|
|
+struct asymmetric_key_subtype___2 {
|
|
+ struct module *owner;
|
|
+ const char *name;
|
|
+ short unsigned int name_len;
|
|
+ void (*describe)(const struct key *, struct seq_file *);
|
|
+ void (*destroy)(void *, void *);
|
|
+ int (*verify_signature)(const struct key *, const struct public_key_signature *);
|
|
+};
|
|
+
|
|
+struct asymmetric_key_parser {
|
|
+ struct list_head link;
|
|
+ struct module *owner;
|
|
+ const char *name;
|
|
+ int (*parse)(struct key_preparsed_payload *);
|
|
+};
|
|
+
|
|
+struct public_key {
|
|
+ void *key;
|
|
+ u32 keylen;
|
|
+ const char *id_type;
|
|
+ const char *pkey_algo;
|
|
+};
|
|
+
|
|
+enum x509_actions {
|
|
+ ACT_x509_extract_key_data = 0,
|
|
+ ACT_x509_extract_name_segment = 1,
|
|
+ ACT_x509_note_OID = 2,
|
|
+ ACT_x509_note_issuer = 3,
|
|
+ ACT_x509_note_not_after = 4,
|
|
+ ACT_x509_note_not_before = 5,
|
|
+ ACT_x509_note_pkey_algo = 6,
|
|
+ ACT_x509_note_serial = 7,
|
|
+ ACT_x509_note_signature = 8,
|
|
+ ACT_x509_note_subject = 9,
|
|
+ ACT_x509_note_tbs_certificate = 10,
|
|
+ ACT_x509_process_extension = 11,
|
|
+ NR__x509_actions = 12,
|
|
+};
|
|
+
|
|
+enum x509_akid_actions {
|
|
+ ACT_x509_akid_note_kid = 0,
|
|
+ ACT_x509_akid_note_name = 1,
|
|
+ ACT_x509_akid_note_serial = 2,
|
|
+ ACT_x509_extract_name_segment___2 = 3,
|
|
+ ACT_x509_note_OID___2 = 4,
|
|
+ NR__x509_akid_actions = 5,
|
|
+};
|
|
+
|
|
+enum OID {
|
|
+ OID_id_dsa_with_sha1 = 0,
|
|
+ OID_id_dsa = 1,
|
|
+ OID_id_ecdsa_with_sha1 = 2,
|
|
+ OID_id_ecPublicKey = 3,
|
|
+ OID_rsaEncryption = 4,
|
|
+ OID_md2WithRSAEncryption = 5,
|
|
+ OID_md3WithRSAEncryption = 6,
|
|
+ OID_md4WithRSAEncryption = 7,
|
|
+ OID_sha1WithRSAEncryption = 8,
|
|
+ OID_sha256WithRSAEncryption = 9,
|
|
+ OID_sha384WithRSAEncryption = 10,
|
|
+ OID_sha512WithRSAEncryption = 11,
|
|
+ OID_sha224WithRSAEncryption = 12,
|
|
+ OID_data = 13,
|
|
+ OID_signed_data = 14,
|
|
+ OID_email_address = 15,
|
|
+ OID_contentType = 16,
|
|
+ OID_messageDigest = 17,
|
|
+ OID_signingTime = 18,
|
|
+ OID_smimeCapabilites = 19,
|
|
+ OID_smimeAuthenticatedAttrs = 20,
|
|
+ OID_md2 = 21,
|
|
+ OID_md4 = 22,
|
|
+ OID_md5 = 23,
|
|
+ OID_msIndirectData = 24,
|
|
+ OID_msStatementType = 25,
|
|
+ OID_msSpOpusInfo = 26,
|
|
+ OID_msPeImageDataObjId = 27,
|
|
+ OID_msIndividualSPKeyPurpose = 28,
|
|
+ OID_msOutlookExpress = 29,
|
|
+ OID_certAuthInfoAccess = 30,
|
|
+ OID_sha1 = 31,
|
|
+ OID_sha256 = 32,
|
|
+ OID_sha384 = 33,
|
|
+ OID_sha512 = 34,
|
|
+ OID_sha224 = 35,
|
|
+ OID_commonName = 36,
|
|
+ OID_surname = 37,
|
|
+ OID_countryName = 38,
|
|
+ OID_locality = 39,
|
|
+ OID_stateOrProvinceName = 40,
|
|
+ OID_organizationName = 41,
|
|
+ OID_organizationUnitName = 42,
|
|
+ OID_title = 43,
|
|
+ OID_description = 44,
|
|
+ OID_name = 45,
|
|
+ OID_givenName = 46,
|
|
+ OID_initials = 47,
|
|
+ OID_generationalQualifier = 48,
|
|
+ OID_subjectKeyIdentifier = 49,
|
|
+ OID_keyUsage = 50,
|
|
+ OID_subjectAltName = 51,
|
|
+ OID_issuerAltName = 52,
|
|
+ OID_basicConstraints = 53,
|
|
+ OID_crlDistributionPoints = 54,
|
|
+ OID_certPolicies = 55,
|
|
+ OID_authorityKeyIdentifier = 56,
|
|
+ OID_extKeyUsage = 57,
|
|
+ OID__NR = 58,
|
|
+};
|
|
+
|
|
+struct x509_certificate {
|
|
+ struct x509_certificate *next;
|
|
+ struct x509_certificate *signer;
|
|
+ struct public_key *pub;
|
|
+ struct public_key_signature *sig;
|
|
+ char *issuer;
|
|
+ char *subject;
|
|
+ struct asymmetric_key_id *id;
|
|
+ struct asymmetric_key_id *skid;
|
|
+ time64_t valid_from;
|
|
+ time64_t valid_to;
|
|
+ const void *tbs;
|
|
+ unsigned int tbs_size;
|
|
+ unsigned int raw_sig_size;
|
|
+ const void *raw_sig;
|
|
+ const void *raw_serial;
|
|
+ unsigned int raw_serial_size;
|
|
+ unsigned int raw_issuer_size;
|
|
+ const void *raw_issuer;
|
|
+ const void *raw_subject;
|
|
+ unsigned int raw_subject_size;
|
|
+ unsigned int raw_skid_size;
|
|
+ const void *raw_skid;
|
|
+ unsigned int index;
|
|
+ bool seen;
|
|
+ bool verified;
|
|
+ bool self_signed;
|
|
+ bool unsupported_key;
|
|
+ bool unsupported_sig;
|
|
+ bool blacklisted;
|
|
+};
|
|
+
|
|
+struct x509_parse_context {
|
|
+ struct x509_certificate *cert;
|
|
+ long unsigned int data;
|
|
+ const void *cert_start;
|
|
+ const void *key;
|
|
+ size_t key_size;
|
|
+ enum OID last_oid;
|
|
+ enum OID algo_oid;
|
|
+ unsigned char nr_mpi;
|
|
+ u8 o_size;
|
|
+ u8 cn_size;
|
|
+ u8 email_size;
|
|
+ u16 o_offset;
|
|
+ u16 cn_offset;
|
|
+ u16 email_offset;
|
|
+ unsigned int raw_akid_size;
|
|
+ const void *raw_akid;
|
|
+ const void *akid_raw_issuer;
|
|
+ unsigned int akid_raw_issuer_size;
|
|
+};
|
|
+
|
|
+enum pkcs7_actions {
|
|
+ ACT_pkcs7_check_content_type = 0,
|
|
+ ACT_pkcs7_extract_cert = 1,
|
|
+ ACT_pkcs7_note_OID = 2,
|
|
+ ACT_pkcs7_note_certificate_list = 3,
|
|
+ ACT_pkcs7_note_content = 4,
|
|
+ ACT_pkcs7_note_data = 5,
|
|
+ ACT_pkcs7_note_signed_info = 6,
|
|
+ ACT_pkcs7_note_signeddata_version = 7,
|
|
+ ACT_pkcs7_note_signerinfo_version = 8,
|
|
+ ACT_pkcs7_sig_note_authenticated_attr = 9,
|
|
+ ACT_pkcs7_sig_note_digest_algo = 10,
|
|
+ ACT_pkcs7_sig_note_issuer = 11,
|
|
+ ACT_pkcs7_sig_note_pkey_algo = 12,
|
|
+ ACT_pkcs7_sig_note_serial = 13,
|
|
+ ACT_pkcs7_sig_note_set_of_authattrs = 14,
|
|
+ ACT_pkcs7_sig_note_signature = 15,
|
|
+ ACT_pkcs7_sig_note_skid = 16,
|
|
+ NR__pkcs7_actions = 17,
|
|
+};
|
|
+
|
|
+struct pkcs7_signed_info {
|
|
+ struct pkcs7_signed_info *next;
|
|
+ struct x509_certificate *signer;
|
|
+ unsigned int index;
|
|
+ bool unsupported_crypto;
|
|
+ bool blacklisted;
|
|
+ const void *msgdigest;
|
|
+ unsigned int msgdigest_len;
|
|
+ unsigned int authattrs_len;
|
|
+ const void *authattrs;
|
|
+ long unsigned int aa_set;
|
|
+ time64_t signing_time;
|
|
+ struct public_key_signature *sig;
|
|
+};
|
|
+
|
|
+struct pkcs7_message___2 {
|
|
+ struct x509_certificate *certs;
|
|
+ struct x509_certificate *crl;
|
|
+ struct pkcs7_signed_info *signed_infos;
|
|
+ u8 version;
|
|
+ bool have_authattrs;
|
|
+ enum OID data_type;
|
|
+ size_t data_len;
|
|
+ size_t data_hdrlen;
|
|
+ const void *data;
|
|
+};
|
|
+
|
|
+struct pkcs7_parse_context {
|
|
+ struct pkcs7_message___2 *msg;
|
|
+ struct pkcs7_signed_info *sinfo;
|
|
+ struct pkcs7_signed_info **ppsinfo;
|
|
+ struct x509_certificate *certs;
|
|
+ struct x509_certificate **ppcerts;
|
|
+ long unsigned int data;
|
|
+ enum OID last_oid;
|
|
+ unsigned int x509_index;
|
|
+ unsigned int sinfo_index;
|
|
+ const void *raw_serial;
|
|
+ unsigned int raw_serial_size;
|
|
+ unsigned int raw_issuer_size;
|
|
+ const void *raw_issuer;
|
|
+ const void *raw_skid;
|
|
+ unsigned int raw_skid_size;
|
|
+ bool expect_skid;
|
|
+};
|
|
+
|
|
+struct mz_hdr {
|
|
+ uint16_t magic;
|
|
+ uint16_t lbsize;
|
|
+ uint16_t blocks;
|
|
+ uint16_t relocs;
|
|
+ uint16_t hdrsize;
|
|
+ uint16_t min_extra_pps;
|
|
+ uint16_t max_extra_pps;
|
|
+ uint16_t ss;
|
|
+ uint16_t sp;
|
|
+ uint16_t checksum;
|
|
+ uint16_t ip;
|
|
+ uint16_t cs;
|
|
+ uint16_t reloc_table_offset;
|
|
+ uint16_t overlay_num;
|
|
+ uint16_t reserved0[4];
|
|
+ uint16_t oem_id;
|
|
+ uint16_t oem_info;
|
|
+ uint16_t reserved1[10];
|
|
+ uint32_t peaddr;
|
|
+ char message[64];
|
|
+};
|
|
+
|
|
+struct pe_hdr {
|
|
+ uint32_t magic;
|
|
+ uint16_t machine;
|
|
+ uint16_t sections;
|
|
+ uint32_t timestamp;
|
|
+ uint32_t symbol_table;
|
|
+ uint32_t symbols;
|
|
+ uint16_t opt_hdr_size;
|
|
+ uint16_t flags;
|
|
+};
|
|
+
|
|
+struct pe32_opt_hdr {
|
|
+ uint16_t magic;
|
|
+ uint8_t ld_major;
|
|
+ uint8_t ld_minor;
|
|
+ uint32_t text_size;
|
|
+ uint32_t data_size;
|
|
+ uint32_t bss_size;
|
|
+ uint32_t entry_point;
|
|
+ uint32_t code_base;
|
|
+ uint32_t data_base;
|
|
+ uint32_t image_base;
|
|
+ uint32_t section_align;
|
|
+ uint32_t file_align;
|
|
+ uint16_t os_major;
|
|
+ uint16_t os_minor;
|
|
+ uint16_t image_major;
|
|
+ uint16_t image_minor;
|
|
+ uint16_t subsys_major;
|
|
+ uint16_t subsys_minor;
|
|
+ uint32_t win32_version;
|
|
+ uint32_t image_size;
|
|
+ uint32_t header_size;
|
|
+ uint32_t csum;
|
|
+ uint16_t subsys;
|
|
+ uint16_t dll_flags;
|
|
+ uint32_t stack_size_req;
|
|
+ uint32_t stack_size;
|
|
+ uint32_t heap_size_req;
|
|
+ uint32_t heap_size;
|
|
+ uint32_t loader_flags;
|
|
+ uint32_t data_dirs;
|
|
+};
|
|
+
|
|
+struct pe32plus_opt_hdr {
|
|
+ uint16_t magic;
|
|
+ uint8_t ld_major;
|
|
+ uint8_t ld_minor;
|
|
+ uint32_t text_size;
|
|
+ uint32_t data_size;
|
|
+ uint32_t bss_size;
|
|
+ uint32_t entry_point;
|
|
+ uint32_t code_base;
|
|
+ uint64_t image_base;
|
|
+ uint32_t section_align;
|
|
+ uint32_t file_align;
|
|
+ uint16_t os_major;
|
|
+ uint16_t os_minor;
|
|
+ uint16_t image_major;
|
|
+ uint16_t image_minor;
|
|
+ uint16_t subsys_major;
|
|
+ uint16_t subsys_minor;
|
|
+ uint32_t win32_version;
|
|
+ uint32_t image_size;
|
|
+ uint32_t header_size;
|
|
+ uint32_t csum;
|
|
+ uint16_t subsys;
|
|
+ uint16_t dll_flags;
|
|
+ uint64_t stack_size_req;
|
|
+ uint64_t stack_size;
|
|
+ uint64_t heap_size_req;
|
|
+ uint64_t heap_size;
|
|
+ uint32_t loader_flags;
|
|
+ uint32_t data_dirs;
|
|
+};
|
|
+
|
|
+struct data_dirent {
|
|
+ uint32_t virtual_address;
|
|
+ uint32_t size;
|
|
+};
|
|
+
|
|
+struct data_directory {
|
|
+ struct data_dirent exports;
|
|
+ struct data_dirent imports;
|
|
+ struct data_dirent resources;
|
|
+ struct data_dirent exceptions;
|
|
+ struct data_dirent certs;
|
|
+ struct data_dirent base_relocations;
|
|
+ struct data_dirent debug;
|
|
+ struct data_dirent arch;
|
|
+ struct data_dirent global_ptr;
|
|
+ struct data_dirent tls;
|
|
+ struct data_dirent load_config;
|
|
+ struct data_dirent bound_imports;
|
|
+ struct data_dirent import_addrs;
|
|
+ struct data_dirent delay_imports;
|
|
+ struct data_dirent clr_runtime_hdr;
|
|
+ struct data_dirent reserved;
|
|
+};
|
|
+
|
|
+struct section_header {
|
|
+ char name[8];
|
|
+ uint32_t virtual_size;
|
|
+ uint32_t virtual_address;
|
|
+ uint32_t raw_data_size;
|
|
+ uint32_t data_addr;
|
|
+ uint32_t relocs;
|
|
+ uint32_t line_numbers;
|
|
+ uint16_t num_relocs;
|
|
+ uint16_t num_lin_numbers;
|
|
+ uint32_t flags;
|
|
+};
|
|
+
|
|
+struct win_certificate {
|
|
+ uint32_t length;
|
|
+ uint16_t revision;
|
|
+ uint16_t cert_type;
|
|
+};
|
|
+
|
|
+struct pefile_context {
|
|
+ unsigned int header_size;
|
|
+ unsigned int image_checksum_offset;
|
|
+ unsigned int cert_dirent_offset;
|
|
+ unsigned int n_data_dirents;
|
|
+ unsigned int n_sections;
|
|
+ unsigned int certs_size;
|
|
+ unsigned int sig_offset;
|
|
+ unsigned int sig_len;
|
|
+ const struct section_header *secs;
|
|
+ const void *digest;
|
|
+ unsigned int digest_len;
|
|
+ const char *digest_algo;
|
|
+};
|
|
+
|
|
+enum mscode_actions {
|
|
+ ACT_mscode_note_content_type = 0,
|
|
+ ACT_mscode_note_digest = 1,
|
|
+ ACT_mscode_note_digest_algo = 2,
|
|
+ NR__mscode_actions = 3,
|
|
+};
|
|
+
|
|
+struct simd_skcipher_alg___2 {
|
|
+ const char *ialg_name;
|
|
+ struct skcipher_alg alg;
|
|
+};
|
|
+
|
|
+struct simd_skcipher_ctx {
|
|
+ struct cryptd_skcipher *cryptd_tfm;
|
|
+};
|
|
+
|
|
+struct biovec_slab {
|
|
+ int nr_vecs;
|
|
+ char *name;
|
|
+ struct kmem_cache *slab;
|
|
+};
|
|
+
|
|
+enum rq_qos_id {
|
|
+ RQ_QOS_WBT = 0,
|
|
+ RQ_QOS_CGROUP = 1,
|
|
+};
|
|
+
|
|
+struct rq_qos_ops;
|
|
+
|
|
+struct rq_qos {
|
|
+ struct rq_qos_ops *ops;
|
|
+ struct request_queue *q;
|
|
+ enum rq_qos_id id;
|
|
+ struct rq_qos *next;
|
|
+};
|
|
+
|
|
+struct rq_map_data {
|
|
+ struct page **pages;
|
|
+ int page_order;
|
|
+ int nr_entries;
|
|
+ long unsigned int offset;
|
|
+ int null_mapped;
|
|
+ int from_user;
|
|
+};
|
|
+
|
|
+struct rq_qos_ops {
|
|
+ void (*throttle)(struct rq_qos *, struct bio *, spinlock_t *);
|
|
+ void (*track)(struct rq_qos *, struct request *, struct bio *);
|
|
+ void (*issue)(struct rq_qos *, struct request *);
|
|
+ void (*requeue)(struct rq_qos *, struct request *);
|
|
+ void (*done)(struct rq_qos *, struct request *);
|
|
+ void (*done_bio)(struct rq_qos *, struct bio *);
|
|
+ void (*cleanup)(struct rq_qos *, struct bio *);
|
|
+ void (*exit)(struct rq_qos *);
|
|
+};
|
|
+
|
|
+struct bio_slab {
|
|
+ struct kmem_cache *slab;
|
|
+ unsigned int slab_ref;
|
|
+ unsigned int slab_size;
|
|
+ char name[8];
|
|
+};
|
|
+
|
|
+struct bio_map_data {
|
|
+ int is_our_pages;
|
|
+ struct iov_iter iter;
|
|
+ struct iovec iov[0];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ ELV_MQUEUE_MAY = 0,
|
|
+ ELV_MQUEUE_NO = 1,
|
|
+ ELV_MQUEUE_MUST = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ BLK_MQ_F_SHOULD_MERGE = 1,
|
|
+ BLK_MQ_F_TAG_SHARED = 2,
|
|
+ BLK_MQ_F_SG_MERGE = 4,
|
|
+ BLK_MQ_F_BLOCKING = 32,
|
|
+ BLK_MQ_F_NO_SCHED = 64,
|
|
+ BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
|
|
+ BLK_MQ_F_ALLOC_POLICY_BITS = 1,
|
|
+ BLK_MQ_S_STOPPED = 0,
|
|
+ BLK_MQ_S_TAG_ACTIVE = 1,
|
|
+ BLK_MQ_S_SCHED_RESTART = 2,
|
|
+ BLK_MQ_MAX_DEPTH = 10240,
|
|
+ BLK_MQ_CPU_WORK_BATCH = 8,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ WBT_RWQ_BG = 0,
|
|
+ WBT_RWQ_KSWAPD = 1,
|
|
+ WBT_RWQ_DISCARD = 2,
|
|
+ WBT_NUM_RWQ = 3,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ BLKPREP_OK = 0,
|
|
+ BLKPREP_KILL = 1,
|
|
+ BLKPREP_DEFER = 2,
|
|
+ BLKPREP_INVALID = 3,
|
|
+};
|
|
+
|
|
+struct blk_plug_cb;
|
|
+
|
|
+typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
|
|
+
|
|
+struct blk_plug_cb {
|
|
+ struct list_head list;
|
|
+ blk_plug_cb_fn callback;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ BLK_MQ_REQ_NOWAIT = 1,
|
|
+ BLK_MQ_REQ_RESERVED = 2,
|
|
+ BLK_MQ_REQ_INTERNAL = 4,
|
|
+ BLK_MQ_REQ_PREEMPT = 8,
|
|
+};
|
|
+
|
|
+struct trace_event_raw_block_buffer {
|
|
+ struct trace_entry ent;
|
|
+ dev_t dev;
|
|
+ sector_t sector;
|
|
+ size_t size;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_block_rq_requeue {
|
|
+ struct trace_entry ent;
|
|
+ dev_t dev;
|
|
+ sector_t sector;
|
|
+ unsigned int nr_sector;
|
|
+ char rwbs[8];
|
|
+ u32 __data_loc_cmd;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_block_rq_complete {
|
|
+ struct trace_entry ent;
|
|
+ dev_t dev;
|
|
+ sector_t sector;
|
|
+ unsigned int nr_sector;
|
|
+ int error;
|
|
+ char rwbs[8];
|
|
+ u32 __data_loc_cmd;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_block_rq {
|
|
+ struct trace_entry ent;
|
|
+ dev_t dev;
|
|
+ sector_t sector;
|
|
+ unsigned int nr_sector;
|
|
+ unsigned int bytes;
|
|
+ char rwbs[8];
|
|
+ char comm[16];
|
|
+ u32 __data_loc_cmd;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_block_bio_bounce {
|
|
+ struct trace_entry ent;
|
|
+ dev_t dev;
|
|
+ sector_t sector;
|
|
+ unsigned int nr_sector;
|
|
+ char rwbs[8];
|
|
+ char comm[16];
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_block_bio_complete {
|
|
+ struct trace_entry ent;
|
|
+ dev_t dev;
|
|
+ sector_t sector;
|
|
+ unsigned int nr_sector;
|
|
+ int error;
|
|
+ char rwbs[8];
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_block_bio_merge {
|
|
+ struct trace_entry ent;
|
|
+ dev_t dev;
|
|
+ sector_t sector;
|
|
+ unsigned int nr_sector;
|
|
+ char rwbs[8];
|
|
+ char comm[16];
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_block_bio_queue {
|
|
+ struct trace_entry ent;
|
|
+ dev_t dev;
|
|
+ sector_t sector;
|
|
+ unsigned int nr_sector;
|
|
+ char rwbs[8];
|
|
+ char comm[16];
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_block_get_rq {
|
|
+ struct trace_entry ent;
|
|
+ dev_t dev;
|
|
+ sector_t sector;
|
|
+ unsigned int nr_sector;
|
|
+ char rwbs[8];
|
|
+ char comm[16];
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_block_plug {
|
|
+ struct trace_entry ent;
|
|
+ char comm[16];
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_block_unplug {
|
|
+ struct trace_entry ent;
|
|
+ int nr_rq;
|
|
+ char comm[16];
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_block_split {
|
|
+ struct trace_entry ent;
|
|
+ dev_t dev;
|
|
+ sector_t sector;
|
|
+ sector_t new_sector;
|
|
+ char rwbs[8];
|
|
+ char comm[16];
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_block_bio_remap {
|
|
+ struct trace_entry ent;
|
|
+ dev_t dev;
|
|
+ sector_t sector;
|
|
+ unsigned int nr_sector;
|
|
+ dev_t old_dev;
|
|
+ sector_t old_sector;
|
|
+ char rwbs[8];
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_block_rq_remap {
|
|
+ struct trace_entry ent;
|
|
+ dev_t dev;
|
|
+ sector_t sector;
|
|
+ unsigned int nr_sector;
|
|
+ dev_t old_dev;
|
|
+ sector_t old_sector;
|
|
+ unsigned int nr_bios;
|
|
+ char rwbs[8];
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_block_buffer {};
|
|
+
|
|
+struct trace_event_data_offsets_block_rq_requeue {
|
|
+ u32 cmd;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_block_rq_complete {
|
|
+ u32 cmd;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_block_rq {
|
|
+ u32 cmd;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_block_bio_bounce {};
|
|
+
|
|
+struct trace_event_data_offsets_block_bio_complete {};
|
|
+
|
|
+struct trace_event_data_offsets_block_bio_merge {};
|
|
+
|
|
+struct trace_event_data_offsets_block_bio_queue {};
|
|
+
|
|
+struct trace_event_data_offsets_block_get_rq {};
|
|
+
|
|
+struct trace_event_data_offsets_block_plug {};
|
|
+
|
|
+struct trace_event_data_offsets_block_unplug {};
|
|
+
|
|
+struct trace_event_data_offsets_block_split {};
|
|
+
|
|
+struct trace_event_data_offsets_block_bio_remap {};
|
|
+
|
|
+struct trace_event_data_offsets_block_rq_remap {};
|
|
+
|
|
+struct request_queue_wrapper {
|
|
+ struct request_queue q;
|
|
+ struct mutex mq_freeze_lock;
|
|
+ int mq_freeze_depth;
|
|
+};
|
|
+
|
|
+struct queue_sysfs_entry {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct request_queue *, char *);
|
|
+ ssize_t (*store)(struct request_queue *, const char *, size_t);
|
|
+};
|
|
+
|
|
+enum {
|
|
+ REQ_FSEQ_PREFLUSH = 1,
|
|
+ REQ_FSEQ_DATA = 2,
|
|
+ REQ_FSEQ_POSTFLUSH = 4,
|
|
+ REQ_FSEQ_DONE = 8,
|
|
+ REQ_FSEQ_ACTIONS = 7,
|
|
+ FLUSH_PENDING_TIMEOUT = 5000,
|
|
+};
|
|
+
|
|
+enum blk_default_limits {
|
|
+ BLK_MAX_SEGMENTS = 128,
|
|
+ BLK_SAFE_MAX_SECTORS = 255,
|
|
+ BLK_DEF_MAX_SECTORS = 2560,
|
|
+ BLK_MAX_SEGMENT_SIZE = 65536,
|
|
+ BLK_SEG_BOUNDARY_MASK = -1,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ ICQ_EXITED = 4,
|
|
+ ICQ_DESTROYED = 8,
|
|
+};
|
|
+
|
|
+typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);
|
|
+
|
|
+enum {
|
|
+ BLK_MQ_UNIQUE_TAG_BITS = 16,
|
|
+ BLK_MQ_UNIQUE_TAG_MASK = 65535,
|
|
+};
|
|
+
|
|
+struct blk_mq_tags_wrapper {
|
|
+ struct blk_mq_tags tags;
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ BLK_MQ_TAG_FAIL = -1,
|
|
+ BLK_MQ_TAG_MIN = 1,
|
|
+ BLK_MQ_TAG_MAX = -2,
|
|
+};
|
|
+
|
|
+struct mq_inflight {
|
|
+ struct hd_struct *part;
|
|
+ unsigned int *inflight;
|
|
+};
|
|
+
|
|
+struct flush_busy_ctx_data {
|
|
+ struct blk_mq_hw_ctx *hctx;
|
|
+ struct list_head *list;
|
|
+};
|
|
+
|
|
+struct dispatch_rq_data {
|
|
+ struct blk_mq_hw_ctx *hctx;
|
|
+ struct request *rq;
|
|
+};
|
|
+
|
|
+struct blk_mq_qe_pair {
|
|
+ struct list_head node;
|
|
+ struct request_queue *q;
|
|
+ struct elevator_type *type;
|
|
+};
|
|
+
|
|
+typedef void busy_iter_fn(struct blk_mq_hw_ctx *, struct request *, void *, bool);
|
|
+
|
|
+typedef void busy_tag_iter_fn(struct request *, void *, bool);
|
|
+
|
|
+struct bt_iter_data {
|
|
+ struct blk_mq_hw_ctx *hctx;
|
|
+ busy_iter_fn *fn;
|
|
+ void *data;
|
|
+ bool reserved;
|
|
+};
|
|
+
|
|
+struct bt_tags_iter_data {
|
|
+ struct blk_mq_tags *tags;
|
|
+ busy_tag_iter_fn *fn;
|
|
+ void *data;
|
|
+ bool reserved;
|
|
+};
|
|
+
|
|
+struct blk_queue_stats {
|
|
+ struct list_head callbacks;
|
|
+ spinlock_t lock;
|
|
+ bool enable_accounting;
|
|
+};
|
|
+
|
|
+struct blk_mq_ctx_sysfs_entry {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct blk_mq_ctx *, char *);
|
|
+ ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct blk_mq_hw_ctx_sysfs_entry {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
|
|
+ ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct disk_part_iter {
|
|
+ struct gendisk *disk;
|
|
+ struct hd_struct *part;
|
|
+ int idx;
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+struct hd_geometry {
|
|
+ unsigned char heads;
|
|
+ unsigned char sectors;
|
|
+ short unsigned int cylinders;
|
|
+ long unsigned int start;
|
|
+};
|
|
+
|
|
+struct blkpg_ioctl_arg {
|
|
+ int op;
|
|
+ int flags;
|
|
+ int datalen;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+struct blkpg_partition {
|
|
+ long long int start;
|
|
+ long long int length;
|
|
+ int pno;
|
|
+ char devname[64];
|
|
+ char volname[64];
|
|
+};
|
|
+
|
|
+struct pr_reservation {
|
|
+ __u64 key;
|
|
+ __u32 type;
|
|
+ __u32 flags;
|
|
+};
|
|
+
|
|
+struct pr_registration {
|
|
+ __u64 old_key;
|
|
+ __u64 new_key;
|
|
+ __u32 flags;
|
|
+ __u32 __pad;
|
|
+};
|
|
+
|
|
+struct pr_preempt {
|
|
+ __u64 old_key;
|
|
+ __u64 new_key;
|
|
+ __u32 type;
|
|
+ __u32 flags;
|
|
+};
|
|
+
|
|
+struct pr_clear {
|
|
+ __u64 key;
|
|
+ __u32 flags;
|
|
+ __u32 __pad;
|
|
+};
|
|
+
|
|
+struct klist {
|
|
+ spinlock_t k_lock;
|
|
+ struct list_head k_list;
|
|
+ void (*get)(struct klist_node *);
|
|
+ void (*put)(struct klist_node *);
|
|
+};
|
|
+
|
|
+struct klist_iter {
|
|
+ struct klist *i_klist;
|
|
+ struct klist_node *i_cur;
|
|
+};
|
|
+
|
|
+struct class_dev_iter {
|
|
+ struct klist_iter ki;
|
|
+ const struct device_type *type;
|
|
+};
|
|
+
|
|
+struct disk_events {
|
|
+ struct list_head node;
|
|
+ struct gendisk *disk;
|
|
+ spinlock_t lock;
|
|
+ struct mutex block_mutex;
|
|
+ int block;
|
|
+ unsigned int pending;
|
|
+ unsigned int clearing;
|
|
+ long int poll_msecs;
|
|
+ struct delayed_work dwork;
|
|
+};
|
|
+
|
|
+struct blk_major_name {
|
|
+ struct blk_major_name *next;
|
|
+ int major;
|
|
+ char name[16];
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ struct page *v;
|
|
+} Sector;
|
|
+
|
|
+struct parsed_partitions {
|
|
+ struct block_device *bdev;
|
|
+ char name[32];
|
|
+ struct {
|
|
+ sector_t from;
|
|
+ sector_t size;
|
|
+ int flags;
|
|
+ bool has_info;
|
|
+ struct partition_meta_info info;
|
|
+ } *parts;
|
|
+ int next;
|
|
+ int limit;
|
|
+ bool access_beyond_eod;
|
|
+ char *pp_buf;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IOPRIO_WHO_PROCESS = 1,
|
|
+ IOPRIO_WHO_PGRP = 2,
|
|
+ IOPRIO_WHO_USER = 3,
|
|
+};
|
|
+
|
|
+struct RigidDiskBlock {
|
|
+ __u32 rdb_ID;
|
|
+ __be32 rdb_SummedLongs;
|
|
+ __s32 rdb_ChkSum;
|
|
+ __u32 rdb_HostID;
|
|
+ __be32 rdb_BlockBytes;
|
|
+ __u32 rdb_Flags;
|
|
+ __u32 rdb_BadBlockList;
|
|
+ __be32 rdb_PartitionList;
|
|
+ __u32 rdb_FileSysHeaderList;
|
|
+ __u32 rdb_DriveInit;
|
|
+ __u32 rdb_Reserved1[6];
|
|
+ __u32 rdb_Cylinders;
|
|
+ __u32 rdb_Sectors;
|
|
+ __u32 rdb_Heads;
|
|
+ __u32 rdb_Interleave;
|
|
+ __u32 rdb_Park;
|
|
+ __u32 rdb_Reserved2[3];
|
|
+ __u32 rdb_WritePreComp;
|
|
+ __u32 rdb_ReducedWrite;
|
|
+ __u32 rdb_StepRate;
|
|
+ __u32 rdb_Reserved3[5];
|
|
+ __u32 rdb_RDBBlocksLo;
|
|
+ __u32 rdb_RDBBlocksHi;
|
|
+ __u32 rdb_LoCylinder;
|
|
+ __u32 rdb_HiCylinder;
|
|
+ __u32 rdb_CylBlocks;
|
|
+ __u32 rdb_AutoParkSeconds;
|
|
+ __u32 rdb_HighRDSKBlock;
|
|
+ __u32 rdb_Reserved4;
|
|
+ char rdb_DiskVendor[8];
|
|
+ char rdb_DiskProduct[16];
|
|
+ char rdb_DiskRevision[4];
|
|
+ char rdb_ControllerVendor[8];
|
|
+ char rdb_ControllerProduct[16];
|
|
+ char rdb_ControllerRevision[4];
|
|
+ __u32 rdb_Reserved5[10];
|
|
+};
|
|
+
|
|
+struct PartitionBlock {
|
|
+ __be32 pb_ID;
|
|
+ __be32 pb_SummedLongs;
|
|
+ __s32 pb_ChkSum;
|
|
+ __u32 pb_HostID;
|
|
+ __be32 pb_Next;
|
|
+ __u32 pb_Flags;
|
|
+ __u32 pb_Reserved1[2];
|
|
+ __u32 pb_DevFlags;
|
|
+ __u8 pb_DriveName[32];
|
|
+ __u32 pb_Reserved2[15];
|
|
+ __be32 pb_Environment[17];
|
|
+ __u32 pb_EReserved[15];
|
|
+};
|
|
+
|
|
+struct mac_partition {
|
|
+ __be16 signature;
|
|
+ __be16 res1;
|
|
+ __be32 map_count;
|
|
+ __be32 start_block;
|
|
+ __be32 block_count;
|
|
+ char name[32];
|
|
+ char type[32];
|
|
+ __be32 data_start;
|
|
+ __be32 data_count;
|
|
+ __be32 status;
|
|
+ __be32 boot_start;
|
|
+ __be32 boot_size;
|
|
+ __be32 boot_load;
|
|
+ __be32 boot_load2;
|
|
+ __be32 boot_entry;
|
|
+ __be32 boot_entry2;
|
|
+ __be32 boot_cksum;
|
|
+ char processor[16];
|
|
+};
|
|
+
|
|
+struct mac_driver_desc {
|
|
+ __be16 signature;
|
|
+ __be16 block_size;
|
|
+ __be32 block_count;
|
|
+};
|
|
+
|
|
+struct fat_boot_sector {
|
|
+ __u8 ignored[3];
|
|
+ __u8 system_id[8];
|
|
+ __u8 sector_size[2];
|
|
+ __u8 sec_per_clus;
|
|
+ __le16 reserved;
|
|
+ __u8 fats;
|
|
+ __u8 dir_entries[2];
|
|
+ __u8 sectors[2];
|
|
+ __u8 media;
|
|
+ __le16 fat_length;
|
|
+ __le16 secs_track;
|
|
+ __le16 heads;
|
|
+ __le32 hidden;
|
|
+ __le32 total_sect;
|
|
+ union {
|
|
+ struct {
|
|
+ __u8 drive_number;
|
|
+ __u8 state;
|
|
+ __u8 signature;
|
|
+ __u8 vol_id[4];
|
|
+ __u8 vol_label[11];
|
|
+ __u8 fs_type[8];
|
|
+ } fat16;
|
|
+ struct {
|
|
+ __le32 length;
|
|
+ __le16 flags;
|
|
+ __u8 version[2];
|
|
+ __le32 root_cluster;
|
|
+ __le16 info_sector;
|
|
+ __le16 backup_boot;
|
|
+ __le16 reserved2[6];
|
|
+ __u8 drive_number;
|
|
+ __u8 state;
|
|
+ __u8 signature;
|
|
+ __u8 vol_id[4];
|
|
+ __u8 vol_label[11];
|
|
+ __u8 fs_type[8];
|
|
+ } fat32;
|
|
+ };
|
|
+};
|
|
+
|
|
+enum {
|
|
+ DOS_EXTENDED_PARTITION = 5,
|
|
+ LINUX_EXTENDED_PARTITION = 133,
|
|
+ WIN98_EXTENDED_PARTITION = 15,
|
|
+ SUN_WHOLE_DISK = 5,
|
|
+ LINUX_SWAP_PARTITION = 130,
|
|
+ LINUX_DATA_PARTITION = 131,
|
|
+ LINUX_LVM_PARTITION = 142,
|
|
+ LINUX_RAID_PARTITION = 253,
|
|
+ SOLARIS_X86_PARTITION = 130,
|
|
+ NEW_SOLARIS_X86_PARTITION = 191,
|
|
+ DM6_AUX1PARTITION = 81,
|
|
+ DM6_AUX3PARTITION = 83,
|
|
+ DM6_PARTITION = 84,
|
|
+ EZD_PARTITION = 85,
|
|
+ FREEBSD_PARTITION = 165,
|
|
+ OPENBSD_PARTITION = 166,
|
|
+ NETBSD_PARTITION = 169,
|
|
+ BSDI_PARTITION = 183,
|
|
+ MINIX_PARTITION = 129,
|
|
+ UNIXWARE_PARTITION = 99,
|
|
+};
|
|
+
|
|
+struct partition {
|
|
+ unsigned char boot_ind;
|
|
+ unsigned char head;
|
|
+ unsigned char sector;
|
|
+ unsigned char cyl;
|
|
+ unsigned char sys_ind;
|
|
+ unsigned char end_head;
|
|
+ unsigned char end_sector;
|
|
+ unsigned char end_cyl;
|
|
+ __le32 start_sect;
|
|
+ __le32 nr_sects;
|
|
+};
|
|
+
|
|
+struct solaris_x86_slice {
|
|
+ __le16 s_tag;
|
|
+ __le16 s_flag;
|
|
+ __le32 s_start;
|
|
+ __le32 s_size;
|
|
+};
|
|
+
|
|
+struct solaris_x86_vtoc {
|
|
+ unsigned int v_bootinfo[3];
|
|
+ __le32 v_sanity;
|
|
+ __le32 v_version;
|
|
+ char v_volume[8];
|
|
+ __le16 v_sectorsz;
|
|
+ __le16 v_nparts;
|
|
+ unsigned int v_reserved[10];
|
|
+ struct solaris_x86_slice v_slice[16];
|
|
+ unsigned int timestamp[16];
|
|
+ char v_asciilabel[128];
|
|
+};
|
|
+
|
|
+struct bsd_partition {
|
|
+ __le32 p_size;
|
|
+ __le32 p_offset;
|
|
+ __le32 p_fsize;
|
|
+ __u8 p_fstype;
|
|
+ __u8 p_frag;
|
|
+ __le16 p_cpg;
|
|
+};
|
|
+
|
|
+struct bsd_disklabel {
|
|
+ __le32 d_magic;
|
|
+ __s16 d_type;
|
|
+ __s16 d_subtype;
|
|
+ char d_typename[16];
|
|
+ char d_packname[16];
|
|
+ __u32 d_secsize;
|
|
+ __u32 d_nsectors;
|
|
+ __u32 d_ntracks;
|
|
+ __u32 d_ncylinders;
|
|
+ __u32 d_secpercyl;
|
|
+ __u32 d_secperunit;
|
|
+ __u16 d_sparespertrack;
|
|
+ __u16 d_sparespercyl;
|
|
+ __u32 d_acylinders;
|
|
+ __u16 d_rpm;
|
|
+ __u16 d_interleave;
|
|
+ __u16 d_trackskew;
|
|
+ __u16 d_cylskew;
|
|
+ __u32 d_headswitch;
|
|
+ __u32 d_trkseek;
|
|
+ __u32 d_flags;
|
|
+ __u32 d_drivedata[5];
|
|
+ __u32 d_spare[5];
|
|
+ __le32 d_magic2;
|
|
+ __le16 d_checksum;
|
|
+ __le16 d_npartitions;
|
|
+ __le32 d_bbsize;
|
|
+ __le32 d_sbsize;
|
|
+ struct bsd_partition d_partitions[16];
|
|
+};
|
|
+
|
|
+struct unixware_slice {
|
|
+ __le16 s_label;
|
|
+ __le16 s_flags;
|
|
+ __le32 start_sect;
|
|
+ __le32 nr_sects;
|
|
+};
|
|
+
|
|
+struct unixware_vtoc {
|
|
+ __le32 v_magic;
|
|
+ __le32 v_version;
|
|
+ char v_name[8];
|
|
+ __le16 v_nslices;
|
|
+ __le16 v_unknown1;
|
|
+ __le32 v_reserved[10];
|
|
+ struct unixware_slice v_slice[16];
|
|
+};
|
|
+
|
|
+struct unixware_disklabel {
|
|
+ __le32 d_type;
|
|
+ __le32 d_magic;
|
|
+ __le32 d_version;
|
|
+ char d_serial[12];
|
|
+ __le32 d_ncylinders;
|
|
+ __le32 d_ntracks;
|
|
+ __le32 d_nsectors;
|
|
+ __le32 d_secsize;
|
|
+ __le32 d_part_start;
|
|
+ __le32 d_unknown1[12];
|
|
+ __le32 d_alt_tbl;
|
|
+ __le32 d_alt_len;
|
|
+ __le32 d_phys_cyl;
|
|
+ __le32 d_phys_trk;
|
|
+ __le32 d_phys_sec;
|
|
+ __le32 d_phys_bytes;
|
|
+ __le32 d_unknown2;
|
|
+ __le32 d_unknown3;
|
|
+ __le32 d_pad[8];
|
|
+ struct unixware_vtoc vtoc;
|
|
+};
|
|
+
|
|
+struct d_partition {
|
|
+ __le32 p_size;
|
|
+ __le32 p_offset;
|
|
+ __le32 p_fsize;
|
|
+ u8 p_fstype;
|
|
+ u8 p_frag;
|
|
+ __le16 p_cpg;
|
|
+};
|
|
+
|
|
+struct disklabel {
|
|
+ __le32 d_magic;
|
|
+ __le16 d_type;
|
|
+ __le16 d_subtype;
|
|
+ u8 d_typename[16];
|
|
+ u8 d_packname[16];
|
|
+ __le32 d_secsize;
|
|
+ __le32 d_nsectors;
|
|
+ __le32 d_ntracks;
|
|
+ __le32 d_ncylinders;
|
|
+ __le32 d_secpercyl;
|
|
+ __le32 d_secprtunit;
|
|
+ __le16 d_sparespertrack;
|
|
+ __le16 d_sparespercyl;
|
|
+ __le32 d_acylinders;
|
|
+ __le16 d_rpm;
|
|
+ __le16 d_interleave;
|
|
+ __le16 d_trackskew;
|
|
+ __le16 d_cylskew;
|
|
+ __le32 d_headswitch;
|
|
+ __le32 d_trkseek;
|
|
+ __le32 d_flags;
|
|
+ __le32 d_drivedata[5];
|
|
+ __le32 d_spare[5];
|
|
+ __le32 d_magic2;
|
|
+ __le16 d_checksum;
|
|
+ __le16 d_npartitions;
|
|
+ __le32 d_bbsize;
|
|
+ __le32 d_sbsize;
|
|
+ struct d_partition d_partitions[18];
|
|
+};
|
|
+
|
|
+struct sgi_volume {
|
|
+ s8 name[8];
|
|
+ __be32 block_num;
|
|
+ __be32 num_bytes;
|
|
+};
|
|
+
|
|
+struct sgi_partition {
|
|
+ __be32 num_blocks;
|
|
+ __be32 first_block;
|
|
+ __be32 type;
|
|
+};
|
|
+
|
|
+struct sgi_disklabel {
|
|
+ __be32 magic_mushroom;
|
|
+ __be16 root_part_num;
|
|
+ __be16 swap_part_num;
|
|
+ s8 boot_file[16];
|
|
+ u8 _unused0[48];
|
|
+ struct sgi_volume volume[15];
|
|
+ struct sgi_partition partitions[16];
|
|
+ __be32 csum;
|
|
+ __be32 _unused1;
|
|
+};
|
|
+
|
|
+struct sun_info {
|
|
+ __be16 id;
|
|
+ __be16 flags;
|
|
+};
|
|
+
|
|
+struct sun_vtoc {
|
|
+ __be32 version;
|
|
+ char volume[8];
|
|
+ __be16 nparts;
|
|
+ struct sun_info infos[8];
|
|
+ __be16 padding;
|
|
+ __be32 bootinfo[3];
|
|
+ __be32 sanity;
|
|
+ __be32 reserved[10];
|
|
+ __be32 timestamp[8];
|
|
+};
|
|
+
|
|
+struct sun_partition {
|
|
+ __be32 start_cylinder;
|
|
+ __be32 num_sectors;
|
|
+};
|
|
+
|
|
+struct sun_disklabel {
|
|
+ unsigned char info[128];
|
|
+ struct sun_vtoc vtoc;
|
|
+ __be32 write_reinstruct;
|
|
+ __be32 read_reinstruct;
|
|
+ unsigned char spare[148];
|
|
+ __be16 rspeed;
|
|
+ __be16 pcylcount;
|
|
+ __be16 sparecyl;
|
|
+ __be16 obs1;
|
|
+ __be16 obs2;
|
|
+ __be16 ilfact;
|
|
+ __be16 ncyl;
|
|
+ __be16 nacyl;
|
|
+ __be16 ntrks;
|
|
+ __be16 nsect;
|
|
+ __be16 obs3;
|
|
+ __be16 obs4;
|
|
+ struct sun_partition partitions[8];
|
|
+ __be16 magic;
|
|
+ __be16 csum;
|
|
+};
|
|
+
|
|
+struct _gpt_header {
|
|
+ __le64 signature;
|
|
+ __le32 revision;
|
|
+ __le32 header_size;
|
|
+ __le32 header_crc32;
|
|
+ __le32 reserved1;
|
|
+ __le64 my_lba;
|
|
+ __le64 alternate_lba;
|
|
+ __le64 first_usable_lba;
|
|
+ __le64 last_usable_lba;
|
|
+ efi_guid_t disk_guid;
|
|
+ __le64 partition_entry_lba;
|
|
+ __le32 num_partition_entries;
|
|
+ __le32 sizeof_partition_entry;
|
|
+ __le32 partition_entry_array_crc32;
|
|
+} __attribute__((packed));
|
|
+
|
|
+typedef struct _gpt_header gpt_header;
|
|
+
|
|
+struct _gpt_entry_attributes {
|
|
+ u64 required_to_function: 1;
|
|
+ u64 reserved: 47;
|
|
+ u64 type_guid_specific: 16;
|
|
+};
|
|
+
|
|
+typedef struct _gpt_entry_attributes gpt_entry_attributes;
|
|
+
|
|
+struct _gpt_entry {
|
|
+ efi_guid_t partition_type_guid;
|
|
+ efi_guid_t unique_partition_guid;
|
|
+ __le64 starting_lba;
|
|
+ __le64 ending_lba;
|
|
+ gpt_entry_attributes attributes;
|
|
+ __le16 partition_name[36];
|
|
+};
|
|
+
|
|
+typedef struct _gpt_entry gpt_entry;
|
|
+
|
|
+struct _gpt_mbr_record {
|
|
+ u8 boot_indicator;
|
|
+ u8 start_head;
|
|
+ u8 start_sector;
|
|
+ u8 start_track;
|
|
+ u8 os_type;
|
|
+ u8 end_head;
|
|
+ u8 end_sector;
|
|
+ u8 end_track;
|
|
+ __le32 starting_lba;
|
|
+ __le32 size_in_lba;
|
|
+};
|
|
+
|
|
+typedef struct _gpt_mbr_record gpt_mbr_record;
|
|
+
|
|
+struct _legacy_mbr {
|
|
+ u8 boot_code[440];
|
|
+ __le32 unique_mbr_signature;
|
|
+ __le16 unknown;
|
|
+ gpt_mbr_record partition_record[4];
|
|
+ __le16 signature;
|
|
+} __attribute__((packed));
|
|
+
|
|
+typedef struct _legacy_mbr legacy_mbr;
|
|
+
|
|
+struct d_partition___2 {
|
|
+ __le32 p_res;
|
|
+ u8 p_fstype;
|
|
+ u8 p_res2[3];
|
|
+ __le32 p_offset;
|
|
+ __le32 p_size;
|
|
+};
|
|
+
|
|
+struct disklabel___2 {
|
|
+ u8 d_reserved[270];
|
|
+ struct d_partition___2 d_partitions[2];
|
|
+ u8 d_blank[208];
|
|
+ __le16 d_magic;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct rq_wait {
|
|
+ wait_queue_head_t wait;
|
|
+ atomic_t inflight;
|
|
+};
|
|
+
|
|
+struct rq_depth {
|
|
+ unsigned int max_depth;
|
|
+ int scale_step;
|
|
+ bool scaled_max;
|
|
+ unsigned int queue_depth;
|
|
+ unsigned int default_depth;
|
|
+};
|
|
+
|
|
+struct request_sense;
|
|
+
|
|
+struct cdrom_generic_command {
|
|
+ unsigned char cmd[12];
|
|
+ unsigned char *buffer;
|
|
+ unsigned int buflen;
|
|
+ int stat;
|
|
+ struct request_sense *sense;
|
|
+ unsigned char data_direction;
|
|
+ int quiet;
|
|
+ int timeout;
|
|
+ void *reserved[1];
|
|
+};
|
|
+
|
|
+struct request_sense {
|
|
+ __u8 error_code: 7;
|
|
+ __u8 valid: 1;
|
|
+ __u8 segment_number;
|
|
+ __u8 sense_key: 4;
|
|
+ __u8 reserved2: 1;
|
|
+ __u8 ili: 1;
|
|
+ __u8 reserved1: 2;
|
|
+ __u8 information[4];
|
|
+ __u8 add_sense_len;
|
|
+ __u8 command_info[4];
|
|
+ __u8 asc;
|
|
+ __u8 ascq;
|
|
+ __u8 fruc;
|
|
+ __u8 sks[3];
|
|
+ __u8 asb[46];
|
|
+};
|
|
+
|
|
+struct scsi_ioctl_command {
|
|
+ unsigned int inlen;
|
|
+ unsigned int outlen;
|
|
+ unsigned char data[0];
|
|
+};
|
|
+
|
|
+enum scsi_device_event {
|
|
+ SDEV_EVT_MEDIA_CHANGE = 1,
|
|
+ SDEV_EVT_INQUIRY_CHANGE_REPORTED = 2,
|
|
+ SDEV_EVT_CAPACITY_CHANGE_REPORTED = 3,
|
|
+ SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED = 4,
|
|
+ SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED = 5,
|
|
+ SDEV_EVT_LUN_CHANGE_REPORTED = 6,
|
|
+ SDEV_EVT_ALUA_STATE_CHANGE_REPORTED = 7,
|
|
+ SDEV_EVT_POWER_ON_RESET_OCCURRED = 8,
|
|
+ SDEV_EVT_FIRST = 1,
|
|
+ SDEV_EVT_LAST = 8,
|
|
+ SDEV_EVT_MAXBITS = 9,
|
|
+};
|
|
+
|
|
+struct scsi_request {
|
|
+ unsigned char __cmd[16];
|
|
+ unsigned char *cmd;
|
|
+ short unsigned int cmd_len;
|
|
+ int result;
|
|
+ unsigned int sense_len;
|
|
+ unsigned int resid_len;
|
|
+ int retries;
|
|
+ void *sense;
|
|
+};
|
|
+
|
|
+struct blk_cmd_filter {
|
|
+ long unsigned int read_ok[4];
|
|
+ long unsigned int write_ok[4];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ OMAX_SB_LEN = 16,
|
|
+};
|
|
+
|
|
+struct bsg_device {
|
|
+ struct request_queue *queue;
|
|
+ spinlock_t lock;
|
|
+ struct hlist_node dev_list;
|
|
+ refcount_t ref_count;
|
|
+ char name[20];
|
|
+ int max_queue;
|
|
+};
|
|
+
|
|
+struct bsg_buffer {
|
|
+ unsigned int payload_len;
|
|
+ int sg_cnt;
|
|
+ struct scatterlist *sg_list;
|
|
+};
|
|
+
|
|
+struct bsg_job {
|
|
+ struct device *dev;
|
|
+ struct kref kref;
|
|
+ unsigned int timeout;
|
|
+ void *request;
|
|
+ void *reply;
|
|
+ unsigned int request_len;
|
|
+ unsigned int reply_len;
|
|
+ struct bsg_buffer request_payload;
|
|
+ struct bsg_buffer reply_payload;
|
|
+ int result;
|
|
+ unsigned int reply_payload_rcv_len;
|
|
+ void *dd_data;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+};
|
|
+
|
|
+struct blkg_stat {
|
|
+ struct percpu_counter cpu_cnt;
|
|
+ atomic64_t aux_cnt;
|
|
+};
|
|
+
|
|
+typedef struct blkcg_policy_data *blkcg_pol_alloc_cpd_fn(gfp_t);
|
|
+
|
|
+typedef void blkcg_pol_init_cpd_fn(struct blkcg_policy_data *);
|
|
+
|
|
+typedef void blkcg_pol_free_cpd_fn(struct blkcg_policy_data *);
|
|
+
|
|
+typedef void blkcg_pol_bind_cpd_fn(struct blkcg_policy_data *);
|
|
+
|
|
+typedef struct blkg_policy_data *blkcg_pol_alloc_pd_fn(gfp_t, int);
|
|
+
|
|
+typedef void blkcg_pol_init_pd_fn(struct blkg_policy_data *);
|
|
+
|
|
+typedef void blkcg_pol_online_pd_fn(struct blkg_policy_data *);
|
|
+
|
|
+typedef void blkcg_pol_offline_pd_fn(struct blkg_policy_data *);
|
|
+
|
|
+typedef void blkcg_pol_free_pd_fn(struct blkg_policy_data *);
|
|
+
|
|
+typedef void blkcg_pol_reset_pd_stats_fn(struct blkg_policy_data *);
|
|
+
|
|
+typedef size_t blkcg_pol_stat_pd_fn(struct blkg_policy_data *, char *, size_t);
|
|
+
|
|
+struct blkcg_policy {
|
|
+ int plid;
|
|
+ struct cftype *dfl_cftypes;
|
|
+ struct cftype *legacy_cftypes;
|
|
+ blkcg_pol_alloc_cpd_fn *cpd_alloc_fn;
|
|
+ blkcg_pol_init_cpd_fn *cpd_init_fn;
|
|
+ blkcg_pol_free_cpd_fn *cpd_free_fn;
|
|
+ blkcg_pol_bind_cpd_fn *cpd_bind_fn;
|
|
+ blkcg_pol_alloc_pd_fn *pd_alloc_fn;
|
|
+ blkcg_pol_init_pd_fn *pd_init_fn;
|
|
+ blkcg_pol_online_pd_fn *pd_online_fn;
|
|
+ blkcg_pol_offline_pd_fn *pd_offline_fn;
|
|
+ blkcg_pol_free_pd_fn *pd_free_fn;
|
|
+ blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
|
|
+ blkcg_pol_stat_pd_fn *pd_stat_fn;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct blkg_conf_ctx {
|
|
+ struct gendisk *disk;
|
|
+ struct blkcg_gq *blkg;
|
|
+ char *body;
|
|
+};
|
|
+
|
|
+struct throtl_service_queue {
|
|
+ struct throtl_service_queue *parent_sq;
|
|
+ struct list_head queued[2];
|
|
+ unsigned int nr_queued[2];
|
|
+ struct rb_root pending_tree;
|
|
+ struct rb_node *first_pending;
|
|
+ unsigned int nr_pending;
|
|
+ long unsigned int first_pending_disptime;
|
|
+ struct timer_list pending_timer;
|
|
+};
|
|
+
|
|
+struct latency_bucket {
|
|
+ long unsigned int total_latency;
|
|
+ int samples;
|
|
+};
|
|
+
|
|
+struct avg_latency_bucket {
|
|
+ long unsigned int latency;
|
|
+ bool valid;
|
|
+};
|
|
+
|
|
+struct throtl_data {
|
|
+ struct throtl_service_queue service_queue;
|
|
+ struct request_queue *queue;
|
|
+ unsigned int nr_queued[2];
|
|
+ unsigned int throtl_slice;
|
|
+ struct work_struct dispatch_work;
|
|
+ unsigned int limit_index;
|
|
+ bool limit_valid[2];
|
|
+ long unsigned int low_upgrade_time;
|
|
+ long unsigned int low_downgrade_time;
|
|
+ unsigned int scale;
|
|
+ struct latency_bucket tmp_buckets[18];
|
|
+ struct avg_latency_bucket avg_buckets[18];
|
|
+ struct latency_bucket *latency_buckets[2];
|
|
+ long unsigned int last_calculate_time;
|
|
+ long unsigned int filtered_latency;
|
|
+ bool track_bio_latency;
|
|
+};
|
|
+
|
|
+struct throtl_grp;
|
|
+
|
|
+struct throtl_qnode {
|
|
+ struct list_head node;
|
|
+ struct bio_list bios;
|
|
+ struct throtl_grp *tg;
|
|
+};
|
|
+
|
|
+struct throtl_grp {
|
|
+ struct blkg_policy_data pd;
|
|
+ struct rb_node rb_node;
|
|
+ struct throtl_data *td;
|
|
+ struct throtl_service_queue service_queue;
|
|
+ struct throtl_qnode qnode_on_self[2];
|
|
+ struct throtl_qnode qnode_on_parent[2];
|
|
+ long unsigned int disptime;
|
|
+ unsigned int flags;
|
|
+ bool has_rules[2];
|
|
+ uint64_t bps[4];
|
|
+ uint64_t bps_conf[4];
|
|
+ unsigned int iops[4];
|
|
+ unsigned int iops_conf[4];
|
|
+ uint64_t bytes_disp[2];
|
|
+ unsigned int io_disp[2];
|
|
+ long unsigned int last_low_overflow_time[2];
|
|
+ uint64_t last_bytes_disp[2];
|
|
+ unsigned int last_io_disp[2];
|
|
+ long unsigned int last_check_time;
|
|
+ long unsigned int latency_target;
|
|
+ long unsigned int latency_target_conf;
|
|
+ long unsigned int slice_start[2];
|
|
+ long unsigned int slice_end[2];
|
|
+ long unsigned int last_finish_time;
|
|
+ long unsigned int checked_last_finish_time;
|
|
+ long unsigned int avg_idletime;
|
|
+ long unsigned int idletime_threshold;
|
|
+ long unsigned int idletime_threshold_conf;
|
|
+ unsigned int bio_cnt;
|
|
+ unsigned int bad_bio_cnt;
|
|
+ long unsigned int bio_cnt_reset_time;
|
|
+};
|
|
+
|
|
+enum tg_state_flags {
|
|
+ THROTL_TG_PENDING = 1,
|
|
+ THROTL_TG_WAS_EMPTY = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ LIMIT_LOW = 0,
|
|
+ LIMIT_MAX = 1,
|
|
+ LIMIT_CNT = 2,
|
|
+};
|
|
+
|
|
+struct noop_data {
|
|
+ struct list_head queue;
|
|
+};
|
|
+
|
|
+struct deadline_data {
|
|
+ struct rb_root sort_list[2];
|
|
+ struct list_head fifo_list[2];
|
|
+ struct request *next_rq[2];
|
|
+ unsigned int batching;
|
|
+ unsigned int starved;
|
|
+ int fifo_expire[2];
|
|
+ int fifo_batch;
|
|
+ int writes_starved;
|
|
+ int front_merges;
|
|
+};
|
|
+
|
|
+struct cfq_ttime {
|
|
+ u64 last_end_request;
|
|
+ u64 ttime_total;
|
|
+ u64 ttime_mean;
|
|
+ long unsigned int ttime_samples;
|
|
+};
|
|
+
|
|
+struct cfq_rb_root {
|
|
+ struct rb_root_cached rb;
|
|
+ struct rb_node *rb_rightmost;
|
|
+ unsigned int count;
|
|
+ u64 min_vdisktime;
|
|
+ struct cfq_ttime ttime;
|
|
+};
|
|
+
|
|
+struct cfq_data;
|
|
+
|
|
+struct cfq_group;
|
|
+
|
|
+struct cfq_queue {
|
|
+ int ref;
|
|
+ unsigned int flags;
|
|
+ struct cfq_data *cfqd;
|
|
+ struct rb_node rb_node;
|
|
+ u64 rb_key;
|
|
+ struct rb_node p_node;
|
|
+ struct rb_root *p_root;
|
|
+ struct rb_root sort_list;
|
|
+ struct request *next_rq;
|
|
+ int queued[2];
|
|
+ int allocated[2];
|
|
+ struct list_head fifo;
|
|
+ u64 dispatch_start;
|
|
+ u64 allocated_slice;
|
|
+ u64 slice_dispatch;
|
|
+ u64 slice_start;
|
|
+ u64 slice_end;
|
|
+ s64 slice_resid;
|
|
+ int prio_pending;
|
|
+ int dispatched;
|
|
+ short unsigned int ioprio;
|
|
+ short unsigned int org_ioprio;
|
|
+ short unsigned int ioprio_class;
|
|
+ short unsigned int org_ioprio_class;
|
|
+ pid_t pid;
|
|
+ u32 seek_history;
|
|
+ sector_t last_request_pos;
|
|
+ struct cfq_rb_root *service_tree;
|
|
+ struct cfq_queue *new_cfqq;
|
|
+ struct cfq_group *cfqg;
|
|
+ long unsigned int nr_sectors;
|
|
+};
|
|
+
|
|
+enum wl_class_t {
|
|
+ BE_WORKLOAD = 0,
|
|
+ RT_WORKLOAD = 1,
|
|
+ IDLE_WORKLOAD = 2,
|
|
+ CFQ_PRIO_NR = 3,
|
|
+};
|
|
+
|
|
+enum wl_type_t {
|
|
+ ASYNC_WORKLOAD = 0,
|
|
+ SYNC_NOIDLE_WORKLOAD = 1,
|
|
+ SYNC_WORKLOAD = 2,
|
|
+};
|
|
+
|
|
+struct cfq_io_cq;
|
|
+
|
|
+struct cfq_data {
|
|
+ struct request_queue *queue;
|
|
+ struct cfq_rb_root grp_service_tree;
|
|
+ struct cfq_group *root_group;
|
|
+ enum wl_class_t serving_wl_class;
|
|
+ enum wl_type_t serving_wl_type;
|
|
+ u64 workload_expires;
|
|
+ struct cfq_group *serving_group;
|
|
+ struct rb_root prio_trees[8];
|
|
+ unsigned int busy_queues;
|
|
+ unsigned int busy_sync_queues;
|
|
+ int rq_in_driver;
|
|
+ int rq_in_flight[2];
|
|
+ int rq_queued;
|
|
+ int hw_tag;
|
|
+ int hw_tag_est_depth;
|
|
+ unsigned int hw_tag_samples;
|
|
+ struct hrtimer idle_slice_timer;
|
|
+ struct work_struct unplug_work;
|
|
+ struct cfq_queue *active_queue;
|
|
+ struct cfq_io_cq *active_cic;
|
|
+ sector_t last_position;
|
|
+ unsigned int cfq_quantum;
|
|
+ unsigned int cfq_back_penalty;
|
|
+ unsigned int cfq_back_max;
|
|
+ unsigned int cfq_slice_async_rq;
|
|
+ unsigned int cfq_latency;
|
|
+ u64 cfq_fifo_expire[2];
|
|
+ u64 cfq_slice[2];
|
|
+ u64 cfq_slice_idle;
|
|
+ u64 cfq_group_idle;
|
|
+ u64 cfq_target_latency;
|
|
+ struct cfq_queue oom_cfqq;
|
|
+ u64 last_delayed_sync;
|
|
+};
|
|
+
|
|
+struct cfqg_stats {
|
|
+ struct blkg_rwstat merged;
|
|
+ struct blkg_rwstat service_time;
|
|
+ struct blkg_rwstat wait_time;
|
|
+ struct blkg_rwstat queued;
|
|
+ struct blkg_stat time;
|
|
+};
|
|
+
|
|
+struct cfq_group {
|
|
+ struct blkg_policy_data pd;
|
|
+ struct rb_node rb_node;
|
|
+ u64 vdisktime;
|
|
+ int nr_active;
|
|
+ unsigned int children_weight;
|
|
+ unsigned int vfraction;
|
|
+ unsigned int weight;
|
|
+ unsigned int new_weight;
|
|
+ unsigned int dev_weight;
|
|
+ unsigned int leaf_weight;
|
|
+ unsigned int new_leaf_weight;
|
|
+ unsigned int dev_leaf_weight;
|
|
+ int nr_cfqq;
|
|
+ unsigned int busy_queues_avg[3];
|
|
+ struct cfq_rb_root service_trees[6];
|
|
+ struct cfq_rb_root service_tree_idle;
|
|
+ u64 saved_wl_slice;
|
|
+ enum wl_type_t saved_wl_type;
|
|
+ enum wl_class_t saved_wl_class;
|
|
+ int dispatched;
|
|
+ struct cfq_ttime ttime;
|
|
+ struct cfqg_stats stats;
|
|
+ struct cfq_queue *async_cfqq[16];
|
|
+ struct cfq_queue *async_idle_cfqq;
|
|
+};
|
|
+
|
|
+struct cfq_group_data {
|
|
+ struct blkcg_policy_data cpd;
|
|
+ unsigned int weight;
|
|
+ unsigned int leaf_weight;
|
|
+};
|
|
+
|
|
+struct cfq_io_cq {
|
|
+ struct io_cq icq;
|
|
+ struct cfq_queue *cfqq[2];
|
|
+ struct cfq_ttime ttime;
|
|
+ int ioprio;
|
|
+ uint64_t blkcg_serial_nr;
|
|
+};
|
|
+
|
|
+enum cfqq_state_flags {
|
|
+ CFQ_CFQQ_FLAG_on_rr = 0,
|
|
+ CFQ_CFQQ_FLAG_wait_request = 1,
|
|
+ CFQ_CFQQ_FLAG_must_dispatch = 2,
|
|
+ CFQ_CFQQ_FLAG_must_alloc_slice = 3,
|
|
+ CFQ_CFQQ_FLAG_fifo_expire = 4,
|
|
+ CFQ_CFQQ_FLAG_idle_window = 5,
|
|
+ CFQ_CFQQ_FLAG_prio_changed = 6,
|
|
+ CFQ_CFQQ_FLAG_slice_new = 7,
|
|
+ CFQ_CFQQ_FLAG_sync = 8,
|
|
+ CFQ_CFQQ_FLAG_coop = 9,
|
|
+ CFQ_CFQQ_FLAG_split_coop = 10,
|
|
+ CFQ_CFQQ_FLAG_deep = 11,
|
|
+ CFQ_CFQQ_FLAG_wait_busy = 12,
|
|
+};
|
|
+
|
|
+struct deadline_data___2 {
|
|
+ struct rb_root sort_list[2];
|
|
+ struct list_head fifo_list[2];
|
|
+ struct request *next_rq[2];
|
|
+ unsigned int batching;
|
|
+ unsigned int starved;
|
|
+ int fifo_expire[2];
|
|
+ int fifo_batch;
|
|
+ int writes_starved;
|
|
+ int front_merges;
|
|
+ spinlock_t lock;
|
|
+ spinlock_t zone_lock;
|
|
+ struct list_head dispatch;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ KYBER_READ = 0,
|
|
+ KYBER_SYNC_WRITE = 1,
|
|
+ KYBER_OTHER = 2,
|
|
+ KYBER_NUM_DOMAINS = 3,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ KYBER_MIN_DEPTH = 256,
|
|
+ KYBER_ASYNC_PERCENT = 75,
|
|
+};
|
|
+
|
|
+struct kyber_ctx_queue {
|
|
+ spinlock_t lock;
|
|
+ struct list_head rq_list[3];
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct kyber_queue_data {
|
|
+ struct request_queue *q;
|
|
+ struct blk_stat_callback *cb;
|
|
+ struct sbitmap_queue domain_tokens[3];
|
|
+ unsigned int async_depth;
|
|
+ u64 read_lat_nsec;
|
|
+ u64 write_lat_nsec;
|
|
+};
|
|
+
|
|
+struct kyber_hctx_data {
|
|
+ spinlock_t lock;
|
|
+ struct list_head rqs[3];
|
|
+ unsigned int cur_domain;
|
|
+ unsigned int batching;
|
|
+ struct kyber_ctx_queue *kcqs;
|
|
+ struct sbitmap kcq_map[3];
|
|
+ wait_queue_entry_t domain_wait[3];
|
|
+ struct sbq_wait_state *domain_ws[3];
|
|
+ atomic_t wait_index[3];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NONE___3 = 0,
|
|
+ GOOD = 1,
|
|
+ GREAT = 2,
|
|
+ BAD = -1,
|
|
+ AWFUL = -2,
|
|
+};
|
|
+
|
|
+struct flush_kcq_data {
|
|
+ struct kyber_hctx_data *khd;
|
|
+ unsigned int sched_domain;
|
|
+ struct list_head *list;
|
|
+};
|
|
+
|
|
+struct bfq_entity;
|
|
+
|
|
+struct bfq_service_tree {
|
|
+ struct rb_root active;
|
|
+ struct rb_root idle;
|
|
+ struct bfq_entity *first_idle;
|
|
+ struct bfq_entity *last_idle;
|
|
+ u64 vtime;
|
|
+ long unsigned int wsum;
|
|
+};
|
|
+
|
|
+struct bfq_weight_counter;
|
|
+
|
|
+struct bfq_sched_data;
|
|
+
|
|
+struct bfq_entity {
|
|
+ struct rb_node rb_node;
|
|
+ struct bfq_weight_counter *weight_counter;
|
|
+ bool on_st;
|
|
+ u64 start;
|
|
+ u64 finish;
|
|
+ struct rb_root *tree;
|
|
+ u64 min_start;
|
|
+ int service;
|
|
+ int budget;
|
|
+ int weight;
|
|
+ int new_weight;
|
|
+ int orig_weight;
|
|
+ struct bfq_entity *parent;
|
|
+ struct bfq_sched_data *my_sched_data;
|
|
+ struct bfq_sched_data *sched_data;
|
|
+ int prio_changed;
|
|
+};
|
|
+
|
|
+struct bfq_sched_data {
|
|
+ struct bfq_entity *in_service_entity;
|
|
+ struct bfq_entity *next_in_service;
|
|
+ struct bfq_service_tree service_tree[3];
|
|
+ long unsigned int bfq_class_idle_last_service;
|
|
+};
|
|
+
|
|
+struct bfq_weight_counter {
|
|
+ unsigned int weight;
|
|
+ unsigned int num_active;
|
|
+ struct rb_node weights_node;
|
|
+};
|
|
+
|
|
+struct bfq_ttime {
|
|
+ u64 last_end_request;
|
|
+ u64 ttime_total;
|
|
+ long unsigned int ttime_samples;
|
|
+ u64 ttime_mean;
|
|
+};
|
|
+
|
|
+struct bfq_data;
|
|
+
|
|
+struct bfq_io_cq;
|
|
+
|
|
+struct bfq_queue {
|
|
+ int ref;
|
|
+ struct bfq_data *bfqd;
|
|
+ short unsigned int ioprio;
|
|
+ short unsigned int ioprio_class;
|
|
+ short unsigned int new_ioprio;
|
|
+ short unsigned int new_ioprio_class;
|
|
+ struct bfq_queue *new_bfqq;
|
|
+ struct rb_node pos_node;
|
|
+ struct rb_root *pos_root;
|
|
+ struct rb_root sort_list;
|
|
+ struct request *next_rq;
|
|
+ int queued[2];
|
|
+ int allocated;
|
|
+ int meta_pending;
|
|
+ struct list_head fifo;
|
|
+ struct bfq_entity entity;
|
|
+ int max_budget;
|
|
+ long unsigned int budget_timeout;
|
|
+ int dispatched;
|
|
+ long unsigned int flags;
|
|
+ struct list_head bfqq_list;
|
|
+ struct bfq_ttime ttime;
|
|
+ u32 seek_history;
|
|
+ struct hlist_node burst_list_node;
|
|
+ sector_t last_request_pos;
|
|
+ unsigned int requests_within_timer;
|
|
+ pid_t pid;
|
|
+ struct bfq_io_cq *bic;
|
|
+ long unsigned int wr_cur_max_time;
|
|
+ long unsigned int soft_rt_next_start;
|
|
+ long unsigned int last_wr_start_finish;
|
|
+ unsigned int wr_coeff;
|
|
+ long unsigned int last_idle_bklogged;
|
|
+ long unsigned int service_from_backlogged;
|
|
+ long unsigned int service_from_wr;
|
|
+ long unsigned int wr_start_at_switch_to_srt;
|
|
+ long unsigned int split_time;
|
|
+ long unsigned int first_IO_time;
|
|
+ u32 max_service_rate;
|
|
+ unsigned int inject_coeff;
|
|
+ unsigned int injected_service;
|
|
+};
|
|
+
|
|
+struct bfq_group;
|
|
+
|
|
+struct bfq_data {
|
|
+ struct request_queue *queue;
|
|
+ struct list_head dispatch;
|
|
+ struct bfq_group *root_group;
|
|
+ struct rb_root queue_weights_tree;
|
|
+ struct rb_root group_weights_tree;
|
|
+ int busy_queues;
|
|
+ int wr_busy_queues;
|
|
+ int queued;
|
|
+ int rq_in_driver;
|
|
+ int max_rq_in_driver;
|
|
+ int hw_tag_samples;
|
|
+ int hw_tag;
|
|
+ int budgets_assigned;
|
|
+ struct hrtimer idle_slice_timer;
|
|
+ struct bfq_queue *in_service_queue;
|
|
+ sector_t last_position;
|
|
+ sector_t in_serv_last_pos;
|
|
+ u64 last_completion;
|
|
+ u64 first_dispatch;
|
|
+ u64 last_dispatch;
|
|
+ ktime_t last_budget_start;
|
|
+ ktime_t last_idling_start;
|
|
+ int peak_rate_samples;
|
|
+ u32 sequential_samples;
|
|
+ u64 tot_sectors_dispatched;
|
|
+ u32 last_rq_max_size;
|
|
+ u64 delta_from_first;
|
|
+ u32 peak_rate;
|
|
+ int bfq_max_budget;
|
|
+ struct list_head active_list;
|
|
+ struct list_head idle_list;
|
|
+ u64 bfq_fifo_expire[2];
|
|
+ unsigned int bfq_back_penalty;
|
|
+ unsigned int bfq_back_max;
|
|
+ u32 bfq_slice_idle;
|
|
+ int bfq_user_max_budget;
|
|
+ unsigned int bfq_timeout;
|
|
+ unsigned int bfq_requests_within_timer;
|
|
+ bool strict_guarantees;
|
|
+ long unsigned int last_ins_in_burst;
|
|
+ long unsigned int bfq_burst_interval;
|
|
+ int burst_size;
|
|
+ struct bfq_entity *burst_parent_entity;
|
|
+ long unsigned int bfq_large_burst_thresh;
|
|
+ bool large_burst;
|
|
+ struct hlist_head burst_list;
|
|
+ bool low_latency;
|
|
+ unsigned int bfq_wr_coeff;
|
|
+ unsigned int bfq_wr_max_time;
|
|
+ unsigned int bfq_wr_rt_max_time;
|
|
+ unsigned int bfq_wr_min_idle_time;
|
|
+ long unsigned int bfq_wr_min_inter_arr_async;
|
|
+ unsigned int bfq_wr_max_softrt_rate;
|
|
+ u64 rate_dur_prod;
|
|
+ struct bfq_queue oom_bfqq;
|
|
+ spinlock_t lock;
|
|
+ struct bfq_io_cq *bio_bic;
|
|
+ struct bfq_queue *bio_bfqq;
|
|
+ unsigned int word_depths[4];
|
|
+};
|
|
+
|
|
+struct bfq_io_cq {
|
|
+ struct io_cq icq;
|
|
+ struct bfq_queue *bfqq[2];
|
|
+ int ioprio;
|
|
+ uint64_t blkcg_serial_nr;
|
|
+ bool saved_has_short_ttime;
|
|
+ bool saved_IO_bound;
|
|
+ bool saved_in_large_burst;
|
|
+ bool was_in_burst_list;
|
|
+ long unsigned int saved_wr_coeff;
|
|
+ long unsigned int saved_last_wr_start_finish;
|
|
+ long unsigned int saved_wr_start_at_switch_to_srt;
|
|
+ unsigned int saved_wr_cur_max_time;
|
|
+ struct bfq_ttime saved_ttime;
|
|
+};
|
|
+
|
|
+struct bfqg_stats {};
|
|
+
|
|
+struct bfq_group {
|
|
+ struct blkg_policy_data pd;
|
|
+ char blkg_path[128];
|
|
+ int ref;
|
|
+ struct bfq_entity entity;
|
|
+ struct bfq_sched_data sched_data;
|
|
+ void *bfqd;
|
|
+ struct bfq_queue *async_bfqq[16];
|
|
+ struct bfq_queue *async_idle_bfqq;
|
|
+ struct bfq_entity *my_entity;
|
|
+ int active_entities;
|
|
+ struct rb_root rq_pos_tree;
|
|
+ struct bfqg_stats stats;
|
|
+};
|
|
+
|
|
+enum bfqq_state_flags {
|
|
+ BFQQF_just_created = 0,
|
|
+ BFQQF_busy = 1,
|
|
+ BFQQF_wait_request = 2,
|
|
+ BFQQF_non_blocking_wait_rq = 3,
|
|
+ BFQQF_fifo_expire = 4,
|
|
+ BFQQF_has_short_ttime = 5,
|
|
+ BFQQF_sync = 6,
|
|
+ BFQQF_IO_bound = 7,
|
|
+ BFQQF_in_large_burst = 8,
|
|
+ BFQQF_softrt_update = 9,
|
|
+ BFQQF_coop = 10,
|
|
+ BFQQF_split_coop = 11,
|
|
+};
|
|
+
|
|
+enum bfqq_expiration {
|
|
+ BFQQE_TOO_IDLE = 0,
|
|
+ BFQQE_BUDGET_TIMEOUT = 1,
|
|
+ BFQQE_BUDGET_EXHAUSTED = 2,
|
|
+ BFQQE_NO_MORE_REQUESTS = 3,
|
|
+ BFQQE_PREEMPTED = 4,
|
|
+};
|
|
+
|
|
+struct bfq_group_data {
|
|
+ struct blkcg_policy_data pd;
|
|
+ unsigned int weight;
|
|
+};
|
|
+
|
|
+struct cdrom_msf0 {
|
|
+ __u8 minute;
|
|
+ __u8 second;
|
|
+ __u8 frame;
|
|
+};
|
|
+
|
|
+union cdrom_addr {
|
|
+ struct cdrom_msf0 msf;
|
|
+ int lba;
|
|
+};
|
|
+
|
|
+struct cdrom_read_audio {
|
|
+ union cdrom_addr addr;
|
|
+ __u8 addr_format;
|
|
+ int nframes;
|
|
+ __u8 *buf;
|
|
+};
|
|
+
|
|
+struct compat_hd_geometry {
|
|
+ unsigned char heads;
|
|
+ unsigned char sectors;
|
|
+ short unsigned int cylinders;
|
|
+ u32 start;
|
|
+};
|
|
+
|
|
+struct compat_cdrom_read_audio {
|
|
+ union cdrom_addr addr;
|
|
+ u8 addr_format;
|
|
+ compat_int_t nframes;
|
|
+ compat_caddr_t buf;
|
|
+};
|
|
+
|
|
+struct compat_cdrom_generic_command {
|
|
+ unsigned char cmd[12];
|
|
+ compat_caddr_t buffer;
|
|
+ compat_uint_t buflen;
|
|
+ compat_int_t stat;
|
|
+ compat_caddr_t sense;
|
|
+ unsigned char data_direction;
|
|
+ compat_int_t quiet;
|
|
+ compat_int_t timeout;
|
|
+ compat_caddr_t reserved[1];
|
|
+};
|
|
+
|
|
+struct compat_blkpg_ioctl_arg {
|
|
+ compat_int_t op;
|
|
+ compat_int_t flags;
|
|
+ compat_int_t datalen;
|
|
+ compat_caddr_t data;
|
|
+};
|
|
+
|
|
+enum bip_flags {
|
|
+ BIP_BLOCK_INTEGRITY = 1,
|
|
+ BIP_MAPPED_INTEGRITY = 2,
|
|
+ BIP_CTRL_NOCHECK = 4,
|
|
+ BIP_DISK_NOCHECK = 8,
|
|
+ BIP_IP_CHECKSUM = 16,
|
|
+};
|
|
+
|
|
+enum blk_integrity_flags {
|
|
+ BLK_INTEGRITY_VERIFY = 1,
|
|
+ BLK_INTEGRITY_GENERATE = 2,
|
|
+ BLK_INTEGRITY_DEVICE_CAPABLE = 4,
|
|
+ BLK_INTEGRITY_IP_CHECKSUM = 8,
|
|
+};
|
|
+
|
|
+struct integrity_sysfs_entry {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct blk_integrity *, char *);
|
|
+ ssize_t (*store)(struct blk_integrity *, const char *, size_t);
|
|
+};
|
|
+
|
|
+enum t10_dif_type {
|
|
+ T10_PI_TYPE0_PROTECTION = 0,
|
|
+ T10_PI_TYPE1_PROTECTION = 1,
|
|
+ T10_PI_TYPE2_PROTECTION = 2,
|
|
+ T10_PI_TYPE3_PROTECTION = 3,
|
|
+};
|
|
+
|
|
+struct t10_pi_tuple {
|
|
+ __be16 guard_tag;
|
|
+ __be16 app_tag;
|
|
+ __be32 ref_tag;
|
|
+};
|
|
+
|
|
+typedef __be16 csum_fn(void *, unsigned int);
|
|
+
|
|
+struct virtio_device_id {
|
|
+ __u32 device;
|
|
+ __u32 vendor;
|
|
+};
|
|
+
|
|
+typedef __u16 __virtio16;
|
|
+
|
|
+typedef __u32 __virtio32;
|
|
+
|
|
+typedef __u64 __virtio64;
|
|
+
|
|
+struct vring_desc {
|
|
+ __virtio64 addr;
|
|
+ __virtio32 len;
|
|
+ __virtio16 flags;
|
|
+ __virtio16 next;
|
|
+};
|
|
+
|
|
+struct vring_avail {
|
|
+ __virtio16 flags;
|
|
+ __virtio16 idx;
|
|
+ __virtio16 ring[0];
|
|
+};
|
|
+
|
|
+struct vring_used_elem {
|
|
+ __virtio32 id;
|
|
+ __virtio32 len;
|
|
+};
|
|
+
|
|
+struct vring_used {
|
|
+ __virtio16 flags;
|
|
+ __virtio16 idx;
|
|
+ struct vring_used_elem ring[0];
|
|
+};
|
|
+
|
|
+struct vring {
|
|
+ unsigned int num;
|
|
+ struct vring_desc *desc;
|
|
+ struct vring_avail *avail;
|
|
+ struct vring_used *used;
|
|
+};
|
|
+
|
|
+struct vringh {
|
|
+ bool little_endian;
|
|
+ bool event_indices;
|
|
+ bool weak_barriers;
|
|
+ u16 last_avail_idx;
|
|
+ u16 last_used_idx;
|
|
+ u32 completed;
|
|
+ struct vring vring;
|
|
+ void (*notify)(struct vringh *);
|
|
+};
|
|
+
|
|
+struct virtio_device;
|
|
+
|
|
+typedef void vrh_callback_t(struct virtio_device *, struct vringh *);
|
|
+
|
|
+struct virtio_config_ops;
|
|
+
|
|
+struct vringh_config_ops;
|
|
+
|
|
+struct virtio_device {
|
|
+ int index;
|
|
+ bool failed;
|
|
+ bool config_enabled;
|
|
+ bool config_change_pending;
|
|
+ spinlock_t config_lock;
|
|
+ struct device dev;
|
|
+ struct virtio_device_id id;
|
|
+ const struct virtio_config_ops *config;
|
|
+ const struct vringh_config_ops *vringh_config;
|
|
+ struct list_head vqs;
|
|
+ u64 features;
|
|
+ void *priv;
|
|
+};
|
|
+
|
|
+struct vringh_config_ops {
|
|
+ int (*find_vrhs)(struct virtio_device *, unsigned int, struct vringh **, vrh_callback_t **);
|
|
+ void (*del_vrhs)(struct virtio_device *);
|
|
+};
|
|
+
|
|
+struct virtqueue {
|
|
+ struct list_head list;
|
|
+ void (*callback)(struct virtqueue *);
|
|
+ const char *name;
|
|
+ struct virtio_device *vdev;
|
|
+ unsigned int index;
|
|
+ unsigned int num_free;
|
|
+ void *priv;
|
|
+};
|
|
+
|
|
+typedef void vq_callback_t(struct virtqueue *);
|
|
+
|
|
+struct virtio_config_ops {
|
|
+ void (*get)(struct virtio_device *, unsigned int, void *, unsigned int);
|
|
+ void (*set)(struct virtio_device *, unsigned int, const void *, unsigned int);
|
|
+ u32 (*generation)(struct virtio_device *);
|
|
+ u8 (*get_status)(struct virtio_device *);
|
|
+ void (*set_status)(struct virtio_device *, u8);
|
|
+ void (*reset)(struct virtio_device *);
|
|
+ int (*find_vqs)(struct virtio_device *, unsigned int, struct virtqueue **, vq_callback_t **, const char * const *, const bool *, struct irq_affinity *);
|
|
+ void (*del_vqs)(struct virtio_device *);
|
|
+ u64 (*get_features)(struct virtio_device *);
|
|
+ int (*finalize_features)(struct virtio_device *);
|
|
+ const char * (*bus_name)(struct virtio_device *);
|
|
+ int (*set_vq_affinity)(struct virtqueue *, const struct cpumask *);
|
|
+ const struct cpumask * (*get_vq_affinity)(struct virtio_device *, int);
|
|
+};
|
|
+
|
|
+struct irq_poll;
|
|
+
|
|
+typedef int irq_poll_fn(struct irq_poll *, int);
|
|
+
|
|
+struct irq_poll {
|
|
+ struct list_head list;
|
|
+ long unsigned int state;
|
|
+ int weight;
|
|
+ irq_poll_fn *poll;
|
|
+};
|
|
+
|
|
+enum rdma_restrack_type {
|
|
+ RDMA_RESTRACK_PD = 0,
|
|
+ RDMA_RESTRACK_CQ = 1,
|
|
+ RDMA_RESTRACK_QP = 2,
|
|
+ RDMA_RESTRACK_CM_ID = 3,
|
|
+ RDMA_RESTRACK_MR = 4,
|
|
+ RDMA_RESTRACK_MAX = 5,
|
|
+};
|
|
+
|
|
+struct rdma_restrack_entry;
|
|
+
|
|
+struct rdma_restrack_root {
|
|
+ struct rw_semaphore rwsem;
|
|
+ struct hlist_head hash[256];
|
|
+ int (*fill_res_entry)(struct sk_buff *, struct rdma_restrack_entry *);
|
|
+};
|
|
+
|
|
+struct rdma_restrack_entry {
|
|
+ bool valid;
|
|
+ struct kref kref;
|
|
+ struct completion comp;
|
|
+ struct task_struct *task;
|
|
+ const char *kern_name;
|
|
+ struct hlist_node node;
|
|
+ enum rdma_restrack_type type;
|
|
+};
|
|
+
|
|
+enum rdma_driver_id {
|
|
+ RDMA_DRIVER_UNKNOWN = 0,
|
|
+ RDMA_DRIVER_MLX5 = 1,
|
|
+ RDMA_DRIVER_MLX4 = 2,
|
|
+ RDMA_DRIVER_CXGB3 = 3,
|
|
+ RDMA_DRIVER_CXGB4 = 4,
|
|
+ RDMA_DRIVER_MTHCA = 5,
|
|
+ RDMA_DRIVER_BNXT_RE = 6,
|
|
+ RDMA_DRIVER_OCRDMA = 7,
|
|
+ RDMA_DRIVER_NES = 8,
|
|
+ RDMA_DRIVER_I40IW = 9,
|
|
+ RDMA_DRIVER_VMW_PVRDMA = 10,
|
|
+ RDMA_DRIVER_QEDR = 11,
|
|
+ RDMA_DRIVER_HNS = 12,
|
|
+ RDMA_DRIVER_USNIC = 13,
|
|
+ RDMA_DRIVER_RXE = 14,
|
|
+ RDMA_DRIVER_HFI1 = 15,
|
|
+ RDMA_DRIVER_QIB = 16,
|
|
+};
|
|
+
|
|
+enum ib_uverbs_flow_action_esp_keymat {
|
|
+ IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM = 0,
|
|
+};
|
|
+
|
|
+struct ib_uverbs_flow_action_esp_keymat_aes_gcm {
|
|
+ __u64 iv;
|
|
+ __u32 iv_algo;
|
|
+ __u32 salt;
|
|
+ __u32 icv_len;
|
|
+ __u32 key_len;
|
|
+ __u32 aes_key[8];
|
|
+};
|
|
+
|
|
+enum ib_uverbs_flow_action_esp_replay {
|
|
+ IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE = 0,
|
|
+ IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP = 1,
|
|
+};
|
|
+
|
|
+struct ib_uverbs_flow_action_esp_replay_bmp {
|
|
+ __u32 size;
|
|
+};
|
|
+
|
|
+enum ib_gid_type {
|
|
+ IB_GID_TYPE_IB = 0,
|
|
+ IB_GID_TYPE_ROCE = 0,
|
|
+ IB_GID_TYPE_ROCE_UDP_ENCAP = 1,
|
|
+ IB_GID_TYPE_SIZE = 2,
|
|
+};
|
|
+
|
|
+struct ib_device;
|
|
+
|
|
+struct ib_gid_attr {
|
|
+ struct net_device *ndev;
|
|
+ struct ib_device *device;
|
|
+ union ib_gid gid;
|
|
+ enum ib_gid_type gid_type;
|
|
+ u16 index;
|
|
+ u8 port_num;
|
|
+};
|
|
+
|
|
+struct ib_event;
|
|
+
|
|
+struct ib_event_handler {
|
|
+ struct ib_device *device;
|
|
+ void (*handler)(struct ib_event_handler *, struct ib_event *);
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct ib_port_cache;
|
|
+
|
|
+struct ib_cache {
|
|
+ rwlock_t lock;
|
|
+ struct ib_event_handler event_handler;
|
|
+ struct ib_port_cache *ports;
|
|
+};
|
|
+
|
|
+struct iw_cm_verbs;
|
|
+
|
|
+enum rdma_link_layer {
|
|
+ IB_LINK_LAYER_UNSPECIFIED = 0,
|
|
+ IB_LINK_LAYER_INFINIBAND = 1,
|
|
+ IB_LINK_LAYER_ETHERNET = 2,
|
|
+};
|
|
+
|
|
+enum ib_srq_attr_mask {
|
|
+ IB_SRQ_MAX_WR = 1,
|
|
+ IB_SRQ_LIMIT = 2,
|
|
+};
|
|
+
|
|
+enum ib_cq_notify_flags {
|
|
+ IB_CQ_SOLICITED = 1,
|
|
+ IB_CQ_NEXT_COMP = 2,
|
|
+ IB_CQ_SOLICITED_MASK = 3,
|
|
+ IB_CQ_REPORT_MISSED_EVENTS = 4,
|
|
+};
|
|
+
|
|
+enum ib_mr_type {
|
|
+ IB_MR_TYPE_MEM_REG = 0,
|
|
+ IB_MR_TYPE_SIGNATURE = 1,
|
|
+ IB_MR_TYPE_SG_GAPS = 2,
|
|
+};
|
|
+
|
|
+enum ib_mw_type {
|
|
+ IB_MW_TYPE_1 = 1,
|
|
+ IB_MW_TYPE_2 = 2,
|
|
+};
|
|
+
|
|
+struct ib_mad_hdr;
|
|
+
|
|
+struct uverbs_attr_bundle;
|
|
+
|
|
+enum rdma_netdev_t {
|
|
+ RDMA_NETDEV_OPA_VNIC = 0,
|
|
+ RDMA_NETDEV_IPOIB = 1,
|
|
+};
|
|
+
|
|
+enum ib_atomic_cap {
|
|
+ IB_ATOMIC_NONE = 0,
|
|
+ IB_ATOMIC_HCA = 1,
|
|
+ IB_ATOMIC_GLOB = 2,
|
|
+};
|
|
+
|
|
+struct ib_odp_caps {
|
|
+ uint64_t general_caps;
|
|
+ struct {
|
|
+ uint32_t rc_odp_caps;
|
|
+ uint32_t uc_odp_caps;
|
|
+ uint32_t ud_odp_caps;
|
|
+ } per_transport_caps;
|
|
+};
|
|
+
|
|
+struct ib_rss_caps {
|
|
+ u32 supported_qpts;
|
|
+ u32 max_rwq_indirection_tables;
|
|
+ u32 max_rwq_indirection_table_size;
|
|
+};
|
|
+
|
|
+struct ib_tm_caps {
|
|
+ u32 max_rndv_hdr_size;
|
|
+ u32 max_num_tags;
|
|
+ u32 flags;
|
|
+ u32 max_ops;
|
|
+ u32 max_sge;
|
|
+};
|
|
+
|
|
+struct ib_cq_caps {
|
|
+ u16 max_cq_moderation_count;
|
|
+ u16 max_cq_moderation_period;
|
|
+};
|
|
+
|
|
+struct ib_device_attr {
|
|
+ u64 fw_ver;
|
|
+ __be64 sys_image_guid;
|
|
+ u64 max_mr_size;
|
|
+ u64 page_size_cap;
|
|
+ u32 vendor_id;
|
|
+ u32 vendor_part_id;
|
|
+ u32 hw_ver;
|
|
+ int max_qp;
|
|
+ int max_qp_wr;
|
|
+ u64 device_cap_flags;
|
|
+ int max_send_sge;
|
|
+ int max_recv_sge;
|
|
+ int max_sge_rd;
|
|
+ int max_cq;
|
|
+ int max_cqe;
|
|
+ int max_mr;
|
|
+ int max_pd;
|
|
+ int max_qp_rd_atom;
|
|
+ int max_ee_rd_atom;
|
|
+ int max_res_rd_atom;
|
|
+ int max_qp_init_rd_atom;
|
|
+ int max_ee_init_rd_atom;
|
|
+ enum ib_atomic_cap atomic_cap;
|
|
+ enum ib_atomic_cap masked_atomic_cap;
|
|
+ int max_ee;
|
|
+ int max_rdd;
|
|
+ int max_mw;
|
|
+ int max_raw_ipv6_qp;
|
|
+ int max_raw_ethy_qp;
|
|
+ int max_mcast_grp;
|
|
+ int max_mcast_qp_attach;
|
|
+ int max_total_mcast_qp_attach;
|
|
+ int max_ah;
|
|
+ int max_fmr;
|
|
+ int max_map_per_fmr;
|
|
+ int max_srq;
|
|
+ int max_srq_wr;
|
|
+ int max_srq_sge;
|
|
+ unsigned int max_fast_reg_page_list_len;
|
|
+ u16 max_pkeys;
|
|
+ u8 local_ca_ack_delay;
|
|
+ int sig_prot_cap;
|
|
+ int sig_guard_cap;
|
|
+ struct ib_odp_caps odp_caps;
|
|
+ uint64_t timestamp_mask;
|
|
+ uint64_t hca_core_clock;
|
|
+ struct ib_rss_caps rss_caps;
|
|
+ u32 max_wq_type_rq;
|
|
+ u32 raw_packet_caps;
|
|
+ struct ib_tm_caps tm_caps;
|
|
+ struct ib_cq_caps cq_caps;
|
|
+ u64 max_dm_size;
|
|
+};
|
|
+
|
|
+struct uverbs_object_tree_def;
|
|
+
|
|
+struct ib_port_immutable;
|
|
+
|
|
+struct ib_port_pkey_list;
|
|
+
|
|
+struct rdma_hw_stats;
|
|
+
|
|
+struct ib_udata;
|
|
+
|
|
+struct ib_port_attr;
|
|
+
|
|
+struct ib_device_modify;
|
|
+
|
|
+struct ib_port_modify;
|
|
+
|
|
+struct ib_ucontext;
|
|
+
|
|
+struct ib_pd;
|
|
+
|
|
+struct ib_ah;
|
|
+
|
|
+struct rdma_ah_attr;
|
|
+
|
|
+struct ib_srq;
|
|
+
|
|
+struct ib_srq_init_attr;
|
|
+
|
|
+struct ib_srq_attr;
|
|
+
|
|
+struct ib_recv_wr;
|
|
+
|
|
+struct ib_qp;
|
|
+
|
|
+struct ib_qp_init_attr;
|
|
+
|
|
+struct ib_qp_attr;
|
|
+
|
|
+struct ib_send_wr;
|
|
+
|
|
+struct ib_cq;
|
|
+
|
|
+struct ib_cq_init_attr;
|
|
+
|
|
+struct ib_wc;
|
|
+
|
|
+struct ib_mr;
|
|
+
|
|
+struct ib_mw;
|
|
+
|
|
+struct ib_fmr;
|
|
+
|
|
+struct ib_fmr_attr;
|
|
+
|
|
+struct ib_grh;
|
|
+
|
|
+struct ib_xrcd;
|
|
+
|
|
+struct ib_flow;
|
|
+
|
|
+struct ib_flow_attr;
|
|
+
|
|
+struct ib_mr_status;
|
|
+
|
|
+struct ib_wq;
|
|
+
|
|
+struct ib_wq_init_attr;
|
|
+
|
|
+struct ib_wq_attr;
|
|
+
|
|
+struct ib_rwq_ind_table;
|
|
+
|
|
+struct ib_rwq_ind_table_init_attr;
|
|
+
|
|
+struct ib_flow_action;
|
|
+
|
|
+struct ib_flow_action_attrs_esp;
|
|
+
|
|
+struct ib_dm;
|
|
+
|
|
+struct ib_dm_alloc_attr;
|
|
+
|
|
+struct ib_dm_mr_attr;
|
|
+
|
|
+struct ib_counters;
|
|
+
|
|
+struct ib_counters_read_attr;
|
|
+
|
|
+struct ib_device {
|
|
+ struct device *dma_device;
|
|
+ char name[64];
|
|
+ struct list_head event_handler_list;
|
|
+ spinlock_t event_handler_lock;
|
|
+ spinlock_t client_data_lock;
|
|
+ struct list_head core_list;
|
|
+ struct list_head client_data_list;
|
|
+ struct ib_cache cache;
|
|
+ struct ib_port_immutable *port_immutable;
|
|
+ int num_comp_vectors;
|
|
+ struct ib_port_pkey_list *port_pkey_list;
|
|
+ struct iw_cm_verbs *iwcm;
|
|
+ struct rdma_hw_stats * (*alloc_hw_stats)(struct ib_device *, u8);
|
|
+ int (*get_hw_stats)(struct ib_device *, struct rdma_hw_stats *, u8, int);
|
|
+ int (*query_device)(struct ib_device *, struct ib_device_attr *, struct ib_udata *);
|
|
+ int (*query_port)(struct ib_device *, u8, struct ib_port_attr *);
|
|
+ enum rdma_link_layer (*get_link_layer)(struct ib_device *, u8);
|
|
+ struct net_device * (*get_netdev)(struct ib_device *, u8);
|
|
+ int (*query_gid)(struct ib_device *, u8, int, union ib_gid *);
|
|
+ int (*add_gid)(const struct ib_gid_attr *, void **);
|
|
+ int (*del_gid)(const struct ib_gid_attr *, void **);
|
|
+ int (*query_pkey)(struct ib_device *, u8, u16, u16 *);
|
|
+ int (*modify_device)(struct ib_device *, int, struct ib_device_modify *);
|
|
+ int (*modify_port)(struct ib_device *, u8, int, struct ib_port_modify *);
|
|
+ struct ib_ucontext * (*alloc_ucontext)(struct ib_device *, struct ib_udata *);
|
|
+ int (*dealloc_ucontext)(struct ib_ucontext *);
|
|
+ int (*mmap)(struct ib_ucontext *, struct vm_area_struct *);
|
|
+ struct ib_pd * (*alloc_pd)(struct ib_device *, struct ib_ucontext *, struct ib_udata *);
|
|
+ int (*dealloc_pd)(struct ib_pd *);
|
|
+ struct ib_ah * (*create_ah)(struct ib_pd *, struct rdma_ah_attr *, struct ib_udata *);
|
|
+ int (*modify_ah)(struct ib_ah *, struct rdma_ah_attr *);
|
|
+ int (*query_ah)(struct ib_ah *, struct rdma_ah_attr *);
|
|
+ int (*destroy_ah)(struct ib_ah *);
|
|
+ struct ib_srq * (*create_srq)(struct ib_pd *, struct ib_srq_init_attr *, struct ib_udata *);
|
|
+ int (*modify_srq)(struct ib_srq *, struct ib_srq_attr *, enum ib_srq_attr_mask, struct ib_udata *);
|
|
+ int (*query_srq)(struct ib_srq *, struct ib_srq_attr *);
|
|
+ int (*destroy_srq)(struct ib_srq *);
|
|
+ int (*post_srq_recv)(struct ib_srq *, const struct ib_recv_wr *, const struct ib_recv_wr **);
|
|
+ struct ib_qp * (*create_qp)(struct ib_pd *, struct ib_qp_init_attr *, struct ib_udata *);
|
|
+ int (*modify_qp)(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
|
|
+ int (*query_qp)(struct ib_qp *, struct ib_qp_attr *, int, struct ib_qp_init_attr *);
|
|
+ int (*destroy_qp)(struct ib_qp *);
|
|
+ int (*post_send)(struct ib_qp *, const struct ib_send_wr *, const struct ib_send_wr **);
|
|
+ int (*post_recv)(struct ib_qp *, const struct ib_recv_wr *, const struct ib_recv_wr **);
|
|
+ struct ib_cq * (*create_cq)(struct ib_device *, const struct ib_cq_init_attr *, struct ib_ucontext *, struct ib_udata *);
|
|
+ int (*modify_cq)(struct ib_cq *, u16, u16);
|
|
+ int (*destroy_cq)(struct ib_cq *);
|
|
+ int (*resize_cq)(struct ib_cq *, int, struct ib_udata *);
|
|
+ int (*poll_cq)(struct ib_cq *, int, struct ib_wc *);
|
|
+ int (*peek_cq)(struct ib_cq *, int);
|
|
+ int (*req_notify_cq)(struct ib_cq *, enum ib_cq_notify_flags);
|
|
+ int (*req_ncomp_notif)(struct ib_cq *, int);
|
|
+ struct ib_mr * (*get_dma_mr)(struct ib_pd *, int);
|
|
+ struct ib_mr * (*reg_user_mr)(struct ib_pd *, u64, u64, u64, int, struct ib_udata *);
|
|
+ int (*rereg_user_mr)(struct ib_mr *, int, u64, u64, u64, int, struct ib_pd *, struct ib_udata *);
|
|
+ int (*dereg_mr)(struct ib_mr *);
|
|
+ struct ib_mr * (*alloc_mr)(struct ib_pd *, enum ib_mr_type, u32);
|
|
+ int (*map_mr_sg)(struct ib_mr *, struct scatterlist *, int, unsigned int *);
|
|
+ struct ib_mw * (*alloc_mw)(struct ib_pd *, enum ib_mw_type, struct ib_udata *);
|
|
+ int (*dealloc_mw)(struct ib_mw *);
|
|
+ struct ib_fmr * (*alloc_fmr)(struct ib_pd *, int, struct ib_fmr_attr *);
|
|
+ int (*map_phys_fmr)(struct ib_fmr *, u64 *, int, u64);
|
|
+ int (*unmap_fmr)(struct list_head *);
|
|
+ int (*dealloc_fmr)(struct ib_fmr *);
|
|
+ int (*attach_mcast)(struct ib_qp *, union ib_gid *, u16);
|
|
+ int (*detach_mcast)(struct ib_qp *, union ib_gid *, u16);
|
|
+ int (*process_mad)(struct ib_device *, int, u8, const struct ib_wc *, const struct ib_grh *, const struct ib_mad_hdr *, size_t, struct ib_mad_hdr *, size_t *, u16 *);
|
|
+ struct ib_xrcd * (*alloc_xrcd)(struct ib_device *, struct ib_ucontext *, struct ib_udata *);
|
|
+ int (*dealloc_xrcd)(struct ib_xrcd *);
|
|
+ struct ib_flow * (*create_flow)(struct ib_qp *, struct ib_flow_attr *, int, struct ib_udata *);
|
|
+ int (*destroy_flow)(struct ib_flow *);
|
|
+ int (*check_mr_status)(struct ib_mr *, u32, struct ib_mr_status *);
|
|
+ void (*disassociate_ucontext)(struct ib_ucontext *);
|
|
+ void (*drain_rq)(struct ib_qp *);
|
|
+ void (*drain_sq)(struct ib_qp *);
|
|
+ int (*set_vf_link_state)(struct ib_device *, int, u8, int);
|
|
+ int (*get_vf_config)(struct ib_device *, int, u8, struct ifla_vf_info *);
|
|
+ int (*get_vf_stats)(struct ib_device *, int, u8, struct ifla_vf_stats *);
|
|
+ int (*set_vf_guid)(struct ib_device *, int, u8, u64, int);
|
|
+ struct ib_wq * (*create_wq)(struct ib_pd *, struct ib_wq_init_attr *, struct ib_udata *);
|
|
+ int (*destroy_wq)(struct ib_wq *);
|
|
+ int (*modify_wq)(struct ib_wq *, struct ib_wq_attr *, u32, struct ib_udata *);
|
|
+ struct ib_rwq_ind_table * (*create_rwq_ind_table)(struct ib_device *, struct ib_rwq_ind_table_init_attr *, struct ib_udata *);
|
|
+ int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *);
|
|
+ struct ib_flow_action * (*create_flow_action_esp)(struct ib_device *, const struct ib_flow_action_attrs_esp *, struct uverbs_attr_bundle *);
|
|
+ int (*destroy_flow_action)(struct ib_flow_action *);
|
|
+ int (*modify_flow_action_esp)(struct ib_flow_action *, const struct ib_flow_action_attrs_esp *, struct uverbs_attr_bundle *);
|
|
+ struct ib_dm * (*alloc_dm)(struct ib_device *, struct ib_ucontext *, struct ib_dm_alloc_attr *, struct uverbs_attr_bundle *);
|
|
+ int (*dealloc_dm)(struct ib_dm *);
|
|
+ struct ib_mr * (*reg_dm_mr)(struct ib_pd *, struct ib_dm *, struct ib_dm_mr_attr *, struct uverbs_attr_bundle *);
|
|
+ struct ib_counters * (*create_counters)(struct ib_device *, struct uverbs_attr_bundle *);
|
|
+ int (*destroy_counters)(struct ib_counters *);
|
|
+ int (*read_counters)(struct ib_counters *, struct ib_counters_read_attr *, struct uverbs_attr_bundle *);
|
|
+ struct net_device * (*alloc_rdma_netdev)(struct ib_device *, u8, enum rdma_netdev_t, const char *, unsigned char, void (*)(struct net_device *));
|
|
+ struct module *owner;
|
|
+ struct device dev;
|
|
+ struct kobject *ports_parent;
|
|
+ struct list_head port_list;
|
|
+ enum {
|
|
+ IB_DEV_UNINITIALIZED = 0,
|
|
+ IB_DEV_REGISTERED = 1,
|
|
+ IB_DEV_UNREGISTERED = 2,
|
|
+ } reg_state;
|
|
+ int uverbs_abi_ver;
|
|
+ u64 uverbs_cmd_mask;
|
|
+ u64 uverbs_ex_cmd_mask;
|
|
+ char node_desc[64];
|
|
+ __be64 node_guid;
|
|
+ u32 local_dma_lkey;
|
|
+ u16 is_switch: 1;
|
|
+ u8 node_type;
|
|
+ u8 phys_port_cnt;
|
|
+ struct ib_device_attr attrs;
|
|
+ struct attribute_group *hw_stats_ag;
|
|
+ struct rdma_hw_stats *hw_stats;
|
|
+ struct rdmacg_device cg_device;
|
|
+ u32 index;
|
|
+ struct rdma_restrack_root res;
|
|
+ int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
|
|
+ void (*get_dev_fw_str)(struct ib_device *, char *);
|
|
+ const struct cpumask * (*get_vector_affinity)(struct ib_device *, int);
|
|
+ const struct uverbs_object_tree_def * const *driver_specs;
|
|
+ enum rdma_driver_id driver_id;
|
|
+ refcount_t refcount;
|
|
+ struct completion unreg_completion;
|
|
+};
|
|
+
|
|
+struct ib_cq_init_attr {
|
|
+ unsigned int cqe;
|
|
+ u32 comp_vector;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+struct ib_dm_mr_attr {
|
|
+ u64 length;
|
|
+ u64 offset;
|
|
+ u32 access_flags;
|
|
+};
|
|
+
|
|
+struct ib_dm_alloc_attr {
|
|
+ u64 length;
|
|
+ u32 alignment;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+enum ib_mtu {
|
|
+ IB_MTU_256 = 1,
|
|
+ IB_MTU_512 = 2,
|
|
+ IB_MTU_1024 = 3,
|
|
+ IB_MTU_2048 = 4,
|
|
+ IB_MTU_4096 = 5,
|
|
+};
|
|
+
|
|
+enum ib_port_state {
|
|
+ IB_PORT_NOP = 0,
|
|
+ IB_PORT_DOWN = 1,
|
|
+ IB_PORT_INIT = 2,
|
|
+ IB_PORT_ARMED = 3,
|
|
+ IB_PORT_ACTIVE = 4,
|
|
+ IB_PORT_ACTIVE_DEFER = 5,
|
|
+};
|
|
+
|
|
+struct rdma_hw_stats {
|
|
+ struct mutex lock;
|
|
+ long unsigned int timestamp;
|
|
+ long unsigned int lifespan;
|
|
+ const char * const *names;
|
|
+ int num_counters;
|
|
+ u64 value[0];
|
|
+};
|
|
+
|
|
+struct ib_port_attr {
|
|
+ u64 subnet_prefix;
|
|
+ enum ib_port_state state;
|
|
+ enum ib_mtu max_mtu;
|
|
+ enum ib_mtu active_mtu;
|
|
+ int gid_tbl_len;
|
|
+ unsigned int ip_gids: 1;
|
|
+ u32 port_cap_flags;
|
|
+ u32 max_msg_sz;
|
|
+ u32 bad_pkey_cntr;
|
|
+ u32 qkey_viol_cntr;
|
|
+ u16 pkey_tbl_len;
|
|
+ u32 sm_lid;
|
|
+ u32 lid;
|
|
+ u8 lmc;
|
|
+ u8 max_vl_num;
|
|
+ u8 sm_sl;
|
|
+ u8 subnet_timeout;
|
|
+ u8 init_type_reply;
|
|
+ u8 active_width;
|
|
+ u8 active_speed;
|
|
+ u8 phys_state;
|
|
+};
|
|
+
|
|
+struct ib_device_modify {
|
|
+ u64 sys_image_guid;
|
|
+ char node_desc[64];
|
|
+};
|
|
+
|
|
+struct ib_port_modify {
|
|
+ u32 set_port_cap_mask;
|
|
+ u32 clr_port_cap_mask;
|
|
+ u8 init_type;
|
|
+};
|
|
+
|
|
+enum ib_event_type {
|
|
+ IB_EVENT_CQ_ERR = 0,
|
|
+ IB_EVENT_QP_FATAL = 1,
|
|
+ IB_EVENT_QP_REQ_ERR = 2,
|
|
+ IB_EVENT_QP_ACCESS_ERR = 3,
|
|
+ IB_EVENT_COMM_EST = 4,
|
|
+ IB_EVENT_SQ_DRAINED = 5,
|
|
+ IB_EVENT_PATH_MIG = 6,
|
|
+ IB_EVENT_PATH_MIG_ERR = 7,
|
|
+ IB_EVENT_DEVICE_FATAL = 8,
|
|
+ IB_EVENT_PORT_ACTIVE = 9,
|
|
+ IB_EVENT_PORT_ERR = 10,
|
|
+ IB_EVENT_LID_CHANGE = 11,
|
|
+ IB_EVENT_PKEY_CHANGE = 12,
|
|
+ IB_EVENT_SM_CHANGE = 13,
|
|
+ IB_EVENT_SRQ_ERR = 14,
|
|
+ IB_EVENT_SRQ_LIMIT_REACHED = 15,
|
|
+ IB_EVENT_QP_LAST_WQE_REACHED = 16,
|
|
+ IB_EVENT_CLIENT_REREGISTER = 17,
|
|
+ IB_EVENT_GID_CHANGE = 18,
|
|
+ IB_EVENT_WQ_FATAL = 19,
|
|
+};
|
|
+
|
|
+typedef void (*ib_comp_handler)(struct ib_cq *, void *);
|
|
+
|
|
+enum ib_poll_context {
|
|
+ IB_POLL_DIRECT = 0,
|
|
+ IB_POLL_SOFTIRQ = 1,
|
|
+ IB_POLL_WORKQUEUE = 2,
|
|
+ IB_POLL_UNBOUND_WORKQUEUE = 3,
|
|
+};
|
|
+
|
|
+struct ib_uobject;
|
|
+
|
|
+struct ib_cq {
|
|
+ struct ib_device *device;
|
|
+ struct ib_uobject *uobject;
|
|
+ ib_comp_handler comp_handler;
|
|
+ void (*event_handler)(struct ib_event *, void *);
|
|
+ void *cq_context;
|
|
+ int cqe;
|
|
+ atomic_t usecnt;
|
|
+ enum ib_poll_context poll_ctx;
|
|
+ struct ib_wc *wc;
|
|
+ union {
|
|
+ struct irq_poll iop;
|
|
+ struct work_struct work;
|
|
+ };
|
|
+ struct workqueue_struct *comp_wq;
|
|
+ struct rdma_restrack_entry res;
|
|
+};
|
|
+
|
|
+enum ib_qp_type {
|
|
+ IB_QPT_SMI = 0,
|
|
+ IB_QPT_GSI = 1,
|
|
+ IB_QPT_RC = 2,
|
|
+ IB_QPT_UC = 3,
|
|
+ IB_QPT_UD = 4,
|
|
+ IB_QPT_RAW_IPV6 = 5,
|
|
+ IB_QPT_RAW_ETHERTYPE = 6,
|
|
+ IB_QPT_RAW_PACKET = 8,
|
|
+ IB_QPT_XRC_INI = 9,
|
|
+ IB_QPT_XRC_TGT = 10,
|
|
+ IB_QPT_MAX = 11,
|
|
+ IB_QPT_DRIVER = 255,
|
|
+ IB_QPT_RESERVED1 = 4096,
|
|
+ IB_QPT_RESERVED2 = 4097,
|
|
+ IB_QPT_RESERVED3 = 4098,
|
|
+ IB_QPT_RESERVED4 = 4099,
|
|
+ IB_QPT_RESERVED5 = 4100,
|
|
+ IB_QPT_RESERVED6 = 4101,
|
|
+ IB_QPT_RESERVED7 = 4102,
|
|
+ IB_QPT_RESERVED8 = 4103,
|
|
+ IB_QPT_RESERVED9 = 4104,
|
|
+ IB_QPT_RESERVED10 = 4105,
|
|
+};
|
|
+
|
|
+struct ib_qp_security;
|
|
+
|
|
+struct ib_qp {
|
|
+ struct ib_device *device;
|
|
+ struct ib_pd *pd;
|
|
+ struct ib_cq *send_cq;
|
|
+ struct ib_cq *recv_cq;
|
|
+ spinlock_t mr_lock;
|
|
+ int mrs_used;
|
|
+ struct list_head rdma_mrs;
|
|
+ struct list_head sig_mrs;
|
|
+ struct ib_srq *srq;
|
|
+ struct ib_xrcd *xrcd;
|
|
+ struct list_head xrcd_list;
|
|
+ atomic_t usecnt;
|
|
+ struct list_head open_list;
|
|
+ struct ib_qp *real_qp;
|
|
+ struct ib_uobject *uobject;
|
|
+ void (*event_handler)(struct ib_event *, void *);
|
|
+ void *qp_context;
|
|
+ const struct ib_gid_attr *av_sgid_attr;
|
|
+ const struct ib_gid_attr *alt_path_sgid_attr;
|
|
+ u32 qp_num;
|
|
+ u32 max_write_sge;
|
|
+ u32 max_read_sge;
|
|
+ enum ib_qp_type qp_type;
|
|
+ struct ib_rwq_ind_table *rwq_ind_tbl;
|
|
+ struct ib_qp_security *qp_sec;
|
|
+ u8 port;
|
|
+ struct rdma_restrack_entry res;
|
|
+};
|
|
+
|
|
+enum ib_srq_type {
|
|
+ IB_SRQT_BASIC = 0,
|
|
+ IB_SRQT_XRC = 1,
|
|
+ IB_SRQT_TM = 2,
|
|
+};
|
|
+
|
|
+struct ib_srq {
|
|
+ struct ib_device *device;
|
|
+ struct ib_pd *pd;
|
|
+ struct ib_uobject *uobject;
|
|
+ void (*event_handler)(struct ib_event *, void *);
|
|
+ void *srq_context;
|
|
+ enum ib_srq_type srq_type;
|
|
+ atomic_t usecnt;
|
|
+ struct {
|
|
+ struct ib_cq *cq;
|
|
+ union {
|
|
+ struct {
|
|
+ struct ib_xrcd *xrcd;
|
|
+ u32 srq_num;
|
|
+ } xrc;
|
|
+ };
|
|
+ } ext;
|
|
+};
|
|
+
|
|
+enum ib_wq_state {
|
|
+ IB_WQS_RESET = 0,
|
|
+ IB_WQS_RDY = 1,
|
|
+ IB_WQS_ERR = 2,
|
|
+};
|
|
+
|
|
+enum ib_wq_type {
|
|
+ IB_WQT_RQ = 0,
|
|
+};
|
|
+
|
|
+struct ib_wq {
|
|
+ struct ib_device *device;
|
|
+ struct ib_uobject *uobject;
|
|
+ void *wq_context;
|
|
+ void (*event_handler)(struct ib_event *, void *);
|
|
+ struct ib_pd *pd;
|
|
+ struct ib_cq *cq;
|
|
+ u32 wq_num;
|
|
+ enum ib_wq_state state;
|
|
+ enum ib_wq_type wq_type;
|
|
+ atomic_t usecnt;
|
|
+};
|
|
+
|
|
+struct ib_event {
|
|
+ struct ib_device *device;
|
|
+ union {
|
|
+ struct ib_cq *cq;
|
|
+ struct ib_qp *qp;
|
|
+ struct ib_srq *srq;
|
|
+ struct ib_wq *wq;
|
|
+ u8 port_num;
|
|
+ } element;
|
|
+ enum ib_event_type event;
|
|
+};
|
|
+
|
|
+struct ib_global_route {
|
|
+ const struct ib_gid_attr *sgid_attr;
|
|
+ union ib_gid dgid;
|
|
+ u32 flow_label;
|
|
+ u8 sgid_index;
|
|
+ u8 hop_limit;
|
|
+ u8 traffic_class;
|
|
+};
|
|
+
|
|
+struct ib_grh {
|
|
+ __be32 version_tclass_flow;
|
|
+ __be16 paylen;
|
|
+ u8 next_hdr;
|
|
+ u8 hop_limit;
|
|
+ union ib_gid sgid;
|
|
+ union ib_gid dgid;
|
|
+};
|
|
+
|
|
+enum ib_sig_err_type {
|
|
+ IB_SIG_BAD_GUARD = 0,
|
|
+ IB_SIG_BAD_REFTAG = 1,
|
|
+ IB_SIG_BAD_APPTAG = 2,
|
|
+};
|
|
+
|
|
+struct ib_sig_err {
|
|
+ enum ib_sig_err_type err_type;
|
|
+ u32 expected;
|
|
+ u32 actual;
|
|
+ u64 sig_err_offset;
|
|
+ u32 key;
|
|
+};
|
|
+
|
|
+struct ib_mr_status {
|
|
+ u32 fail_status;
|
|
+ struct ib_sig_err sig_err;
|
|
+};
|
|
+
|
|
+enum rdma_ah_attr_type {
|
|
+ RDMA_AH_ATTR_TYPE_UNDEFINED = 0,
|
|
+ RDMA_AH_ATTR_TYPE_IB = 1,
|
|
+ RDMA_AH_ATTR_TYPE_ROCE = 2,
|
|
+ RDMA_AH_ATTR_TYPE_OPA = 3,
|
|
+};
|
|
+
|
|
+struct ib_ah_attr {
|
|
+ u16 dlid;
|
|
+ u8 src_path_bits;
|
|
+};
|
|
+
|
|
+struct roce_ah_attr {
|
|
+ u8 dmac[6];
|
|
+};
|
|
+
|
|
+struct opa_ah_attr {
|
|
+ u32 dlid;
|
|
+ u8 src_path_bits;
|
|
+ bool make_grd;
|
|
+};
|
|
+
|
|
+struct rdma_ah_attr {
|
|
+ struct ib_global_route grh;
|
|
+ u8 sl;
|
|
+ u8 static_rate;
|
|
+ u8 port_num;
|
|
+ u8 ah_flags;
|
|
+ enum rdma_ah_attr_type type;
|
|
+ union {
|
|
+ struct ib_ah_attr ib;
|
|
+ struct roce_ah_attr roce;
|
|
+ struct opa_ah_attr opa;
|
|
+ };
|
|
+};
|
|
+
|
|
+enum ib_wc_status {
|
|
+ IB_WC_SUCCESS = 0,
|
|
+ IB_WC_LOC_LEN_ERR = 1,
|
|
+ IB_WC_LOC_QP_OP_ERR = 2,
|
|
+ IB_WC_LOC_EEC_OP_ERR = 3,
|
|
+ IB_WC_LOC_PROT_ERR = 4,
|
|
+ IB_WC_WR_FLUSH_ERR = 5,
|
|
+ IB_WC_MW_BIND_ERR = 6,
|
|
+ IB_WC_BAD_RESP_ERR = 7,
|
|
+ IB_WC_LOC_ACCESS_ERR = 8,
|
|
+ IB_WC_REM_INV_REQ_ERR = 9,
|
|
+ IB_WC_REM_ACCESS_ERR = 10,
|
|
+ IB_WC_REM_OP_ERR = 11,
|
|
+ IB_WC_RETRY_EXC_ERR = 12,
|
|
+ IB_WC_RNR_RETRY_EXC_ERR = 13,
|
|
+ IB_WC_LOC_RDD_VIOL_ERR = 14,
|
|
+ IB_WC_REM_INV_RD_REQ_ERR = 15,
|
|
+ IB_WC_REM_ABORT_ERR = 16,
|
|
+ IB_WC_INV_EECN_ERR = 17,
|
|
+ IB_WC_INV_EEC_STATE_ERR = 18,
|
|
+ IB_WC_FATAL_ERR = 19,
|
|
+ IB_WC_RESP_TIMEOUT_ERR = 20,
|
|
+ IB_WC_GENERAL_ERR = 21,
|
|
+};
|
|
+
|
|
+enum ib_wc_opcode {
|
|
+ IB_WC_SEND = 0,
|
|
+ IB_WC_RDMA_WRITE = 1,
|
|
+ IB_WC_RDMA_READ = 2,
|
|
+ IB_WC_COMP_SWAP = 3,
|
|
+ IB_WC_FETCH_ADD = 4,
|
|
+ IB_WC_LSO = 5,
|
|
+ IB_WC_LOCAL_INV = 6,
|
|
+ IB_WC_REG_MR = 7,
|
|
+ IB_WC_MASKED_COMP_SWAP = 8,
|
|
+ IB_WC_MASKED_FETCH_ADD = 9,
|
|
+ IB_WC_RECV = 128,
|
|
+ IB_WC_RECV_RDMA_WITH_IMM = 129,
|
|
+};
|
|
+
|
|
+struct ib_cqe {
|
|
+ void (*done)(struct ib_cq *, struct ib_wc *);
|
|
+};
|
|
+
|
|
+struct ib_wc {
|
|
+ union {
|
|
+ u64 wr_id;
|
|
+ struct ib_cqe *wr_cqe;
|
|
+ };
|
|
+ enum ib_wc_status status;
|
|
+ enum ib_wc_opcode opcode;
|
|
+ u32 vendor_err;
|
|
+ u32 byte_len;
|
|
+ struct ib_qp *qp;
|
|
+ union {
|
|
+ __be32 imm_data;
|
|
+ u32 invalidate_rkey;
|
|
+ } ex;
|
|
+ u32 src_qp;
|
|
+ u32 slid;
|
|
+ int wc_flags;
|
|
+ u16 pkey_index;
|
|
+ u8 sl;
|
|
+ u8 dlid_path_bits;
|
|
+ u8 port_num;
|
|
+ u8 smac[6];
|
|
+ u16 vlan_id;
|
|
+ u8 network_hdr_type;
|
|
+};
|
|
+
|
|
+struct ib_srq_attr {
|
|
+ u32 max_wr;
|
|
+ u32 max_sge;
|
|
+ u32 srq_limit;
|
|
+};
|
|
+
|
|
+struct ib_xrcd {
|
|
+ struct ib_device *device;
|
|
+ atomic_t usecnt;
|
|
+ struct inode *inode;
|
|
+ struct mutex tgt_qp_mutex;
|
|
+ struct list_head tgt_qp_list;
|
|
+};
|
|
+
|
|
+struct ib_srq_init_attr {
|
|
+ void (*event_handler)(struct ib_event *, void *);
|
|
+ void *srq_context;
|
|
+ struct ib_srq_attr attr;
|
|
+ enum ib_srq_type srq_type;
|
|
+ struct {
|
|
+ struct ib_cq *cq;
|
|
+ union {
|
|
+ struct {
|
|
+ struct ib_xrcd *xrcd;
|
|
+ } xrc;
|
|
+ struct {
|
|
+ u32 max_num_tags;
|
|
+ } tag_matching;
|
|
+ };
|
|
+ } ext;
|
|
+};
|
|
+
|
|
+struct ib_qp_cap {
|
|
+ u32 max_send_wr;
|
|
+ u32 max_recv_wr;
|
|
+ u32 max_send_sge;
|
|
+ u32 max_recv_sge;
|
|
+ u32 max_inline_data;
|
|
+ u32 max_rdma_ctxs;
|
|
+};
|
|
+
|
|
+enum ib_sig_type {
|
|
+ IB_SIGNAL_ALL_WR = 0,
|
|
+ IB_SIGNAL_REQ_WR = 1,
|
|
+};
|
|
+
|
|
+struct ib_qp_init_attr {
|
|
+ void (*event_handler)(struct ib_event *, void *);
|
|
+ void *qp_context;
|
|
+ struct ib_cq *send_cq;
|
|
+ struct ib_cq *recv_cq;
|
|
+ struct ib_srq *srq;
|
|
+ struct ib_xrcd *xrcd;
|
|
+ struct ib_qp_cap cap;
|
|
+ enum ib_sig_type sq_sig_type;
|
|
+ enum ib_qp_type qp_type;
|
|
+ u32 create_flags;
|
|
+ u8 port_num;
|
|
+ struct ib_rwq_ind_table *rwq_ind_tbl;
|
|
+ u32 source_qpn;
|
|
+};
|
|
+
|
|
+struct ib_rwq_ind_table {
|
|
+ struct ib_device *device;
|
|
+ struct ib_uobject *uobject;
|
|
+ atomic_t usecnt;
|
|
+ u32 ind_tbl_num;
|
|
+ u32 log_ind_tbl_size;
|
|
+ struct ib_wq **ind_tbl;
|
|
+};
|
|
+
|
|
+enum ib_qp_state {
|
|
+ IB_QPS_RESET = 0,
|
|
+ IB_QPS_INIT = 1,
|
|
+ IB_QPS_RTR = 2,
|
|
+ IB_QPS_RTS = 3,
|
|
+ IB_QPS_SQD = 4,
|
|
+ IB_QPS_SQE = 5,
|
|
+ IB_QPS_ERR = 6,
|
|
+};
|
|
+
|
|
+enum ib_mig_state {
|
|
+ IB_MIG_MIGRATED = 0,
|
|
+ IB_MIG_REARM = 1,
|
|
+ IB_MIG_ARMED = 2,
|
|
+};
|
|
+
|
|
+struct ib_qp_attr {
|
|
+ enum ib_qp_state qp_state;
|
|
+ enum ib_qp_state cur_qp_state;
|
|
+ enum ib_mtu path_mtu;
|
|
+ enum ib_mig_state path_mig_state;
|
|
+ u32 qkey;
|
|
+ u32 rq_psn;
|
|
+ u32 sq_psn;
|
|
+ u32 dest_qp_num;
|
|
+ int qp_access_flags;
|
|
+ struct ib_qp_cap cap;
|
|
+ struct rdma_ah_attr ah_attr;
|
|
+ struct rdma_ah_attr alt_ah_attr;
|
|
+ u16 pkey_index;
|
|
+ u16 alt_pkey_index;
|
|
+ u8 en_sqd_async_notify;
|
|
+ u8 sq_draining;
|
|
+ u8 max_rd_atomic;
|
|
+ u8 max_dest_rd_atomic;
|
|
+ u8 min_rnr_timer;
|
|
+ u8 port_num;
|
|
+ u8 timeout;
|
|
+ u8 retry_cnt;
|
|
+ u8 rnr_retry;
|
|
+ u8 alt_port_num;
|
|
+ u8 alt_timeout;
|
|
+ u32 rate_limit;
|
|
+};
|
|
+
|
|
+enum ib_wr_opcode {
|
|
+ IB_WR_RDMA_WRITE = 0,
|
|
+ IB_WR_RDMA_WRITE_WITH_IMM = 1,
|
|
+ IB_WR_SEND = 2,
|
|
+ IB_WR_SEND_WITH_IMM = 3,
|
|
+ IB_WR_RDMA_READ = 4,
|
|
+ IB_WR_ATOMIC_CMP_AND_SWP = 5,
|
|
+ IB_WR_ATOMIC_FETCH_AND_ADD = 6,
|
|
+ IB_WR_LSO = 10,
|
|
+ IB_WR_SEND_WITH_INV = 9,
|
|
+ IB_WR_RDMA_READ_WITH_INV = 11,
|
|
+ IB_WR_LOCAL_INV = 7,
|
|
+ IB_WR_MASKED_ATOMIC_CMP_AND_SWP = 12,
|
|
+ IB_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13,
|
|
+ IB_WR_REG_MR = 32,
|
|
+ IB_WR_REG_SIG_MR = 33,
|
|
+ IB_WR_RESERVED1 = 240,
|
|
+ IB_WR_RESERVED2 = 241,
|
|
+ IB_WR_RESERVED3 = 242,
|
|
+ IB_WR_RESERVED4 = 243,
|
|
+ IB_WR_RESERVED5 = 244,
|
|
+ IB_WR_RESERVED6 = 245,
|
|
+ IB_WR_RESERVED7 = 246,
|
|
+ IB_WR_RESERVED8 = 247,
|
|
+ IB_WR_RESERVED9 = 248,
|
|
+ IB_WR_RESERVED10 = 249,
|
|
+};
|
|
+
|
|
+struct ib_sge {
|
|
+ u64 addr;
|
|
+ u32 length;
|
|
+ u32 lkey;
|
|
+};
|
|
+
|
|
+struct ib_send_wr {
|
|
+ struct ib_send_wr *next;
|
|
+ union {
|
|
+ u64 wr_id;
|
|
+ struct ib_cqe *wr_cqe;
|
|
+ };
|
|
+ struct ib_sge *sg_list;
|
|
+ int num_sge;
|
|
+ enum ib_wr_opcode opcode;
|
|
+ int send_flags;
|
|
+ union {
|
|
+ __be32 imm_data;
|
|
+ u32 invalidate_rkey;
|
|
+ } ex;
|
|
+};
|
|
+
|
|
+struct ib_ah {
|
|
+ struct ib_device *device;
|
|
+ struct ib_pd *pd;
|
|
+ struct ib_uobject *uobject;
|
|
+ const struct ib_gid_attr *sgid_attr;
|
|
+ enum rdma_ah_attr_type type;
|
|
+};
|
|
+
|
|
+struct ib_mr {
|
|
+ struct ib_device *device;
|
|
+ struct ib_pd *pd;
|
|
+ u32 lkey;
|
|
+ u32 rkey;
|
|
+ u64 iova;
|
|
+ u64 length;
|
|
+ unsigned int page_size;
|
|
+ bool need_inval;
|
|
+ union {
|
|
+ struct ib_uobject *uobject;
|
|
+ struct list_head qp_entry;
|
|
+ };
|
|
+ struct ib_dm *dm;
|
|
+ struct rdma_restrack_entry res;
|
|
+};
|
|
+
|
|
+struct ib_recv_wr {
|
|
+ struct ib_recv_wr *next;
|
|
+ union {
|
|
+ u64 wr_id;
|
|
+ struct ib_cqe *wr_cqe;
|
|
+ };
|
|
+ struct ib_sge *sg_list;
|
|
+ int num_sge;
|
|
+};
|
|
+
|
|
+struct ib_fmr_attr {
|
|
+ int max_pages;
|
|
+ int max_maps;
|
|
+ u8 page_shift;
|
|
+};
|
|
+
|
|
+struct ib_rdmacg_object {
|
|
+ struct rdma_cgroup *cg;
|
|
+};
|
|
+
|
|
+struct ib_uverbs_file;
|
|
+
|
|
+struct ib_umem;
|
|
+
|
|
+struct ib_ucontext {
|
|
+ struct ib_device *device;
|
|
+ struct ib_uverbs_file *ufile;
|
|
+ int closing;
|
|
+ bool cleanup_retryable;
|
|
+ struct pid *tgid;
|
|
+ struct rb_root_cached umem_tree;
|
|
+ struct rw_semaphore umem_rwsem;
|
|
+ void (*invalidate_range)(struct ib_umem *, long unsigned int, long unsigned int);
|
|
+ struct mmu_notifier mn;
|
|
+ atomic_t notifier_count;
|
|
+ struct list_head no_private_counters;
|
|
+ int odp_mrs_count;
|
|
+ struct ib_rdmacg_object cg_obj;
|
|
+};
|
|
+
|
|
+struct uverbs_api_object;
|
|
+
|
|
+struct ib_uobject {
|
|
+ u64 user_handle;
|
|
+ struct ib_uverbs_file *ufile;
|
|
+ struct ib_ucontext *context;
|
|
+ void *object;
|
|
+ struct list_head list;
|
|
+ struct ib_rdmacg_object cg_obj;
|
|
+ int id;
|
|
+ struct kref ref;
|
|
+ atomic_t usecnt;
|
|
+ struct callback_head rcu;
|
|
+ const struct uverbs_api_object *uapi_object;
|
|
+};
|
|
+
|
|
+struct ib_udata {
|
|
+ const void *inbuf;
|
|
+ void *outbuf;
|
|
+ size_t inlen;
|
|
+ size_t outlen;
|
|
+};
|
|
+
|
|
+struct ib_pd {
|
|
+ u32 local_dma_lkey;
|
|
+ u32 flags;
|
|
+ struct ib_device *device;
|
|
+ struct ib_uobject *uobject;
|
|
+ atomic_t usecnt;
|
|
+ u32 unsafe_global_rkey;
|
|
+ struct ib_mr *__internal_mr;
|
|
+ struct rdma_restrack_entry res;
|
|
+};
|
|
+
|
|
+struct ib_wq_init_attr {
|
|
+ void *wq_context;
|
|
+ enum ib_wq_type wq_type;
|
|
+ u32 max_wr;
|
|
+ u32 max_sge;
|
|
+ struct ib_cq *cq;
|
|
+ void (*event_handler)(struct ib_event *, void *);
|
|
+ u32 create_flags;
|
|
+};
|
|
+
|
|
+struct ib_wq_attr {
|
|
+ enum ib_wq_state wq_state;
|
|
+ enum ib_wq_state curr_wq_state;
|
|
+ u32 flags;
|
|
+ u32 flags_mask;
|
|
+};
|
|
+
|
|
+struct ib_rwq_ind_table_init_attr {
|
|
+ u32 log_ind_tbl_size;
|
|
+ struct ib_wq **ind_tbl;
|
|
+};
|
|
+
|
|
+enum port_pkey_state {
|
|
+ IB_PORT_PKEY_NOT_VALID = 0,
|
|
+ IB_PORT_PKEY_VALID = 1,
|
|
+ IB_PORT_PKEY_LISTED = 2,
|
|
+};
|
|
+
|
|
+struct ib_port_pkey {
|
|
+ enum port_pkey_state state;
|
|
+ u16 pkey_index;
|
|
+ u8 port_num;
|
|
+ struct list_head qp_list;
|
|
+ struct list_head to_error_list;
|
|
+ struct ib_qp_security *sec;
|
|
+};
|
|
+
|
|
+struct ib_ports_pkeys;
|
|
+
|
|
+struct ib_qp_security {
|
|
+ struct ib_qp *qp;
|
|
+ struct ib_device *dev;
|
|
+ struct mutex mutex;
|
|
+ struct ib_ports_pkeys *ports_pkeys;
|
|
+ struct list_head shared_qp_list;
|
|
+ void *security;
|
|
+ bool destroying;
|
|
+ atomic_t error_list_count;
|
|
+ struct completion error_complete;
|
|
+ int error_comps_pending;
|
|
+};
|
|
+
|
|
+struct ib_ports_pkeys {
|
|
+ struct ib_port_pkey main;
|
|
+ struct ib_port_pkey alt;
|
|
+};
|
|
+
|
|
+struct ib_dm {
|
|
+ struct ib_device *device;
|
|
+ u32 length;
|
|
+ u32 flags;
|
|
+ struct ib_uobject *uobject;
|
|
+ atomic_t usecnt;
|
|
+};
|
|
+
|
|
+struct ib_mw {
|
|
+ struct ib_device *device;
|
|
+ struct ib_pd *pd;
|
|
+ struct ib_uobject *uobject;
|
|
+ u32 rkey;
|
|
+ enum ib_mw_type type;
|
|
+};
|
|
+
|
|
+struct ib_fmr {
|
|
+ struct ib_device *device;
|
|
+ struct ib_pd *pd;
|
|
+ struct list_head list;
|
|
+ u32 lkey;
|
|
+ u32 rkey;
|
|
+};
|
|
+
|
|
+enum ib_flow_attr_type {
|
|
+ IB_FLOW_ATTR_NORMAL = 0,
|
|
+ IB_FLOW_ATTR_ALL_DEFAULT = 1,
|
|
+ IB_FLOW_ATTR_MC_DEFAULT = 2,
|
|
+ IB_FLOW_ATTR_SNIFFER = 3,
|
|
+};
|
|
+
|
|
+enum ib_flow_spec_type {
|
|
+ IB_FLOW_SPEC_ETH = 32,
|
|
+ IB_FLOW_SPEC_IB = 34,
|
|
+ IB_FLOW_SPEC_IPV4 = 48,
|
|
+ IB_FLOW_SPEC_IPV6 = 49,
|
|
+ IB_FLOW_SPEC_ESP = 52,
|
|
+ IB_FLOW_SPEC_TCP = 64,
|
|
+ IB_FLOW_SPEC_UDP = 65,
|
|
+ IB_FLOW_SPEC_VXLAN_TUNNEL = 80,
|
|
+ IB_FLOW_SPEC_GRE = 81,
|
|
+ IB_FLOW_SPEC_MPLS = 96,
|
|
+ IB_FLOW_SPEC_INNER = 256,
|
|
+ IB_FLOW_SPEC_ACTION_TAG = 4096,
|
|
+ IB_FLOW_SPEC_ACTION_DROP = 4097,
|
|
+ IB_FLOW_SPEC_ACTION_HANDLE = 4098,
|
|
+ IB_FLOW_SPEC_ACTION_COUNT = 4099,
|
|
+};
|
|
+
|
|
+struct ib_flow_eth_filter {
|
|
+ u8 dst_mac[6];
|
|
+ u8 src_mac[6];
|
|
+ __be16 ether_type;
|
|
+ __be16 vlan_tag;
|
|
+ u8 real_sz[0];
|
|
+};
|
|
+
|
|
+struct ib_flow_spec_eth {
|
|
+ u32 type;
|
|
+ u16 size;
|
|
+ struct ib_flow_eth_filter val;
|
|
+ struct ib_flow_eth_filter mask;
|
|
+};
|
|
+
|
|
+struct ib_flow_ib_filter {
|
|
+ __be16 dlid;
|
|
+ __u8 sl;
|
|
+ u8 real_sz[0];
|
|
+};
|
|
+
|
|
+struct ib_flow_spec_ib {
|
|
+ u32 type;
|
|
+ u16 size;
|
|
+ struct ib_flow_ib_filter val;
|
|
+ struct ib_flow_ib_filter mask;
|
|
+};
|
|
+
|
|
+struct ib_flow_ipv4_filter {
|
|
+ __be32 src_ip;
|
|
+ __be32 dst_ip;
|
|
+ u8 proto;
|
|
+ u8 tos;
|
|
+ u8 ttl;
|
|
+ u8 flags;
|
|
+ u8 real_sz[0];
|
|
+};
|
|
+
|
|
+struct ib_flow_spec_ipv4 {
|
|
+ u32 type;
|
|
+ u16 size;
|
|
+ struct ib_flow_ipv4_filter val;
|
|
+ struct ib_flow_ipv4_filter mask;
|
|
+};
|
|
+
|
|
+struct ib_flow_ipv6_filter {
|
|
+ u8 src_ip[16];
|
|
+ u8 dst_ip[16];
|
|
+ __be32 flow_label;
|
|
+ u8 next_hdr;
|
|
+ u8 traffic_class;
|
|
+ u8 hop_limit;
|
|
+ u8 real_sz[0];
|
|
+};
|
|
+
|
|
+struct ib_flow_spec_ipv6 {
|
|
+ u32 type;
|
|
+ u16 size;
|
|
+ struct ib_flow_ipv6_filter val;
|
|
+ struct ib_flow_ipv6_filter mask;
|
|
+};
|
|
+
|
|
+struct ib_flow_tcp_udp_filter {
|
|
+ __be16 dst_port;
|
|
+ __be16 src_port;
|
|
+ u8 real_sz[0];
|
|
+};
|
|
+
|
|
+struct ib_flow_spec_tcp_udp {
|
|
+ u32 type;
|
|
+ u16 size;
|
|
+ struct ib_flow_tcp_udp_filter val;
|
|
+ struct ib_flow_tcp_udp_filter mask;
|
|
+};
|
|
+
|
|
+struct ib_flow_tunnel_filter {
|
|
+ __be32 tunnel_id;
|
|
+ u8 real_sz[0];
|
|
+};
|
|
+
|
|
+struct ib_flow_spec_tunnel {
|
|
+ u32 type;
|
|
+ u16 size;
|
|
+ struct ib_flow_tunnel_filter val;
|
|
+ struct ib_flow_tunnel_filter mask;
|
|
+};
|
|
+
|
|
+struct ib_flow_esp_filter {
|
|
+ __be32 spi;
|
|
+ __be32 seq;
|
|
+ u8 real_sz[0];
|
|
+};
|
|
+
|
|
+struct ib_flow_spec_esp {
|
|
+ u32 type;
|
|
+ u16 size;
|
|
+ struct ib_flow_esp_filter val;
|
|
+ struct ib_flow_esp_filter mask;
|
|
+};
|
|
+
|
|
+struct ib_flow_gre_filter {
|
|
+ __be16 c_ks_res0_ver;
|
|
+ __be16 protocol;
|
|
+ __be32 key;
|
|
+ u8 real_sz[0];
|
|
+};
|
|
+
|
|
+struct ib_flow_spec_gre {
|
|
+ u32 type;
|
|
+ u16 size;
|
|
+ struct ib_flow_gre_filter val;
|
|
+ struct ib_flow_gre_filter mask;
|
|
+};
|
|
+
|
|
+struct ib_flow_mpls_filter {
|
|
+ __be32 tag;
|
|
+ u8 real_sz[0];
|
|
+};
|
|
+
|
|
+struct ib_flow_spec_mpls {
|
|
+ u32 type;
|
|
+ u16 size;
|
|
+ struct ib_flow_mpls_filter val;
|
|
+ struct ib_flow_mpls_filter mask;
|
|
+};
|
|
+
|
|
+struct ib_flow_spec_action_tag {
|
|
+ enum ib_flow_spec_type type;
|
|
+ u16 size;
|
|
+ u32 tag_id;
|
|
+};
|
|
+
|
|
+struct ib_flow_spec_action_drop {
|
|
+ enum ib_flow_spec_type type;
|
|
+ u16 size;
|
|
+};
|
|
+
|
|
+struct ib_flow_spec_action_handle {
|
|
+ enum ib_flow_spec_type type;
|
|
+ u16 size;
|
|
+ struct ib_flow_action *act;
|
|
+};
|
|
+
|
|
+enum ib_flow_action_type {
|
|
+ IB_FLOW_ACTION_UNSPECIFIED = 0,
|
|
+ IB_FLOW_ACTION_ESP = 1,
|
|
+};
|
|
+
|
|
+struct ib_flow_action {
|
|
+ struct ib_device *device;
|
|
+ struct ib_uobject *uobject;
|
|
+ enum ib_flow_action_type type;
|
|
+ atomic_t usecnt;
|
|
+};
|
|
+
|
|
+struct ib_flow_spec_action_count {
|
|
+ enum ib_flow_spec_type type;
|
|
+ u16 size;
|
|
+ struct ib_counters *counters;
|
|
+};
|
|
+
|
|
+struct ib_counters {
|
|
+ struct ib_device *device;
|
|
+ struct ib_uobject *uobject;
|
|
+ atomic_t usecnt;
|
|
+};
|
|
+
|
|
+union ib_flow_spec {
|
|
+ struct {
|
|
+ u32 type;
|
|
+ u16 size;
|
|
+ };
|
|
+ struct ib_flow_spec_eth eth;
|
|
+ struct ib_flow_spec_ib ib;
|
|
+ struct ib_flow_spec_ipv4 ipv4;
|
|
+ struct ib_flow_spec_tcp_udp tcp_udp;
|
|
+ struct ib_flow_spec_ipv6 ipv6;
|
|
+ struct ib_flow_spec_tunnel tunnel;
|
|
+ struct ib_flow_spec_esp esp;
|
|
+ struct ib_flow_spec_gre gre;
|
|
+ struct ib_flow_spec_mpls mpls;
|
|
+ struct ib_flow_spec_action_tag flow_tag;
|
|
+ struct ib_flow_spec_action_drop drop;
|
|
+ struct ib_flow_spec_action_handle action;
|
|
+ struct ib_flow_spec_action_count flow_count;
|
|
+};
|
|
+
|
|
+struct ib_flow_attr {
|
|
+ enum ib_flow_attr_type type;
|
|
+ u16 size;
|
|
+ u16 priority;
|
|
+ u32 flags;
|
|
+ u8 num_of_specs;
|
|
+ u8 port;
|
|
+ union ib_flow_spec flows[0];
|
|
+};
|
|
+
|
|
+struct ib_flow {
|
|
+ struct ib_qp *qp;
|
|
+ struct ib_device *device;
|
|
+ struct ib_uobject *uobject;
|
|
+};
|
|
+
|
|
+struct ib_flow_action_attrs_esp_keymats {
|
|
+ enum ib_uverbs_flow_action_esp_keymat protocol;
|
|
+ union {
|
|
+ struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
|
|
+ } keymat;
|
|
+};
|
|
+
|
|
+struct ib_flow_action_attrs_esp_replays {
|
|
+ enum ib_uverbs_flow_action_esp_replay protocol;
|
|
+ union {
|
|
+ struct ib_uverbs_flow_action_esp_replay_bmp bmp;
|
|
+ } replay;
|
|
+};
|
|
+
|
|
+struct ib_flow_spec_list {
|
|
+ struct ib_flow_spec_list *next;
|
|
+ union ib_flow_spec spec;
|
|
+};
|
|
+
|
|
+struct ib_flow_action_attrs_esp {
|
|
+ struct ib_flow_action_attrs_esp_keymats *keymat;
|
|
+ struct ib_flow_action_attrs_esp_replays *replay;
|
|
+ struct ib_flow_spec_list *encap;
|
|
+ u32 esn;
|
|
+ u32 spi;
|
|
+ u32 seq;
|
|
+ u32 tfc_pad;
|
|
+ u64 flags;
|
|
+ u64 hard_limit_pkts;
|
|
+};
|
|
+
|
|
+struct ib_pkey_cache;
|
|
+
|
|
+struct ib_gid_table;
|
|
+
|
|
+struct ib_port_cache {
|
|
+ u64 subnet_prefix;
|
|
+ struct ib_pkey_cache *pkey;
|
|
+ struct ib_gid_table *gid;
|
|
+ u8 lmc;
|
|
+ enum ib_port_state port_state;
|
|
+};
|
|
+
|
|
+struct ib_port_immutable {
|
|
+ int pkey_tbl_len;
|
|
+ int gid_tbl_len;
|
|
+ u32 core_cap_flags;
|
|
+ u32 max_mad_size;
|
|
+};
|
|
+
|
|
+struct ib_port_pkey_list {
|
|
+ spinlock_t list_lock;
|
|
+ struct list_head pkey_list;
|
|
+};
|
|
+
|
|
+struct ib_counters_read_attr {
|
|
+ u64 *counters_buff;
|
|
+ u32 ncounters;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+enum wbt_flags {
|
|
+ WBT_TRACKED = 1,
|
|
+ WBT_READ = 2,
|
|
+ WBT_KSWAPD = 4,
|
|
+ WBT_DISCARD = 8,
|
|
+ WBT_NR_BITS = 4,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ WBT_STATE_ON_DEFAULT = 1,
|
|
+ WBT_STATE_ON_MANUAL = 2,
|
|
+ WBT_STATE_OFF_DEFAULT = 3,
|
|
+};
|
|
+
|
|
+struct rq_wb {
|
|
+ unsigned int wb_background;
|
|
+ unsigned int wb_normal;
|
|
+ short int enable_state;
|
|
+ unsigned int unknown_cnt;
|
|
+ u64 win_nsec;
|
|
+ u64 cur_win_nsec;
|
|
+ struct blk_stat_callback *cb;
|
|
+ u64 sync_issue;
|
|
+ void *sync_cookie;
|
|
+ unsigned int wc;
|
|
+ long unsigned int last_issue;
|
|
+ long unsigned int last_comp;
|
|
+ long unsigned int min_lat_nsec;
|
|
+ struct rq_qos rqos;
|
|
+ struct rq_wait rq_wait[3];
|
|
+ struct rq_depth rq_depth;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_wbt_stat {
|
|
+ struct trace_entry ent;
|
|
+ char name[32];
|
|
+ s64 rmean;
|
|
+ u64 rmin;
|
|
+ u64 rmax;
|
|
+ s64 rnr_samples;
|
|
+ s64 rtime;
|
|
+ s64 wmean;
|
|
+ u64 wmin;
|
|
+ u64 wmax;
|
|
+ s64 wnr_samples;
|
|
+ s64 wtime;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_wbt_lat {
|
|
+ struct trace_entry ent;
|
|
+ char name[32];
|
|
+ long unsigned int lat;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_wbt_step {
|
|
+ struct trace_entry ent;
|
|
+ char name[32];
|
|
+ const char *msg;
|
|
+ int step;
|
|
+ long unsigned int window;
|
|
+ unsigned int bg;
|
|
+ unsigned int normal;
|
|
+ unsigned int max;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_wbt_timer {
|
|
+ struct trace_entry ent;
|
|
+ char name[32];
|
|
+ unsigned int status;
|
|
+ int step;
|
|
+ unsigned int inflight;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_wbt_stat {};
|
|
+
|
|
+struct trace_event_data_offsets_wbt_lat {};
|
|
+
|
|
+struct trace_event_data_offsets_wbt_step {};
|
|
+
|
|
+struct trace_event_data_offsets_wbt_timer {};
|
|
+
|
|
+enum {
|
|
+ RWB_DEF_DEPTH = 16,
|
|
+ RWB_WINDOW_NSEC = 100000000,
|
|
+ RWB_MIN_WRITE_SAMPLES = 3,
|
|
+ RWB_UNKNOWN_BUMP = 5,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ LAT_OK = 1,
|
|
+ LAT_UNKNOWN = 2,
|
|
+ LAT_UNKNOWN_WRITES = 3,
|
|
+ LAT_EXCEEDED = 4,
|
|
+};
|
|
+
|
|
+struct wbt_wait_data {
|
|
+ struct wait_queue_entry wq;
|
|
+ struct task_struct *task;
|
|
+ struct rq_wb *rwb;
|
|
+ struct rq_wait *rqw;
|
|
+ long unsigned int rw;
|
|
+ bool got_token;
|
|
+};
|
|
+
|
|
+struct show_busy_params {
|
|
+ struct seq_file *m;
|
|
+ struct blk_mq_hw_ctx *hctx;
|
|
+};
|
|
+
|
|
+struct siprand_state {
|
|
+ long unsigned int v0;
|
|
+ long unsigned int v1;
|
|
+ long unsigned int v2;
|
|
+ long unsigned int v3;
|
|
+};
|
|
+
|
|
+typedef __kernel_long_t __kernel_ptrdiff_t;
|
|
+
|
|
+typedef __kernel_ptrdiff_t ptrdiff_t;
|
|
+
|
|
+enum {
|
|
+ REG_OP_ISFREE = 0,
|
|
+ REG_OP_ALLOC = 1,
|
|
+ REG_OP_RELEASE = 2,
|
|
+};
|
|
+
|
|
+typedef struct scatterlist *sg_alloc_fn(unsigned int, gfp_t);
|
|
+
|
|
+typedef void sg_free_fn(struct scatterlist *, unsigned int);
|
|
+
|
|
+struct sg_page_iter {
|
|
+ struct scatterlist *sg;
|
|
+ unsigned int sg_pgoffset;
|
|
+ unsigned int __nents;
|
|
+ int __pg_advance;
|
|
+};
|
|
+
|
|
+struct sg_mapping_iter {
|
|
+ struct page *page;
|
|
+ void *addr;
|
|
+ size_t length;
|
|
+ size_t consumed;
|
|
+ struct sg_page_iter piter;
|
|
+ unsigned int __offset;
|
|
+ unsigned int __remaining;
|
|
+ unsigned int __flags;
|
|
+};
|
|
+
|
|
+struct flex_array_part {
|
|
+ char elements[4096];
|
|
+};
|
|
+
|
|
+struct rhashtable_walker {
|
|
+ struct list_head list;
|
|
+ struct bucket_table *tbl;
|
|
+};
|
|
+
|
|
+struct rhashtable_iter {
|
|
+ struct rhashtable *ht;
|
|
+ struct rhash_head *p;
|
|
+ struct rhlist_head *list;
|
|
+ struct rhashtable_walker walker;
|
|
+ unsigned int slot;
|
|
+ unsigned int skip;
|
|
+ bool end_of_table;
|
|
+};
|
|
+
|
|
+union nested_table {
|
|
+ union nested_table *table;
|
|
+ struct rhash_head *bucket;
|
|
+};
|
|
+
|
|
+struct reciprocal_value_adv {
|
|
+ u32 m;
|
|
+ u8 sh;
|
|
+ u8 exp;
|
|
+ bool is_wide_m;
|
|
+};
|
|
+
|
|
+struct once_work {
|
|
+ struct work_struct work;
|
|
+ struct static_key_true *key;
|
|
+ struct module *module;
|
|
+};
|
|
+
|
|
+struct test_fail {
|
|
+ const char *str;
|
|
+ unsigned int base;
|
|
+};
|
|
+
|
|
+struct test_s8 {
|
|
+ const char *str;
|
|
+ unsigned int base;
|
|
+ s8 expected_res;
|
|
+};
|
|
+
|
|
+struct test_u8 {
|
|
+ const char *str;
|
|
+ unsigned int base;
|
|
+ u8 expected_res;
|
|
+};
|
|
+
|
|
+struct test_s16 {
|
|
+ const char *str;
|
|
+ unsigned int base;
|
|
+ s16 expected_res;
|
|
+};
|
|
+
|
|
+struct test_u16 {
|
|
+ const char *str;
|
|
+ unsigned int base;
|
|
+ u16 expected_res;
|
|
+};
|
|
+
|
|
+struct test_s32 {
|
|
+ const char *str;
|
|
+ unsigned int base;
|
|
+ s32 expected_res;
|
|
+};
|
|
+
|
|
+struct test_u32 {
|
|
+ const char *str;
|
|
+ unsigned int base;
|
|
+ u32 expected_res;
|
|
+};
|
|
+
|
|
+struct test_s64 {
|
|
+ const char *str;
|
|
+ unsigned int base;
|
|
+ s64 expected_res;
|
|
+};
|
|
+
|
|
+struct test_u64 {
|
|
+ const char *str;
|
|
+ unsigned int base;
|
|
+ u64 expected_res;
|
|
+};
|
|
+
|
|
+struct test_ll {
|
|
+ const char *str;
|
|
+ unsigned int base;
|
|
+ long long int expected_res;
|
|
+};
|
|
+
|
|
+struct test_ull {
|
|
+ const char *str;
|
|
+ unsigned int base;
|
|
+ long long unsigned int expected_res;
|
|
+};
|
|
+
|
|
+enum devm_ioremap_type {
|
|
+ DEVM_IOREMAP = 0,
|
|
+ DEVM_IOREMAP_NC = 1,
|
|
+ DEVM_IOREMAP_WC = 2,
|
|
+};
|
|
+
|
|
+struct pcim_iomap_devres {
|
|
+ void *table[6];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ LOGIC_PIO_INDIRECT = 0,
|
|
+ LOGIC_PIO_CPU_MMIO = 1,
|
|
+};
|
|
+
|
|
+struct logic_pio_host_ops;
|
|
+
|
|
+struct logic_pio_hwaddr {
|
|
+ struct list_head list;
|
|
+ struct fwnode_handle *fwnode;
|
|
+ resource_size_t hw_start;
|
|
+ resource_size_t io_start;
|
|
+ resource_size_t size;
|
|
+ long unsigned int flags;
|
|
+ void *hostdata;
|
|
+ const struct logic_pio_host_ops *ops;
|
|
+};
|
|
+
|
|
+struct logic_pio_host_ops {
|
|
+ u32 (*in)(void *, long unsigned int, size_t);
|
|
+ void (*out)(void *, long unsigned int, u32, size_t);
|
|
+ u32 (*ins)(void *, long unsigned int, void *, size_t, unsigned int);
|
|
+ void (*outs)(void *, long unsigned int, const void *, size_t, unsigned int);
|
|
+};
|
|
+
|
|
+struct btree_head {
|
|
+ long unsigned int *node;
|
|
+ mempool_t *mempool;
|
|
+ int height;
|
|
+};
|
|
+
|
|
+struct btree_geo {
|
|
+ int keylen;
|
|
+ int no_pairs;
|
|
+ int no_longs;
|
|
+};
|
|
+
|
|
+typedef void (*visitor128_t)(void *, long unsigned int, u64, u64, size_t);
|
|
+
|
|
+typedef void (*visitorl_t)(void *, long unsigned int, long unsigned int, size_t);
|
|
+
|
|
+typedef void (*visitor32_t)(void *, long unsigned int, u32, size_t);
|
|
+
|
|
+typedef void (*visitor64_t)(void *, long unsigned int, u64, size_t);
|
|
+
|
|
+struct interval_tree_node {
|
|
+ struct rb_node rb;
|
|
+ long unsigned int start;
|
|
+ long unsigned int last;
|
|
+ long unsigned int __subtree_last;
|
|
+};
|
|
+
|
|
+enum assoc_array_walk_status {
|
|
+ assoc_array_walk_tree_empty = 0,
|
|
+ assoc_array_walk_found_terminal_node = 1,
|
|
+ assoc_array_walk_found_wrong_shortcut = 2,
|
|
+};
|
|
+
|
|
+struct assoc_array_walk_result {
|
|
+ struct {
|
|
+ struct assoc_array_node *node;
|
|
+ int level;
|
|
+ int slot;
|
|
+ } terminal_node;
|
|
+ struct {
|
|
+ struct assoc_array_shortcut *shortcut;
|
|
+ int level;
|
|
+ int sc_level;
|
|
+ long unsigned int sc_segments;
|
|
+ long unsigned int dissimilarity;
|
|
+ } wrong_shortcut;
|
|
+};
|
|
+
|
|
+struct assoc_array_delete_collapse_context {
|
|
+ struct assoc_array_node *node;
|
|
+ const void *skip_leaf;
|
|
+ int slot;
|
|
+};
|
|
+
|
|
+struct xxh32_state {
|
|
+ uint32_t total_len_32;
|
|
+ uint32_t large_len;
|
|
+ uint32_t v1;
|
|
+ uint32_t v2;
|
|
+ uint32_t v3;
|
|
+ uint32_t v4;
|
|
+ uint32_t mem32[4];
|
|
+ uint32_t memsize;
|
|
+};
|
|
+
|
|
+struct xxh64_state {
|
|
+ uint64_t total_len;
|
|
+ uint64_t v1;
|
|
+ uint64_t v2;
|
|
+ uint64_t v3;
|
|
+ uint64_t v4;
|
|
+ uint64_t mem64[4];
|
|
+ uint32_t memsize;
|
|
+};
|
|
+
|
|
+struct gen_pool_chunk {
|
|
+ struct list_head next_chunk;
|
|
+ atomic_long_t avail;
|
|
+ phys_addr_t phys_addr;
|
|
+ long unsigned int start_addr;
|
|
+ long unsigned int end_addr;
|
|
+ long unsigned int bits[0];
|
|
+};
|
|
+
|
|
+struct genpool_data_align {
|
|
+ int align;
|
|
+};
|
|
+
|
|
+struct genpool_data_fixed {
|
|
+ long unsigned int offset;
|
|
+};
|
|
+
|
|
+typedef struct z_stream_s z_stream;
|
|
+
|
|
+typedef z_stream *z_streamp;
|
|
+
|
|
+typedef struct {
|
|
+ unsigned char op;
|
|
+ unsigned char bits;
|
|
+ short unsigned int val;
|
|
+} code;
|
|
+
|
|
+typedef enum {
|
|
+ HEAD = 0,
|
|
+ FLAGS = 1,
|
|
+ TIME = 2,
|
|
+ OS = 3,
|
|
+ EXLEN = 4,
|
|
+ EXTRA = 5,
|
|
+ NAME = 6,
|
|
+ COMMENT = 7,
|
|
+ HCRC = 8,
|
|
+ DICTID = 9,
|
|
+ DICT = 10,
|
|
+ TYPE = 11,
|
|
+ TYPEDO = 12,
|
|
+ STORED = 13,
|
|
+ COPY = 14,
|
|
+ TABLE = 15,
|
|
+ LENLENS = 16,
|
|
+ CODELENS = 17,
|
|
+ LEN = 18,
|
|
+ LENEXT = 19,
|
|
+ DIST = 20,
|
|
+ DISTEXT = 21,
|
|
+ MATCH = 22,
|
|
+ LIT = 23,
|
|
+ CHECK = 24,
|
|
+ LENGTH = 25,
|
|
+ DONE = 26,
|
|
+ BAD___2 = 27,
|
|
+ MEM = 28,
|
|
+ SYNC = 29,
|
|
+} inflate_mode;
|
|
+
|
|
+struct inflate_state {
|
|
+ inflate_mode mode;
|
|
+ int last;
|
|
+ int wrap;
|
|
+ int havedict;
|
|
+ int flags;
|
|
+ unsigned int dmax;
|
|
+ long unsigned int check;
|
|
+ long unsigned int total;
|
|
+ unsigned int wbits;
|
|
+ unsigned int wsize;
|
|
+ unsigned int whave;
|
|
+ unsigned int write;
|
|
+ unsigned char *window;
|
|
+ long unsigned int hold;
|
|
+ unsigned int bits;
|
|
+ unsigned int length;
|
|
+ unsigned int offset;
|
|
+ unsigned int extra;
|
|
+ const code *lencode;
|
|
+ const code *distcode;
|
|
+ unsigned int lenbits;
|
|
+ unsigned int distbits;
|
|
+ unsigned int ncode;
|
|
+ unsigned int nlen;
|
|
+ unsigned int ndist;
|
|
+ unsigned int have;
|
|
+ code *next;
|
|
+ short unsigned int lens[320];
|
|
+ short unsigned int work[288];
|
|
+ code codes[2048];
|
|
+};
|
|
+
|
|
+union uu {
|
|
+ short unsigned int us;
|
|
+ unsigned char b[2];
|
|
+};
|
|
+
|
|
+typedef unsigned int uInt;
|
|
+
|
|
+struct inflate_workspace {
|
|
+ struct inflate_state inflate_state;
|
|
+ unsigned char working_window[32768];
|
|
+};
|
|
+
|
|
+typedef enum {
|
|
+ CODES = 0,
|
|
+ LENS = 1,
|
|
+ DISTS = 2,
|
|
+} codetype;
|
|
+
|
|
+typedef unsigned char uch;
|
|
+
|
|
+typedef short unsigned int ush;
|
|
+
|
|
+typedef long unsigned int ulg;
|
|
+
|
|
+struct ct_data_s {
|
|
+ union {
|
|
+ ush freq;
|
|
+ ush code;
|
|
+ } fc;
|
|
+ union {
|
|
+ ush dad;
|
|
+ ush len;
|
|
+ } dl;
|
|
+};
|
|
+
|
|
+typedef struct ct_data_s ct_data;
|
|
+
|
|
+struct static_tree_desc_s {
|
|
+ const ct_data *static_tree;
|
|
+ const int *extra_bits;
|
|
+ int extra_base;
|
|
+ int elems;
|
|
+ int max_length;
|
|
+};
|
|
+
|
|
+typedef struct static_tree_desc_s static_tree_desc;
|
|
+
|
|
+struct tree_desc_s {
|
|
+ ct_data *dyn_tree;
|
|
+ int max_code;
|
|
+ static_tree_desc *stat_desc;
|
|
+};
|
|
+
|
|
+typedef ush Pos;
|
|
+
|
|
+typedef unsigned int IPos;
|
|
+
|
|
+struct deflate_state {
|
|
+ z_streamp strm;
|
|
+ int status;
|
|
+ Byte *pending_buf;
|
|
+ ulg pending_buf_size;
|
|
+ Byte *pending_out;
|
|
+ int pending;
|
|
+ int noheader;
|
|
+ Byte data_type;
|
|
+ Byte method;
|
|
+ int last_flush;
|
|
+ uInt w_size;
|
|
+ uInt w_bits;
|
|
+ uInt w_mask;
|
|
+ Byte *window;
|
|
+ ulg window_size;
|
|
+ Pos *prev;
|
|
+ Pos *head;
|
|
+ uInt ins_h;
|
|
+ uInt hash_size;
|
|
+ uInt hash_bits;
|
|
+ uInt hash_mask;
|
|
+ uInt hash_shift;
|
|
+ long int block_start;
|
|
+ uInt match_length;
|
|
+ IPos prev_match;
|
|
+ int match_available;
|
|
+ uInt strstart;
|
|
+ uInt match_start;
|
|
+ uInt lookahead;
|
|
+ uInt prev_length;
|
|
+ uInt max_chain_length;
|
|
+ uInt max_lazy_match;
|
|
+ int level;
|
|
+ int strategy;
|
|
+ uInt good_match;
|
|
+ int nice_match;
|
|
+ struct ct_data_s dyn_ltree[573];
|
|
+ struct ct_data_s dyn_dtree[61];
|
|
+ struct ct_data_s bl_tree[39];
|
|
+ struct tree_desc_s l_desc;
|
|
+ struct tree_desc_s d_desc;
|
|
+ struct tree_desc_s bl_desc;
|
|
+ ush bl_count[16];
|
|
+ int heap[573];
|
|
+ int heap_len;
|
|
+ int heap_max;
|
|
+ uch depth[573];
|
|
+ uch *l_buf;
|
|
+ uInt lit_bufsize;
|
|
+ uInt last_lit;
|
|
+ ush *d_buf;
|
|
+ ulg opt_len;
|
|
+ ulg static_len;
|
|
+ ulg compressed_len;
|
|
+ uInt matches;
|
|
+ int last_eob_len;
|
|
+ ush bi_buf;
|
|
+ int bi_valid;
|
|
+};
|
|
+
|
|
+typedef struct deflate_state deflate_state;
|
|
+
|
|
+struct deflate_workspace {
|
|
+ deflate_state deflate_memory;
|
|
+ Byte *window_memory;
|
|
+ Pos *prev_memory;
|
|
+ Pos *head_memory;
|
|
+ char *overlay_memory;
|
|
+};
|
|
+
|
|
+typedef struct deflate_workspace deflate_workspace;
|
|
+
|
|
+typedef enum {
|
|
+ need_more = 0,
|
|
+ block_done = 1,
|
|
+ finish_started = 2,
|
|
+ finish_done = 3,
|
|
+} block_state;
|
|
+
|
|
+typedef block_state (*compress_func)(deflate_state *, int);
|
|
+
|
|
+struct config_s {
|
|
+ ush good_length;
|
|
+ ush max_lazy;
|
|
+ ush nice_length;
|
|
+ ush max_chain;
|
|
+ compress_func func;
|
|
+};
|
|
+
|
|
+typedef struct config_s config;
|
|
+
|
|
+typedef struct tree_desc_s tree_desc;
|
|
+
|
|
+typedef struct {
|
|
+ const uint8_t *externalDict;
|
|
+ size_t extDictSize;
|
|
+ const uint8_t *prefixEnd;
|
|
+ size_t prefixSize;
|
|
+} LZ4_streamDecode_t_internal;
|
|
+
|
|
+typedef union {
|
|
+ long long unsigned int table[4];
|
|
+ LZ4_streamDecode_t_internal internal_donotuse;
|
|
+} LZ4_streamDecode_t;
|
|
+
|
|
+typedef uint8_t BYTE;
|
|
+
|
|
+typedef uint16_t U16;
|
|
+
|
|
+typedef uint32_t U32;
|
|
+
|
|
+typedef uint64_t U64;
|
|
+
|
|
+enum {
|
|
+ noDict = 0,
|
|
+ withPrefix64k = 1,
|
|
+ usingExtDict = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ endOnOutputSize = 0,
|
|
+ endOnInputSize = 1,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ full = 0,
|
|
+ partial = 1,
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ size_t bitContainer;
|
|
+ int bitPos;
|
|
+ char *startPtr;
|
|
+ char *ptr;
|
|
+ char *endPtr;
|
|
+} BIT_CStream_t;
|
|
+
|
|
+typedef unsigned int FSE_CTable;
|
|
+
|
|
+typedef struct {
|
|
+ ptrdiff_t value;
|
|
+ const void *stateTable;
|
|
+ const void *symbolTT;
|
|
+ unsigned int stateLog;
|
|
+} FSE_CState_t;
|
|
+
|
|
+typedef struct {
|
|
+ int deltaFindState;
|
|
+ U32 deltaNbBits;
|
|
+} FSE_symbolCompressionTransform;
|
|
+
|
|
+typedef int16_t S16;
|
|
+
|
|
+struct HUF_CElt_s {
|
|
+ U16 val;
|
|
+ BYTE nbBits;
|
|
+};
|
|
+
|
|
+typedef struct HUF_CElt_s HUF_CElt;
|
|
+
|
|
+typedef enum {
|
|
+ HUF_repeat_none = 0,
|
|
+ HUF_repeat_check = 1,
|
|
+ HUF_repeat_valid = 2,
|
|
+} HUF_repeat;
|
|
+
|
|
+struct nodeElt_s {
|
|
+ U32 count;
|
|
+ U16 parent;
|
|
+ BYTE byte;
|
|
+ BYTE nbBits;
|
|
+};
|
|
+
|
|
+typedef struct nodeElt_s nodeElt;
|
|
+
|
|
+typedef struct {
|
|
+ U32 base;
|
|
+ U32 curr;
|
|
+} rankPos;
|
|
+
|
|
+typedef enum {
|
|
+ ZSTDcs_created = 0,
|
|
+ ZSTDcs_init = 1,
|
|
+ ZSTDcs_ongoing = 2,
|
|
+ ZSTDcs_ending = 3,
|
|
+} ZSTD_compressionStage_e;
|
|
+
|
|
+typedef void * (*ZSTD_allocFunction)(void *, size_t);
|
|
+
|
|
+typedef void (*ZSTD_freeFunction)(void *, void *);
|
|
+
|
|
+typedef struct {
|
|
+ ZSTD_allocFunction customAlloc;
|
|
+ ZSTD_freeFunction customFree;
|
|
+ void *opaque;
|
|
+} ZSTD_customMem;
|
|
+
|
|
+typedef struct {
|
|
+ U32 price;
|
|
+ U32 off;
|
|
+ U32 mlen;
|
|
+ U32 litlen;
|
|
+ U32 rep[3];
|
|
+} ZSTD_optimal_t;
|
|
+
|
|
+typedef struct {
|
|
+ U32 off;
|
|
+ U32 len;
|
|
+} ZSTD_match_t;
|
|
+
|
|
+struct seqDef_s;
|
|
+
|
|
+typedef struct seqDef_s seqDef;
|
|
+
|
|
+typedef struct {
|
|
+ seqDef *sequencesStart;
|
|
+ seqDef *sequences;
|
|
+ BYTE *litStart;
|
|
+ BYTE *lit;
|
|
+ BYTE *llCode;
|
|
+ BYTE *mlCode;
|
|
+ BYTE *ofCode;
|
|
+ U32 longLengthID;
|
|
+ U32 longLengthPos;
|
|
+ ZSTD_optimal_t *priceTable;
|
|
+ ZSTD_match_t *matchTable;
|
|
+ U32 *matchLengthFreq;
|
|
+ U32 *litLengthFreq;
|
|
+ U32 *litFreq;
|
|
+ U32 *offCodeFreq;
|
|
+ U32 matchLengthSum;
|
|
+ U32 matchSum;
|
|
+ U32 litLengthSum;
|
|
+ U32 litSum;
|
|
+ U32 offCodeSum;
|
|
+ U32 log2matchLengthSum;
|
|
+ U32 log2matchSum;
|
|
+ U32 log2litLengthSum;
|
|
+ U32 log2litSum;
|
|
+ U32 log2offCodeSum;
|
|
+ U32 factor;
|
|
+ U32 staticPrices;
|
|
+ U32 cachedPrice;
|
|
+ U32 cachedLitLength;
|
|
+ const BYTE *cachedLiterals;
|
|
+} seqStore_t;
|
|
+
|
|
+struct HUF_CElt_s___2;
|
|
+
|
|
+typedef struct HUF_CElt_s___2 HUF_CElt___2;
|
|
+
|
|
+struct ZSTD_CCtx_s___2 {
|
|
+ const BYTE *nextSrc;
|
|
+ const BYTE *base;
|
|
+ const BYTE *dictBase;
|
|
+ U32 dictLimit;
|
|
+ U32 lowLimit;
|
|
+ U32 nextToUpdate;
|
|
+ U32 nextToUpdate3;
|
|
+ U32 hashLog3;
|
|
+ U32 loadedDictEnd;
|
|
+ U32 forceWindow;
|
|
+ U32 forceRawDict;
|
|
+ ZSTD_compressionStage_e stage;
|
|
+ U32 rep[3];
|
|
+ U32 repToConfirm[3];
|
|
+ U32 dictID;
|
|
+ ZSTD_parameters params;
|
|
+ void *workSpace;
|
|
+ size_t workSpaceSize;
|
|
+ size_t blockSize;
|
|
+ U64 frameContentSize;
|
|
+ struct xxh64_state xxhState;
|
|
+ ZSTD_customMem customMem;
|
|
+ seqStore_t seqStore;
|
|
+ U32 *hashTable;
|
|
+ U32 *hashTable3;
|
|
+ U32 *chainTable;
|
|
+ HUF_CElt___2 *hufTable;
|
|
+ U32 flagStaticTables;
|
|
+ HUF_repeat flagStaticHufTable;
|
|
+ FSE_CTable offcodeCTable[187];
|
|
+ FSE_CTable matchlengthCTable[363];
|
|
+ FSE_CTable litlengthCTable[329];
|
|
+ unsigned int tmpCounters[1536];
|
|
+};
|
|
+
|
|
+typedef struct ZSTD_CCtx_s___2 ZSTD_CCtx___2;
|
|
+
|
|
+struct ZSTD_CDict_s {
|
|
+ void *dictBuffer;
|
|
+ const void *dictContent;
|
|
+ size_t dictContentSize;
|
|
+ ZSTD_CCtx___2 *refContext;
|
|
+};
|
|
+
|
|
+typedef struct ZSTD_CDict_s ZSTD_CDict;
|
|
+
|
|
+struct ZSTD_inBuffer_s {
|
|
+ const void *src;
|
|
+ size_t size;
|
|
+ size_t pos;
|
|
+};
|
|
+
|
|
+typedef struct ZSTD_inBuffer_s ZSTD_inBuffer;
|
|
+
|
|
+struct ZSTD_outBuffer_s {
|
|
+ void *dst;
|
|
+ size_t size;
|
|
+ size_t pos;
|
|
+};
|
|
+
|
|
+typedef struct ZSTD_outBuffer_s ZSTD_outBuffer;
|
|
+
|
|
+typedef enum {
|
|
+ zcss_init = 0,
|
|
+ zcss_load = 1,
|
|
+ zcss_flush = 2,
|
|
+ zcss_final = 3,
|
|
+} ZSTD_cStreamStage;
|
|
+
|
|
+struct ZSTD_CStream_s {
|
|
+ ZSTD_CCtx___2 *cctx;
|
|
+ ZSTD_CDict *cdictLocal;
|
|
+ const ZSTD_CDict *cdict;
|
|
+ char *inBuff;
|
|
+ size_t inBuffSize;
|
|
+ size_t inToCompress;
|
|
+ size_t inBuffPos;
|
|
+ size_t inBuffTarget;
|
|
+ size_t blockSize;
|
|
+ char *outBuff;
|
|
+ size_t outBuffSize;
|
|
+ size_t outBuffContentSize;
|
|
+ size_t outBuffFlushedSize;
|
|
+ ZSTD_cStreamStage stage;
|
|
+ U32 checksum;
|
|
+ U32 frameEnded;
|
|
+ U64 pledgedSrcSize;
|
|
+ U64 inputProcessed;
|
|
+ ZSTD_parameters params;
|
|
+ ZSTD_customMem customMem;
|
|
+};
|
|
+
|
|
+typedef struct ZSTD_CStream_s ZSTD_CStream;
|
|
+
|
|
+typedef int32_t S32;
|
|
+
|
|
+typedef enum {
|
|
+ set_basic = 0,
|
|
+ set_rle = 1,
|
|
+ set_compressed = 2,
|
|
+ set_repeat = 3,
|
|
+} symbolEncodingType_e;
|
|
+
|
|
+struct seqDef_s {
|
|
+ U32 offset;
|
|
+ U16 litLength;
|
|
+ U16 matchLength;
|
|
+};
|
|
+
|
|
+typedef enum {
|
|
+ ZSTDcrp_continue = 0,
|
|
+ ZSTDcrp_noMemset = 1,
|
|
+ ZSTDcrp_fullReset = 2,
|
|
+} ZSTD_compResetPolicy_e;
|
|
+
|
|
+typedef void (*ZSTD_blockCompressor)(ZSTD_CCtx___2 *, const void *, size_t);
|
|
+
|
|
+typedef enum {
|
|
+ zsf_gather = 0,
|
|
+ zsf_flush = 1,
|
|
+ zsf_end = 2,
|
|
+} ZSTD_flush_e;
|
|
+
|
|
+typedef size_t (*searchMax_f)(ZSTD_CCtx___2 *, const BYTE *, const BYTE *, size_t *, U32, U32);
|
|
+
|
|
+typedef struct {
|
|
+ size_t bitContainer;
|
|
+ unsigned int bitsConsumed;
|
|
+ const char *ptr;
|
|
+ const char *start;
|
|
+} BIT_DStream_t;
|
|
+
|
|
+typedef enum {
|
|
+ BIT_DStream_unfinished = 0,
|
|
+ BIT_DStream_endOfBuffer = 1,
|
|
+ BIT_DStream_completed = 2,
|
|
+ BIT_DStream_overflow = 3,
|
|
+} BIT_DStream_status;
|
|
+
|
|
+typedef unsigned int FSE_DTable;
|
|
+
|
|
+typedef struct {
|
|
+ size_t state;
|
|
+ const void *table;
|
|
+} FSE_DState_t;
|
|
+
|
|
+typedef struct {
|
|
+ U16 tableLog;
|
|
+ U16 fastMode;
|
|
+} FSE_DTableHeader;
|
|
+
|
|
+typedef struct {
|
|
+ short unsigned int newState;
|
|
+ unsigned char symbol;
|
|
+ unsigned char nbBits;
|
|
+} FSE_decode_t;
|
|
+
|
|
+typedef struct {
|
|
+ void *ptr;
|
|
+ const void *end;
|
|
+} ZSTD_stack;
|
|
+
|
|
+typedef U32 HUF_DTable;
|
|
+
|
|
+typedef struct {
|
|
+ BYTE maxTableLog;
|
|
+ BYTE tableType;
|
|
+ BYTE tableLog;
|
|
+ BYTE reserved;
|
|
+} DTableDesc;
|
|
+
|
|
+typedef struct {
|
|
+ BYTE byte;
|
|
+ BYTE nbBits;
|
|
+} HUF_DEltX2;
|
|
+
|
|
+typedef struct {
|
|
+ U16 sequence;
|
|
+ BYTE nbBits;
|
|
+ BYTE length;
|
|
+} HUF_DEltX4;
|
|
+
|
|
+typedef struct {
|
|
+ BYTE symbol;
|
|
+ BYTE weight;
|
|
+} sortedSymbol_t;
|
|
+
|
|
+typedef U32 rankValCol_t[13];
|
|
+
|
|
+typedef struct {
|
|
+ U32 tableTime;
|
|
+ U32 decode256Time;
|
|
+} algo_time_t;
|
|
+
|
|
+typedef struct {
|
|
+ FSE_DTable LLTable[513];
|
|
+ FSE_DTable OFTable[257];
|
|
+ FSE_DTable MLTable[513];
|
|
+ HUF_DTable hufTable[4097];
|
|
+ U64 workspace[384];
|
|
+ U32 rep[3];
|
|
+} ZSTD_entropyTables_t;
|
|
+
|
|
+typedef struct {
|
|
+ long long unsigned int frameContentSize;
|
|
+ unsigned int windowSize;
|
|
+ unsigned int dictID;
|
|
+ unsigned int checksumFlag;
|
|
+} ZSTD_frameParams;
|
|
+
|
|
+typedef enum {
|
|
+ bt_raw = 0,
|
|
+ bt_rle = 1,
|
|
+ bt_compressed = 2,
|
|
+ bt_reserved = 3,
|
|
+} blockType_e;
|
|
+
|
|
+typedef enum {
|
|
+ ZSTDds_getFrameHeaderSize = 0,
|
|
+ ZSTDds_decodeFrameHeader = 1,
|
|
+ ZSTDds_decodeBlockHeader = 2,
|
|
+ ZSTDds_decompressBlock = 3,
|
|
+ ZSTDds_decompressLastBlock = 4,
|
|
+ ZSTDds_checkChecksum = 5,
|
|
+ ZSTDds_decodeSkippableHeader = 6,
|
|
+ ZSTDds_skipFrame = 7,
|
|
+} ZSTD_dStage;
|
|
+
|
|
+struct ZSTD_DCtx_s___2 {
|
|
+ const FSE_DTable *LLTptr;
|
|
+ const FSE_DTable *MLTptr;
|
|
+ const FSE_DTable *OFTptr;
|
|
+ const HUF_DTable *HUFptr;
|
|
+ ZSTD_entropyTables_t entropy;
|
|
+ const void *previousDstEnd;
|
|
+ const void *base;
|
|
+ const void *vBase;
|
|
+ const void *dictEnd;
|
|
+ size_t expected;
|
|
+ ZSTD_frameParams fParams;
|
|
+ blockType_e bType;
|
|
+ ZSTD_dStage stage;
|
|
+ U32 litEntropy;
|
|
+ U32 fseEntropy;
|
|
+ struct xxh64_state xxhState;
|
|
+ size_t headerSize;
|
|
+ U32 dictID;
|
|
+ const BYTE *litPtr;
|
|
+ ZSTD_customMem customMem;
|
|
+ size_t litSize;
|
|
+ size_t rleSize;
|
|
+ BYTE litBuffer[131080];
|
|
+ BYTE headerBuffer[18];
|
|
+};
|
|
+
|
|
+typedef struct ZSTD_DCtx_s___2 ZSTD_DCtx___2;
|
|
+
|
|
+struct ZSTD_DDict_s {
|
|
+ void *dictBuffer;
|
|
+ const void *dictContent;
|
|
+ size_t dictSize;
|
|
+ ZSTD_entropyTables_t entropy;
|
|
+ U32 dictID;
|
|
+ U32 entropyPresent;
|
|
+ ZSTD_customMem cMem;
|
|
+};
|
|
+
|
|
+typedef struct ZSTD_DDict_s ZSTD_DDict;
|
|
+
|
|
+typedef enum {
|
|
+ zdss_init = 0,
|
|
+ zdss_loadHeader = 1,
|
|
+ zdss_read = 2,
|
|
+ zdss_load = 3,
|
|
+ zdss_flush = 4,
|
|
+} ZSTD_dStreamStage;
|
|
+
|
|
+struct ZSTD_DStream_s {
|
|
+ ZSTD_DCtx___2 *dctx;
|
|
+ ZSTD_DDict *ddictLocal;
|
|
+ const ZSTD_DDict *ddict;
|
|
+ ZSTD_frameParams fParams;
|
|
+ ZSTD_dStreamStage stage;
|
|
+ char *inBuff;
|
|
+ size_t inBuffSize;
|
|
+ size_t inPos;
|
|
+ size_t maxWindowSize;
|
|
+ char *outBuff;
|
|
+ size_t outBuffSize;
|
|
+ size_t outStart;
|
|
+ size_t outEnd;
|
|
+ size_t blockSize;
|
|
+ BYTE headerBuffer[18];
|
|
+ size_t lhSize;
|
|
+ ZSTD_customMem customMem;
|
|
+ void *legacyContext;
|
|
+ U32 previousLegacyVersion;
|
|
+ U32 legacyVersion;
|
|
+ U32 hostageByte;
|
|
+};
|
|
+
|
|
+typedef struct ZSTD_DStream_s ZSTD_DStream;
|
|
+
|
|
+typedef enum {
|
|
+ ZSTDnit_frameHeader = 0,
|
|
+ ZSTDnit_blockHeader = 1,
|
|
+ ZSTDnit_block = 2,
|
|
+ ZSTDnit_lastBlock = 3,
|
|
+ ZSTDnit_checksum = 4,
|
|
+ ZSTDnit_skippableFrame = 5,
|
|
+} ZSTD_nextInputType_e;
|
|
+
|
|
+typedef uintptr_t uPtrDiff;
|
|
+
|
|
+typedef struct {
|
|
+ blockType_e blockType;
|
|
+ U32 lastBlock;
|
|
+ U32 origSize;
|
|
+} blockProperties_t;
|
|
+
|
|
+typedef union {
|
|
+ FSE_decode_t realData;
|
|
+ U32 alignedBy4;
|
|
+} FSE_decode_t4;
|
|
+
|
|
+typedef struct {
|
|
+ size_t litLength;
|
|
+ size_t matchLength;
|
|
+ size_t offset;
|
|
+ const BYTE *match;
|
|
+} seq_t;
|
|
+
|
|
+typedef struct {
|
|
+ BIT_DStream_t DStream;
|
|
+ FSE_DState_t stateLL;
|
|
+ FSE_DState_t stateOffb;
|
|
+ FSE_DState_t stateML;
|
|
+ size_t prevOffset[3];
|
|
+ const BYTE *base;
|
|
+ size_t pos;
|
|
+ uPtrDiff gotoDict;
|
|
+} seqState_t;
|
|
+
|
|
+enum xz_mode {
|
|
+ XZ_SINGLE = 0,
|
|
+ XZ_PREALLOC = 1,
|
|
+ XZ_DYNALLOC = 2,
|
|
+};
|
|
+
|
|
+enum xz_ret {
|
|
+ XZ_OK = 0,
|
|
+ XZ_STREAM_END = 1,
|
|
+ XZ_UNSUPPORTED_CHECK = 2,
|
|
+ XZ_MEM_ERROR = 3,
|
|
+ XZ_MEMLIMIT_ERROR = 4,
|
|
+ XZ_FORMAT_ERROR = 5,
|
|
+ XZ_OPTIONS_ERROR = 6,
|
|
+ XZ_DATA_ERROR = 7,
|
|
+ XZ_BUF_ERROR = 8,
|
|
+};
|
|
+
|
|
+struct xz_buf {
|
|
+ const uint8_t *in;
|
|
+ size_t in_pos;
|
|
+ size_t in_size;
|
|
+ uint8_t *out;
|
|
+ size_t out_pos;
|
|
+ size_t out_size;
|
|
+};
|
|
+
|
|
+typedef uint64_t vli_type;
|
|
+
|
|
+enum xz_check {
|
|
+ XZ_CHECK_NONE = 0,
|
|
+ XZ_CHECK_CRC32 = 1,
|
|
+ XZ_CHECK_CRC64 = 4,
|
|
+ XZ_CHECK_SHA256 = 10,
|
|
+};
|
|
+
|
|
+struct xz_dec_hash {
|
|
+ vli_type unpadded;
|
|
+ vli_type uncompressed;
|
|
+ uint32_t crc32;
|
|
+};
|
|
+
|
|
+struct xz_dec_lzma2;
|
|
+
|
|
+struct xz_dec_bcj;
|
|
+
|
|
+struct xz_dec {
|
|
+ enum {
|
|
+ SEQ_STREAM_HEADER = 0,
|
|
+ SEQ_BLOCK_START = 1,
|
|
+ SEQ_BLOCK_HEADER = 2,
|
|
+ SEQ_BLOCK_UNCOMPRESS = 3,
|
|
+ SEQ_BLOCK_PADDING = 4,
|
|
+ SEQ_BLOCK_CHECK = 5,
|
|
+ SEQ_INDEX = 6,
|
|
+ SEQ_INDEX_PADDING = 7,
|
|
+ SEQ_INDEX_CRC32 = 8,
|
|
+ SEQ_STREAM_FOOTER = 9,
|
|
+ } sequence;
|
|
+ uint32_t pos;
|
|
+ vli_type vli;
|
|
+ size_t in_start;
|
|
+ size_t out_start;
|
|
+ uint32_t crc32;
|
|
+ enum xz_check check_type;
|
|
+ enum xz_mode mode;
|
|
+ bool allow_buf_error;
|
|
+ struct {
|
|
+ vli_type compressed;
|
|
+ vli_type uncompressed;
|
|
+ uint32_t size;
|
|
+ } block_header;
|
|
+ struct {
|
|
+ vli_type compressed;
|
|
+ vli_type uncompressed;
|
|
+ vli_type count;
|
|
+ struct xz_dec_hash hash;
|
|
+ } block;
|
|
+ struct {
|
|
+ enum {
|
|
+ SEQ_INDEX_COUNT = 0,
|
|
+ SEQ_INDEX_UNPADDED = 1,
|
|
+ SEQ_INDEX_UNCOMPRESSED = 2,
|
|
+ } sequence;
|
|
+ vli_type size;
|
|
+ vli_type count;
|
|
+ struct xz_dec_hash hash;
|
|
+ } index;
|
|
+ struct {
|
|
+ size_t pos;
|
|
+ size_t size;
|
|
+ uint8_t buf[1024];
|
|
+ } temp;
|
|
+ struct xz_dec_lzma2 *lzma2;
|
|
+ struct xz_dec_bcj *bcj;
|
|
+ bool bcj_active;
|
|
+};
|
|
+
|
|
+enum lzma_state {
|
|
+ STATE_LIT_LIT = 0,
|
|
+ STATE_MATCH_LIT_LIT = 1,
|
|
+ STATE_REP_LIT_LIT = 2,
|
|
+ STATE_SHORTREP_LIT_LIT = 3,
|
|
+ STATE_MATCH_LIT = 4,
|
|
+ STATE_REP_LIT = 5,
|
|
+ STATE_SHORTREP_LIT = 6,
|
|
+ STATE_LIT_MATCH = 7,
|
|
+ STATE_LIT_LONGREP = 8,
|
|
+ STATE_LIT_SHORTREP = 9,
|
|
+ STATE_NONLIT_MATCH = 10,
|
|
+ STATE_NONLIT_REP = 11,
|
|
+};
|
|
+
|
|
+struct dictionary {
|
|
+ uint8_t *buf;
|
|
+ size_t start;
|
|
+ size_t pos;
|
|
+ size_t full;
|
|
+ size_t limit;
|
|
+ size_t end;
|
|
+ uint32_t size;
|
|
+ uint32_t size_max;
|
|
+ uint32_t allocated;
|
|
+ enum xz_mode mode;
|
|
+};
|
|
+
|
|
+struct rc_dec {
|
|
+ uint32_t range;
|
|
+ uint32_t code;
|
|
+ uint32_t init_bytes_left;
|
|
+ const uint8_t *in;
|
|
+ size_t in_pos;
|
|
+ size_t in_limit;
|
|
+};
|
|
+
|
|
+struct lzma_len_dec {
|
|
+ uint16_t choice;
|
|
+ uint16_t choice2;
|
|
+ uint16_t low[128];
|
|
+ uint16_t mid[128];
|
|
+ uint16_t high[256];
|
|
+};
|
|
+
|
|
+struct lzma_dec {
|
|
+ uint32_t rep0;
|
|
+ uint32_t rep1;
|
|
+ uint32_t rep2;
|
|
+ uint32_t rep3;
|
|
+ enum lzma_state state;
|
|
+ uint32_t len;
|
|
+ uint32_t lc;
|
|
+ uint32_t literal_pos_mask;
|
|
+ uint32_t pos_mask;
|
|
+ uint16_t is_match[192];
|
|
+ uint16_t is_rep[12];
|
|
+ uint16_t is_rep0[12];
|
|
+ uint16_t is_rep1[12];
|
|
+ uint16_t is_rep2[12];
|
|
+ uint16_t is_rep0_long[192];
|
|
+ uint16_t dist_slot[256];
|
|
+ uint16_t dist_special[114];
|
|
+ uint16_t dist_align[16];
|
|
+ struct lzma_len_dec match_len_dec;
|
|
+ struct lzma_len_dec rep_len_dec;
|
|
+ uint16_t literal[12288];
|
|
+};
|
|
+
|
|
+enum lzma2_seq {
|
|
+ SEQ_CONTROL = 0,
|
|
+ SEQ_UNCOMPRESSED_1 = 1,
|
|
+ SEQ_UNCOMPRESSED_2 = 2,
|
|
+ SEQ_COMPRESSED_0 = 3,
|
|
+ SEQ_COMPRESSED_1 = 4,
|
|
+ SEQ_PROPERTIES = 5,
|
|
+ SEQ_LZMA_PREPARE = 6,
|
|
+ SEQ_LZMA_RUN = 7,
|
|
+ SEQ_COPY = 8,
|
|
+};
|
|
+
|
|
+struct lzma2_dec {
|
|
+ enum lzma2_seq sequence;
|
|
+ enum lzma2_seq next_sequence;
|
|
+ uint32_t uncompressed;
|
|
+ uint32_t compressed;
|
|
+ bool need_dict_reset;
|
|
+ bool need_props;
|
|
+};
|
|
+
|
|
+struct xz_dec_lzma2___2 {
|
|
+ struct rc_dec rc;
|
|
+ struct dictionary dict;
|
|
+ struct lzma2_dec lzma2;
|
|
+ struct lzma_dec lzma;
|
|
+ struct {
|
|
+ uint32_t size;
|
|
+ uint8_t buf[63];
|
|
+ } temp;
|
|
+};
|
|
+
|
|
+struct xz_dec_bcj___2 {
|
|
+ enum {
|
|
+ BCJ_X86 = 4,
|
|
+ BCJ_POWERPC = 5,
|
|
+ BCJ_IA64 = 6,
|
|
+ BCJ_ARM = 7,
|
|
+ BCJ_ARMTHUMB = 8,
|
|
+ BCJ_SPARC = 9,
|
|
+ } type;
|
|
+ enum xz_ret ret;
|
|
+ bool single_call;
|
|
+ uint32_t pos;
|
|
+ uint32_t x86_prev_mask;
|
|
+ uint8_t *out;
|
|
+ size_t out_pos;
|
|
+ size_t out_size;
|
|
+ struct {
|
|
+ size_t filtered;
|
|
+ size_t size;
|
|
+ uint8_t buf[16];
|
|
+ } temp;
|
|
+};
|
|
+
|
|
+struct ts_state {
|
|
+ unsigned int offset;
|
|
+ char cb[40];
|
|
+};
|
|
+
|
|
+struct ts_config;
|
|
+
|
|
+struct ts_ops {
|
|
+ const char *name;
|
|
+ struct ts_config * (*init)(const void *, unsigned int, gfp_t, int);
|
|
+ unsigned int (*find)(struct ts_config *, struct ts_state *);
|
|
+ void (*destroy)(struct ts_config *);
|
|
+ void * (*get_pattern)(struct ts_config *);
|
|
+ unsigned int (*get_pattern_len)(struct ts_config *);
|
|
+ struct module *owner;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct ts_config {
|
|
+ struct ts_ops *ops;
|
|
+ int flags;
|
|
+ unsigned int (*get_next_block)(unsigned int, const u8 **, struct ts_config *, struct ts_state *);
|
|
+ void (*finish)(struct ts_config *, struct ts_state *);
|
|
+};
|
|
+
|
|
+struct ts_linear_state {
|
|
+ unsigned int len;
|
|
+ const void *data;
|
|
+};
|
|
+
|
|
+typedef s32 pao_T_____7;
|
|
+
|
|
+struct ei_entry {
|
|
+ struct list_head list;
|
|
+ long unsigned int start_addr;
|
|
+ long unsigned int end_addr;
|
|
+ int etype;
|
|
+ void *priv;
|
|
+};
|
|
+
|
|
+struct ddebug_table {
|
|
+ struct list_head link;
|
|
+ const char *mod_name;
|
|
+ unsigned int num_ddebugs;
|
|
+ struct _ddebug *ddebugs;
|
|
+};
|
|
+
|
|
+struct ddebug_query {
|
|
+ const char *filename;
|
|
+ const char *module;
|
|
+ const char *function;
|
|
+ const char *format;
|
|
+ unsigned int first_lineno;
|
|
+ unsigned int last_lineno;
|
|
+};
|
|
+
|
|
+struct ddebug_iter {
|
|
+ struct ddebug_table *table;
|
|
+ unsigned int idx;
|
|
+};
|
|
+
|
|
+struct nla_bitfield32 {
|
|
+ __u32 value;
|
|
+ __u32 selector;
|
|
+};
|
|
+
|
|
+struct cpu_rmap {
|
|
+ struct kref refcount;
|
|
+ u16 size;
|
|
+ u16 used;
|
|
+ void **obj;
|
|
+ struct {
|
|
+ u16 index;
|
|
+ u16 dist;
|
|
+ } near[0];
|
|
+};
|
|
+
|
|
+struct irq_glue {
|
|
+ struct irq_affinity_notify notify;
|
|
+ struct cpu_rmap *rmap;
|
|
+ u16 index;
|
|
+};
|
|
+
|
|
+typedef mpi_limb_t *mpi_ptr_t;
|
|
+
|
|
+typedef int mpi_size_t;
|
|
+
|
|
+typedef mpi_limb_t UWtype;
|
|
+
|
|
+typedef unsigned int UHWtype;
|
|
+
|
|
+struct karatsuba_ctx {
|
|
+ struct karatsuba_ctx *next;
|
|
+ mpi_ptr_t tspace;
|
|
+ mpi_size_t tspace_size;
|
|
+ mpi_ptr_t tp;
|
|
+ mpi_size_t tp_size;
|
|
+};
|
|
+
|
|
+typedef long int mpi_limb_signed_t;
|
|
+
|
|
+enum pubkey_algo {
|
|
+ PUBKEY_ALGO_RSA = 0,
|
|
+ PUBKEY_ALGO_MAX = 1,
|
|
+};
|
|
+
|
|
+struct pubkey_hdr {
|
|
+ uint8_t version;
|
|
+ uint32_t timestamp;
|
|
+ uint8_t algo;
|
|
+ uint8_t nmpi;
|
|
+ char mpi[0];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct signature_hdr {
|
|
+ uint8_t version;
|
|
+ uint32_t timestamp;
|
|
+ uint8_t algo;
|
|
+ uint8_t hash;
|
|
+ uint8_t keyid[8];
|
|
+ uint8_t nmpi;
|
|
+ char mpi[0];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct sg_pool {
|
|
+ size_t size;
|
|
+ char *name;
|
|
+ struct kmem_cache *slab;
|
|
+ mempool_t *pool;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IRQ_POLL_F_SCHED = 0,
|
|
+ IRQ_POLL_F_DISABLE = 1,
|
|
+};
|
|
+
|
|
+struct font_data {
|
|
+ unsigned int extra[4];
|
|
+ const unsigned char data[0];
|
|
+};
|
|
+
|
|
+typedef u16 ucs2_char_t;
|
|
+
|
|
+struct msr {
|
|
+ union {
|
|
+ struct {
|
|
+ u32 l;
|
|
+ u32 h;
|
|
+ };
|
|
+ u64 q;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct msr_info {
|
|
+ u32 msr_no;
|
|
+ struct msr reg;
|
|
+ struct msr *msrs;
|
|
+ int err;
|
|
+};
|
|
+
|
|
+struct msr_regs_info {
|
|
+ u32 *regs;
|
|
+ int err;
|
|
+};
|
|
+
|
|
+struct msr_info_completion {
|
|
+ struct msr_info msr;
|
|
+ struct completion done;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_msr_trace_class {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int msr;
|
|
+ u64 val;
|
|
+ int failed;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_msr_trace_class {};
|
|
+
|
|
+struct pinctrl {
|
|
+ struct list_head node;
|
|
+ struct device *dev;
|
|
+ struct list_head states;
|
|
+ struct pinctrl_state *state;
|
|
+ struct list_head dt_maps;
|
|
+ struct kref users;
|
|
+};
|
|
+
|
|
+struct pinctrl_state {
|
|
+ struct list_head node;
|
|
+ const char *name;
|
|
+ struct list_head settings;
|
|
+};
|
|
+
|
|
+struct pinctrl_pin_desc {
|
|
+ unsigned int number;
|
|
+ const char *name;
|
|
+ void *drv_data;
|
|
+};
|
|
+
|
|
+struct gpio_chip;
|
|
+
|
|
+struct pinctrl_gpio_range {
|
|
+ struct list_head node;
|
|
+ const char *name;
|
|
+ unsigned int id;
|
|
+ unsigned int base;
|
|
+ unsigned int pin_base;
|
|
+ const unsigned int *pins;
|
|
+ unsigned int npins;
|
|
+ struct gpio_chip *gc;
|
|
+};
|
|
+
|
|
+struct gpio_irq_chip {
|
|
+ struct irq_chip *chip;
|
|
+ struct irq_domain *domain;
|
|
+ const struct irq_domain_ops *domain_ops;
|
|
+ irq_flow_handler_t handler;
|
|
+ unsigned int default_type;
|
|
+ struct lock_class_key *lock_key;
|
|
+ struct lock_class_key *request_key;
|
|
+ irq_flow_handler_t parent_handler;
|
|
+ void *parent_handler_data;
|
|
+ unsigned int num_parents;
|
|
+ unsigned int parent_irq;
|
|
+ unsigned int *parents;
|
|
+ unsigned int *map;
|
|
+ bool threaded;
|
|
+ bool need_valid_mask;
|
|
+ long unsigned int *valid_mask;
|
|
+ unsigned int first;
|
|
+};
|
|
+
|
|
+struct gpio_device;
|
|
+
|
|
+struct gpio_chip {
|
|
+ const char *label;
|
|
+ struct gpio_device *gpiodev;
|
|
+ struct device *parent;
|
|
+ struct module *owner;
|
|
+ int (*request)(struct gpio_chip *, unsigned int);
|
|
+ void (*free)(struct gpio_chip *, unsigned int);
|
|
+ int (*get_direction)(struct gpio_chip *, unsigned int);
|
|
+ int (*direction_input)(struct gpio_chip *, unsigned int);
|
|
+ int (*direction_output)(struct gpio_chip *, unsigned int, int);
|
|
+ int (*get)(struct gpio_chip *, unsigned int);
|
|
+ int (*get_multiple)(struct gpio_chip *, long unsigned int *, long unsigned int *);
|
|
+ void (*set)(struct gpio_chip *, unsigned int, int);
|
|
+ void (*set_multiple)(struct gpio_chip *, long unsigned int *, long unsigned int *);
|
|
+ int (*set_config)(struct gpio_chip *, unsigned int, long unsigned int);
|
|
+ int (*to_irq)(struct gpio_chip *, unsigned int);
|
|
+ void (*dbg_show)(struct seq_file *, struct gpio_chip *);
|
|
+ int (*init_valid_mask)(struct gpio_chip *);
|
|
+ int base;
|
|
+ u16 ngpio;
|
|
+ const char * const *names;
|
|
+ bool can_sleep;
|
|
+ long unsigned int (*read_reg)(void *);
|
|
+ void (*write_reg)(void *, long unsigned int);
|
|
+ bool be_bits;
|
|
+ void *reg_dat;
|
|
+ void *reg_set;
|
|
+ void *reg_clr;
|
|
+ void *reg_dir;
|
|
+ bool bgpio_dir_inverted;
|
|
+ int bgpio_bits;
|
|
+ spinlock_t bgpio_lock;
|
|
+ long unsigned int bgpio_data;
|
|
+ long unsigned int bgpio_dir;
|
|
+ struct gpio_irq_chip irq;
|
|
+ bool need_valid_mask;
|
|
+ long unsigned int *valid_mask;
|
|
+};
|
|
+
|
|
+struct pinctrl_dev;
|
|
+
|
|
+struct pinctrl_map;
|
|
+
|
|
+struct pinctrl_ops {
|
|
+ int (*get_groups_count)(struct pinctrl_dev *);
|
|
+ const char * (*get_group_name)(struct pinctrl_dev *, unsigned int);
|
|
+ int (*get_group_pins)(struct pinctrl_dev *, unsigned int, const unsigned int **, unsigned int *);
|
|
+ void (*pin_dbg_show)(struct pinctrl_dev *, struct seq_file *, unsigned int);
|
|
+ int (*dt_node_to_map)(struct pinctrl_dev *, struct device_node *, struct pinctrl_map **, unsigned int *);
|
|
+ void (*dt_free_map)(struct pinctrl_dev *, struct pinctrl_map *, unsigned int);
|
|
+};
|
|
+
|
|
+struct pinctrl_desc;
|
|
+
|
|
+struct pinctrl_dev {
|
|
+ struct list_head node;
|
|
+ struct pinctrl_desc *desc;
|
|
+ struct radix_tree_root pin_desc_tree;
|
|
+ struct list_head gpio_ranges;
|
|
+ struct device *dev;
|
|
+ struct module *owner;
|
|
+ void *driver_data;
|
|
+ struct pinctrl *p;
|
|
+ struct pinctrl_state *hog_default;
|
|
+ struct pinctrl_state *hog_sleep;
|
|
+ struct mutex mutex;
|
|
+ struct dentry *device_root;
|
|
+};
|
|
+
|
|
+enum pinctrl_map_type {
|
|
+ PIN_MAP_TYPE_INVALID = 0,
|
|
+ PIN_MAP_TYPE_DUMMY_STATE = 1,
|
|
+ PIN_MAP_TYPE_MUX_GROUP = 2,
|
|
+ PIN_MAP_TYPE_CONFIGS_PIN = 3,
|
|
+ PIN_MAP_TYPE_CONFIGS_GROUP = 4,
|
|
+};
|
|
+
|
|
+struct pinctrl_map_mux {
|
|
+ const char *group;
|
|
+ const char *function;
|
|
+};
|
|
+
|
|
+struct pinctrl_map_configs {
|
|
+ const char *group_or_pin;
|
|
+ long unsigned int *configs;
|
|
+ unsigned int num_configs;
|
|
+};
|
|
+
|
|
+struct pinctrl_map {
|
|
+ const char *dev_name;
|
|
+ const char *name;
|
|
+ enum pinctrl_map_type type;
|
|
+ const char *ctrl_dev_name;
|
|
+ union {
|
|
+ struct pinctrl_map_mux mux;
|
|
+ struct pinctrl_map_configs configs;
|
|
+ } data;
|
|
+};
|
|
+
|
|
+struct pinconf_generic_params;
|
|
+
|
|
+struct pinmux_ops;
|
|
+
|
|
+struct pinconf_ops;
|
|
+
|
|
+struct pin_config_item;
|
|
+
|
|
+struct pinctrl_desc {
|
|
+ const char *name;
|
|
+ const struct pinctrl_pin_desc *pins;
|
|
+ unsigned int npins;
|
|
+ const struct pinctrl_ops *pctlops;
|
|
+ const struct pinmux_ops *pmxops;
|
|
+ const struct pinconf_ops *confops;
|
|
+ struct module *owner;
|
|
+ unsigned int num_custom_params;
|
|
+ const struct pinconf_generic_params *custom_params;
|
|
+ const struct pin_config_item *custom_conf_items;
|
|
+};
|
|
+
|
|
+struct pinmux_ops {
|
|
+ int (*request)(struct pinctrl_dev *, unsigned int);
|
|
+ int (*free)(struct pinctrl_dev *, unsigned int);
|
|
+ int (*get_functions_count)(struct pinctrl_dev *);
|
|
+ const char * (*get_function_name)(struct pinctrl_dev *, unsigned int);
|
|
+ int (*get_function_groups)(struct pinctrl_dev *, unsigned int, const char * const **, unsigned int *);
|
|
+ int (*set_mux)(struct pinctrl_dev *, unsigned int, unsigned int);
|
|
+ int (*gpio_request_enable)(struct pinctrl_dev *, struct pinctrl_gpio_range *, unsigned int);
|
|
+ void (*gpio_disable_free)(struct pinctrl_dev *, struct pinctrl_gpio_range *, unsigned int);
|
|
+ int (*gpio_set_direction)(struct pinctrl_dev *, struct pinctrl_gpio_range *, unsigned int, bool);
|
|
+ bool strict;
|
|
+};
|
|
+
|
|
+struct pinconf_ops {
|
|
+ bool is_generic;
|
|
+ int (*pin_config_get)(struct pinctrl_dev *, unsigned int, long unsigned int *);
|
|
+ int (*pin_config_set)(struct pinctrl_dev *, unsigned int, long unsigned int *, unsigned int);
|
|
+ int (*pin_config_group_get)(struct pinctrl_dev *, unsigned int, long unsigned int *);
|
|
+ int (*pin_config_group_set)(struct pinctrl_dev *, unsigned int, long unsigned int *, unsigned int);
|
|
+ int (*pin_config_dbg_parse_modify)(struct pinctrl_dev *, const char *, long unsigned int *);
|
|
+ void (*pin_config_dbg_show)(struct pinctrl_dev *, struct seq_file *, unsigned int);
|
|
+ void (*pin_config_group_dbg_show)(struct pinctrl_dev *, struct seq_file *, unsigned int);
|
|
+ void (*pin_config_config_dbg_show)(struct pinctrl_dev *, struct seq_file *, long unsigned int);
|
|
+};
|
|
+
|
|
+enum pin_config_param {
|
|
+ PIN_CONFIG_BIAS_BUS_HOLD = 0,
|
|
+ PIN_CONFIG_BIAS_DISABLE = 1,
|
|
+ PIN_CONFIG_BIAS_HIGH_IMPEDANCE = 2,
|
|
+ PIN_CONFIG_BIAS_PULL_DOWN = 3,
|
|
+ PIN_CONFIG_BIAS_PULL_PIN_DEFAULT = 4,
|
|
+ PIN_CONFIG_BIAS_PULL_UP = 5,
|
|
+ PIN_CONFIG_DRIVE_OPEN_DRAIN = 6,
|
|
+ PIN_CONFIG_DRIVE_OPEN_SOURCE = 7,
|
|
+ PIN_CONFIG_DRIVE_PUSH_PULL = 8,
|
|
+ PIN_CONFIG_DRIVE_STRENGTH = 9,
|
|
+ PIN_CONFIG_INPUT_DEBOUNCE = 10,
|
|
+ PIN_CONFIG_INPUT_ENABLE = 11,
|
|
+ PIN_CONFIG_INPUT_SCHMITT = 12,
|
|
+ PIN_CONFIG_INPUT_SCHMITT_ENABLE = 13,
|
|
+ PIN_CONFIG_LOW_POWER_MODE = 14,
|
|
+ PIN_CONFIG_OUTPUT_ENABLE = 15,
|
|
+ PIN_CONFIG_OUTPUT = 16,
|
|
+ PIN_CONFIG_POWER_SOURCE = 17,
|
|
+ PIN_CONFIG_SLEEP_HARDWARE_STATE = 18,
|
|
+ PIN_CONFIG_SLEW_RATE = 19,
|
|
+ PIN_CONFIG_SKEW_DELAY = 20,
|
|
+ PIN_CONFIG_PERSIST_STATE = 21,
|
|
+ PIN_CONFIG_END = 127,
|
|
+ PIN_CONFIG_MAX = 255,
|
|
+};
|
|
+
|
|
+struct pin_config_item {
|
|
+ const enum pin_config_param param;
|
|
+ const char * const display;
|
|
+ const char * const format;
|
|
+ bool has_arg;
|
|
+};
|
|
+
|
|
+struct pinctrl_setting_mux {
|
|
+ unsigned int group;
|
|
+ unsigned int func;
|
|
+};
|
|
+
|
|
+struct pinctrl_setting_configs {
|
|
+ unsigned int group_or_pin;
|
|
+ long unsigned int *configs;
|
|
+ unsigned int num_configs;
|
|
+};
|
|
+
|
|
+struct pinctrl_setting {
|
|
+ struct list_head node;
|
|
+ enum pinctrl_map_type type;
|
|
+ struct pinctrl_dev *pctldev;
|
|
+ const char *dev_name;
|
|
+ union {
|
|
+ struct pinctrl_setting_mux mux;
|
|
+ struct pinctrl_setting_configs configs;
|
|
+ } data;
|
|
+};
|
|
+
|
|
+struct pin_desc {
|
|
+ struct pinctrl_dev *pctldev;
|
|
+ const char *name;
|
|
+ bool dynamic_name;
|
|
+ void *drv_data;
|
|
+ unsigned int mux_usecount;
|
|
+ const char *mux_owner;
|
|
+ const struct pinctrl_setting_mux *mux_setting;
|
|
+ const char *gpio_owner;
|
|
+};
|
|
+
|
|
+struct pinctrl_maps {
|
|
+ struct list_head node;
|
|
+ const struct pinctrl_map *maps;
|
|
+ unsigned int num_maps;
|
|
+};
|
|
+
|
|
+struct pctldev;
|
|
+
|
|
+struct dbg_cfg {
|
|
+ enum pinctrl_map_type map_type;
|
|
+ char dev_name[16];
|
|
+ char state_name[16];
|
|
+ char pin_name[16];
|
|
+};
|
|
+
|
|
+struct byt_gpio_pin_context {
|
|
+ u32 conf0;
|
|
+ u32 val;
|
|
+};
|
|
+
|
|
+struct byt_simple_func_mux {
|
|
+ const char *name;
|
|
+ short unsigned int func;
|
|
+};
|
|
+
|
|
+struct byt_mixed_func_mux {
|
|
+ const char *name;
|
|
+ const short unsigned int *func_values;
|
|
+};
|
|
+
|
|
+struct byt_pingroup {
|
|
+ const char *name;
|
|
+ const unsigned int *pins;
|
|
+ size_t npins;
|
|
+ short unsigned int has_simple_funcs;
|
|
+ union {
|
|
+ const struct byt_simple_func_mux *simple_funcs;
|
|
+ const struct byt_mixed_func_mux *mixed_funcs;
|
|
+ };
|
|
+ size_t nfuncs;
|
|
+};
|
|
+
|
|
+struct byt_function {
|
|
+ const char *name;
|
|
+ const char * const *groups;
|
|
+ size_t ngroups;
|
|
+};
|
|
+
|
|
+struct byt_community {
|
|
+ unsigned int pin_base;
|
|
+ size_t npins;
|
|
+ const unsigned int *pad_map;
|
|
+ void *reg_base;
|
|
+};
|
|
+
|
|
+struct byt_pinctrl_soc_data {
|
|
+ const char *uid;
|
|
+ const struct pinctrl_pin_desc *pins;
|
|
+ size_t npins;
|
|
+ const struct byt_pingroup *groups;
|
|
+ size_t ngroups;
|
|
+ const struct byt_function *functions;
|
|
+ size_t nfunctions;
|
|
+ const struct byt_community *communities;
|
|
+ size_t ncommunities;
|
|
+};
|
|
+
|
|
+struct byt_gpio {
|
|
+ struct gpio_chip chip;
|
|
+ struct platform_device *pdev;
|
|
+ struct pinctrl_dev *pctl_dev;
|
|
+ struct pinctrl_desc pctl_desc;
|
|
+ const struct byt_pinctrl_soc_data *soc_data;
|
|
+ struct byt_community *communities_copy;
|
|
+ struct byt_gpio_pin_context *saved_context;
|
|
+};
|
|
+
|
|
+struct gpio_desc;
|
|
+
|
|
+struct gpio_device {
|
|
+ int id;
|
|
+ struct device dev;
|
|
+ struct cdev chrdev;
|
|
+ struct device *mockdev;
|
|
+ struct module *owner;
|
|
+ struct gpio_chip *chip;
|
|
+ struct gpio_desc *descs;
|
|
+ int base;
|
|
+ u16 ngpio;
|
|
+ const char *label;
|
|
+ void *data;
|
|
+ struct list_head list;
|
|
+ struct list_head pin_ranges;
|
|
+};
|
|
+
|
|
+struct gpio_descs {
|
|
+ unsigned int ndescs;
|
|
+ struct gpio_desc *desc[0];
|
|
+};
|
|
+
|
|
+struct gpio_desc {
|
|
+ struct gpio_device *gdev;
|
|
+ long unsigned int flags;
|
|
+ const char *label;
|
|
+ const char *name;
|
|
+};
|
|
+
|
|
+enum gpiod_flags {
|
|
+ GPIOD_ASIS = 0,
|
|
+ GPIOD_IN = 1,
|
|
+ GPIOD_OUT_LOW = 3,
|
|
+ GPIOD_OUT_HIGH = 7,
|
|
+ GPIOD_OUT_LOW_OPEN_DRAIN = 11,
|
|
+ GPIOD_OUT_HIGH_OPEN_DRAIN = 15,
|
|
+};
|
|
+
|
|
+struct gpio_pin_range {
|
|
+ struct list_head node;
|
|
+ struct pinctrl_dev *pctldev;
|
|
+ struct pinctrl_gpio_range range;
|
|
+};
|
|
+
|
|
+enum of_gpio_flags {
|
|
+ OF_GPIO_ACTIVE_LOW = 1,
|
|
+ OF_GPIO_SINGLE_ENDED = 2,
|
|
+ OF_GPIO_OPEN_DRAIN = 4,
|
|
+ OF_GPIO_TRANSITORY = 8,
|
|
+};
|
|
+
|
|
+enum gpio_lookup_flags {
|
|
+ GPIO_ACTIVE_HIGH = 0,
|
|
+ GPIO_ACTIVE_LOW = 1,
|
|
+ GPIO_OPEN_DRAIN = 2,
|
|
+ GPIO_OPEN_SOURCE = 4,
|
|
+ GPIO_PERSISTENT = 0,
|
|
+ GPIO_TRANSITORY = 8,
|
|
+};
|
|
+
|
|
+struct gpiod_lookup {
|
|
+ const char *chip_label;
|
|
+ u16 chip_hwnum;
|
|
+ const char *con_id;
|
|
+ unsigned int idx;
|
|
+ enum gpio_lookup_flags flags;
|
|
+};
|
|
+
|
|
+struct gpiod_lookup_table {
|
|
+ struct list_head list;
|
|
+ const char *dev_id;
|
|
+ struct gpiod_lookup table[0];
|
|
+};
|
|
+
|
|
+struct gpiod_hog {
|
|
+ struct list_head list;
|
|
+ const char *chip_label;
|
|
+ u16 chip_hwnum;
|
|
+ const char *line_name;
|
|
+ enum gpio_lookup_flags lflags;
|
|
+ int dflags;
|
|
+};
|
|
+
|
|
+struct gpiochip_info {
|
|
+ char name[32];
|
|
+ char label[32];
|
|
+ __u32 lines;
|
|
+};
|
|
+
|
|
+struct gpioline_info {
|
|
+ __u32 line_offset;
|
|
+ __u32 flags;
|
|
+ char name[32];
|
|
+ char consumer[32];
|
|
+};
|
|
+
|
|
+struct gpiohandle_request {
|
|
+ __u32 lineoffsets[64];
|
|
+ __u32 flags;
|
|
+ __u8 default_values[64];
|
|
+ char consumer_label[32];
|
|
+ __u32 lines;
|
|
+ int fd;
|
|
+};
|
|
+
|
|
+struct gpiohandle_data {
|
|
+ __u8 values[64];
|
|
+};
|
|
+
|
|
+struct gpioevent_request {
|
|
+ __u32 lineoffset;
|
|
+ __u32 handleflags;
|
|
+ __u32 eventflags;
|
|
+ char consumer_label[32];
|
|
+ int fd;
|
|
+};
|
|
+
|
|
+struct gpioevent_data {
|
|
+ __u64 timestamp;
|
|
+ __u32 id;
|
|
+};
|
|
+
|
|
+struct acpi_gpio_info {
|
|
+ struct acpi_device *adev;
|
|
+ enum gpiod_flags flags;
|
|
+ bool gpioint;
|
|
+ int polarity;
|
|
+ int triggering;
|
|
+ unsigned int quirks;
|
|
+};
|
|
+
|
|
+struct linehandle_state {
|
|
+ struct gpio_device *gdev;
|
|
+ const char *label;
|
|
+ struct gpio_desc *descs[64];
|
|
+ u32 numdescs;
|
|
+};
|
|
+
|
|
+struct lineevent_state {
|
|
+ struct gpio_device *gdev;
|
|
+ const char *label;
|
|
+ struct gpio_desc *desc;
|
|
+ u32 eflags;
|
|
+ int irq;
|
|
+ wait_queue_head_t wait;
|
|
+ struct {
|
|
+ union {
|
|
+ struct __kfifo kfifo;
|
|
+ struct gpioevent_data *type;
|
|
+ const struct gpioevent_data *const_type;
|
|
+ char (*rectype)[0];
|
|
+ struct gpioevent_data *ptr;
|
|
+ const struct gpioevent_data *ptr_const;
|
|
+ };
|
|
+ struct gpioevent_data buf[16];
|
|
+ } events;
|
|
+ struct mutex read_lock;
|
|
+ u64 timestamp;
|
|
+};
|
|
+
|
|
+struct gpio {
|
|
+ unsigned int gpio;
|
|
+ long unsigned int flags;
|
|
+ const char *label;
|
|
+};
|
|
+
|
|
+struct class_attribute {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct class *, struct class_attribute *, char *);
|
|
+ ssize_t (*store)(struct class *, struct class_attribute *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct gpiod_data {
|
|
+ struct gpio_desc *desc;
|
|
+ struct mutex mutex;
|
|
+ struct kernfs_node *value_kn;
|
|
+ int irq;
|
|
+ unsigned char irq_flags;
|
|
+ bool direction_can_change;
|
|
+};
|
|
+
|
|
+struct acpi_connection_info {
|
|
+ u8 *connection;
|
|
+ u16 length;
|
|
+ u8 access_length;
|
|
+};
|
|
+
|
|
+struct acpi_resource_irq {
|
|
+ u8 descriptor_length;
|
|
+ u8 triggering;
|
|
+ u8 polarity;
|
|
+ u8 sharable;
|
|
+ u8 wake_capable;
|
|
+ u8 interrupt_count;
|
|
+ u8 interrupts[1];
|
|
+};
|
|
+
|
|
+struct acpi_resource_dma {
|
|
+ u8 type;
|
|
+ u8 bus_master;
|
|
+ u8 transfer;
|
|
+ u8 channel_count;
|
|
+ u8 channels[1];
|
|
+};
|
|
+
|
|
+struct acpi_resource_start_dependent {
|
|
+ u8 descriptor_length;
|
|
+ u8 compatibility_priority;
|
|
+ u8 performance_robustness;
|
|
+};
|
|
+
|
|
+struct acpi_resource_io {
|
|
+ u8 io_decode;
|
|
+ u8 alignment;
|
|
+ u8 address_length;
|
|
+ u16 minimum;
|
|
+ u16 maximum;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_fixed_io {
|
|
+ u16 address;
|
|
+ u8 address_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_fixed_dma {
|
|
+ u16 request_lines;
|
|
+ u16 channels;
|
|
+ u8 width;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_vendor {
|
|
+ u16 byte_length;
|
|
+ u8 byte_data[1];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_vendor_typed {
|
|
+ u16 byte_length;
|
|
+ u8 uuid_subtype;
|
|
+ u8 uuid[16];
|
|
+ u8 byte_data[1];
|
|
+};
|
|
+
|
|
+struct acpi_resource_end_tag {
|
|
+ u8 checksum;
|
|
+};
|
|
+
|
|
+struct acpi_resource_memory24 {
|
|
+ u8 write_protect;
|
|
+ u16 minimum;
|
|
+ u16 maximum;
|
|
+ u16 alignment;
|
|
+ u16 address_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_memory32 {
|
|
+ u8 write_protect;
|
|
+ u32 minimum;
|
|
+ u32 maximum;
|
|
+ u32 alignment;
|
|
+ u32 address_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_fixed_memory32 {
|
|
+ u8 write_protect;
|
|
+ u32 address;
|
|
+ u32 address_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_memory_attribute {
|
|
+ u8 write_protect;
|
|
+ u8 caching;
|
|
+ u8 range_type;
|
|
+ u8 translation;
|
|
+};
|
|
+
|
|
+struct acpi_io_attribute {
|
|
+ u8 range_type;
|
|
+ u8 translation;
|
|
+ u8 translation_type;
|
|
+ u8 reserved1;
|
|
+};
|
|
+
|
|
+union acpi_resource_attribute {
|
|
+ struct acpi_memory_attribute mem;
|
|
+ struct acpi_io_attribute io;
|
|
+ u8 type_specific;
|
|
+};
|
|
+
|
|
+struct acpi_resource_label {
|
|
+ u16 string_length;
|
|
+ char *string_ptr;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_source {
|
|
+ u8 index;
|
|
+ u16 string_length;
|
|
+ char *string_ptr;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_address16_attribute {
|
|
+ u16 granularity;
|
|
+ u16 minimum;
|
|
+ u16 maximum;
|
|
+ u16 translation_offset;
|
|
+ u16 address_length;
|
|
+};
|
|
+
|
|
+struct acpi_address32_attribute {
|
|
+ u32 granularity;
|
|
+ u32 minimum;
|
|
+ u32 maximum;
|
|
+ u32 translation_offset;
|
|
+ u32 address_length;
|
|
+};
|
|
+
|
|
+struct acpi_address64_attribute {
|
|
+ u64 granularity;
|
|
+ u64 minimum;
|
|
+ u64 maximum;
|
|
+ u64 translation_offset;
|
|
+ u64 address_length;
|
|
+};
|
|
+
|
|
+struct acpi_resource_address {
|
|
+ u8 resource_type;
|
|
+ u8 producer_consumer;
|
|
+ u8 decode;
|
|
+ u8 min_address_fixed;
|
|
+ u8 max_address_fixed;
|
|
+ union acpi_resource_attribute info;
|
|
+};
|
|
+
|
|
+struct acpi_resource_address16 {
|
|
+ u8 resource_type;
|
|
+ u8 producer_consumer;
|
|
+ u8 decode;
|
|
+ u8 min_address_fixed;
|
|
+ u8 max_address_fixed;
|
|
+ union acpi_resource_attribute info;
|
|
+ struct acpi_address16_attribute address;
|
|
+ struct acpi_resource_source resource_source;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_address32 {
|
|
+ u8 resource_type;
|
|
+ u8 producer_consumer;
|
|
+ u8 decode;
|
|
+ u8 min_address_fixed;
|
|
+ u8 max_address_fixed;
|
|
+ union acpi_resource_attribute info;
|
|
+ struct acpi_address32_attribute address;
|
|
+ struct acpi_resource_source resource_source;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_address64 {
|
|
+ u8 resource_type;
|
|
+ u8 producer_consumer;
|
|
+ u8 decode;
|
|
+ u8 min_address_fixed;
|
|
+ u8 max_address_fixed;
|
|
+ union acpi_resource_attribute info;
|
|
+ struct acpi_address64_attribute address;
|
|
+ struct acpi_resource_source resource_source;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_extended_address64 {
|
|
+ u8 resource_type;
|
|
+ u8 producer_consumer;
|
|
+ u8 decode;
|
|
+ u8 min_address_fixed;
|
|
+ u8 max_address_fixed;
|
|
+ union acpi_resource_attribute info;
|
|
+ u8 revision_ID;
|
|
+ struct acpi_address64_attribute address;
|
|
+ u64 type_specific;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_extended_irq {
|
|
+ u8 producer_consumer;
|
|
+ u8 triggering;
|
|
+ u8 polarity;
|
|
+ u8 sharable;
|
|
+ u8 wake_capable;
|
|
+ u8 interrupt_count;
|
|
+ struct acpi_resource_source resource_source;
|
|
+ u32 interrupts[1];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_generic_register {
|
|
+ u8 space_id;
|
|
+ u8 bit_width;
|
|
+ u8 bit_offset;
|
|
+ u8 access_size;
|
|
+ u64 address;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_gpio {
|
|
+ u8 revision_id;
|
|
+ u8 connection_type;
|
|
+ u8 producer_consumer;
|
|
+ u8 pin_config;
|
|
+ u8 sharable;
|
|
+ u8 wake_capable;
|
|
+ u8 io_restriction;
|
|
+ u8 triggering;
|
|
+ u8 polarity;
|
|
+ u16 drive_strength;
|
|
+ u16 debounce_timeout;
|
|
+ u16 pin_table_length;
|
|
+ u16 vendor_length;
|
|
+ struct acpi_resource_source resource_source;
|
|
+ u16 *pin_table;
|
|
+ u8 *vendor_data;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_common_serialbus {
|
|
+ u8 revision_id;
|
|
+ u8 type;
|
|
+ u8 producer_consumer;
|
|
+ u8 slave_mode;
|
|
+ u8 connection_sharing;
|
|
+ u8 type_revision_id;
|
|
+ u16 type_data_length;
|
|
+ u16 vendor_length;
|
|
+ struct acpi_resource_source resource_source;
|
|
+ u8 *vendor_data;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_i2c_serialbus {
|
|
+ u8 revision_id;
|
|
+ u8 type;
|
|
+ u8 producer_consumer;
|
|
+ u8 slave_mode;
|
|
+ u8 connection_sharing;
|
|
+ u8 type_revision_id;
|
|
+ u16 type_data_length;
|
|
+ u16 vendor_length;
|
|
+ struct acpi_resource_source resource_source;
|
|
+ u8 *vendor_data;
|
|
+ u8 access_mode;
|
|
+ u16 slave_address;
|
|
+ u32 connection_speed;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_spi_serialbus {
|
|
+ u8 revision_id;
|
|
+ u8 type;
|
|
+ u8 producer_consumer;
|
|
+ u8 slave_mode;
|
|
+ u8 connection_sharing;
|
|
+ u8 type_revision_id;
|
|
+ u16 type_data_length;
|
|
+ u16 vendor_length;
|
|
+ struct acpi_resource_source resource_source;
|
|
+ u8 *vendor_data;
|
|
+ u8 wire_mode;
|
|
+ u8 device_polarity;
|
|
+ u8 data_bit_length;
|
|
+ u8 clock_phase;
|
|
+ u8 clock_polarity;
|
|
+ u16 device_selection;
|
|
+ u32 connection_speed;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_uart_serialbus {
|
|
+ u8 revision_id;
|
|
+ u8 type;
|
|
+ u8 producer_consumer;
|
|
+ u8 slave_mode;
|
|
+ u8 connection_sharing;
|
|
+ u8 type_revision_id;
|
|
+ u16 type_data_length;
|
|
+ u16 vendor_length;
|
|
+ struct acpi_resource_source resource_source;
|
|
+ u8 *vendor_data;
|
|
+ u8 endian;
|
|
+ u8 data_bits;
|
|
+ u8 stop_bits;
|
|
+ u8 flow_control;
|
|
+ u8 parity;
|
|
+ u8 lines_enabled;
|
|
+ u16 rx_fifo_size;
|
|
+ u16 tx_fifo_size;
|
|
+ u32 default_baud_rate;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_pin_function {
|
|
+ u8 revision_id;
|
|
+ u8 pin_config;
|
|
+ u8 sharable;
|
|
+ u16 function_number;
|
|
+ u16 pin_table_length;
|
|
+ u16 vendor_length;
|
|
+ struct acpi_resource_source resource_source;
|
|
+ u16 *pin_table;
|
|
+ u8 *vendor_data;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_pin_config {
|
|
+ u8 revision_id;
|
|
+ u8 producer_consumer;
|
|
+ u8 sharable;
|
|
+ u8 pin_config_type;
|
|
+ u32 pin_config_value;
|
|
+ u16 pin_table_length;
|
|
+ u16 vendor_length;
|
|
+ struct acpi_resource_source resource_source;
|
|
+ u16 *pin_table;
|
|
+ u8 *vendor_data;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_pin_group {
|
|
+ u8 revision_id;
|
|
+ u8 producer_consumer;
|
|
+ u16 pin_table_length;
|
|
+ u16 vendor_length;
|
|
+ u16 *pin_table;
|
|
+ struct acpi_resource_label resource_label;
|
|
+ u8 *vendor_data;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_pin_group_function {
|
|
+ u8 revision_id;
|
|
+ u8 producer_consumer;
|
|
+ u8 sharable;
|
|
+ u16 function_number;
|
|
+ u16 vendor_length;
|
|
+ struct acpi_resource_source resource_source;
|
|
+ struct acpi_resource_label resource_source_label;
|
|
+ u8 *vendor_data;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_resource_pin_group_config {
|
|
+ u8 revision_id;
|
|
+ u8 producer_consumer;
|
|
+ u8 sharable;
|
|
+ u8 pin_config_type;
|
|
+ u32 pin_config_value;
|
|
+ u16 vendor_length;
|
|
+ struct acpi_resource_source resource_source;
|
|
+ struct acpi_resource_label resource_source_label;
|
|
+ u8 *vendor_data;
|
|
+} __attribute__((packed));
|
|
+
|
|
+union acpi_resource_data {
|
|
+ struct acpi_resource_irq irq;
|
|
+ struct acpi_resource_dma dma;
|
|
+ struct acpi_resource_start_dependent start_dpf;
|
|
+ struct acpi_resource_io io;
|
|
+ struct acpi_resource_fixed_io fixed_io;
|
|
+ struct acpi_resource_fixed_dma fixed_dma;
|
|
+ struct acpi_resource_vendor vendor;
|
|
+ struct acpi_resource_vendor_typed vendor_typed;
|
|
+ struct acpi_resource_end_tag end_tag;
|
|
+ struct acpi_resource_memory24 memory24;
|
|
+ struct acpi_resource_memory32 memory32;
|
|
+ struct acpi_resource_fixed_memory32 fixed_memory32;
|
|
+ struct acpi_resource_address16 address16;
|
|
+ struct acpi_resource_address32 address32;
|
|
+ struct acpi_resource_address64 address64;
|
|
+ struct acpi_resource_extended_address64 ext_address64;
|
|
+ struct acpi_resource_extended_irq extended_irq;
|
|
+ struct acpi_resource_generic_register generic_reg;
|
|
+ struct acpi_resource_gpio gpio;
|
|
+ struct acpi_resource_i2c_serialbus i2c_serial_bus;
|
|
+ struct acpi_resource_spi_serialbus spi_serial_bus;
|
|
+ struct acpi_resource_uart_serialbus uart_serial_bus;
|
|
+ struct acpi_resource_common_serialbus common_serial_bus;
|
|
+ struct acpi_resource_pin_function pin_function;
|
|
+ struct acpi_resource_pin_config pin_config;
|
|
+ struct acpi_resource_pin_group pin_group;
|
|
+ struct acpi_resource_pin_group_function pin_group_function;
|
|
+ struct acpi_resource_pin_group_config pin_group_config;
|
|
+ struct acpi_resource_address address;
|
|
+};
|
|
+
|
|
+struct acpi_resource {
|
|
+ u32 type;
|
|
+ u32 length;
|
|
+ union acpi_resource_data data;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_gpio_event {
|
|
+ struct list_head node;
|
|
+ acpi_handle handle;
|
|
+ irq_handler_t handler;
|
|
+ unsigned int pin;
|
|
+ unsigned int irq;
|
|
+ long unsigned int irqflags;
|
|
+ bool irq_is_wake;
|
|
+ bool irq_requested;
|
|
+ struct gpio_desc *desc;
|
|
+};
|
|
+
|
|
+struct acpi_gpio_connection {
|
|
+ struct list_head node;
|
|
+ unsigned int pin;
|
|
+ struct gpio_desc *desc;
|
|
+};
|
|
+
|
|
+struct acpi_gpio_chip {
|
|
+ struct acpi_connection_info conn_info;
|
|
+ struct list_head conns;
|
|
+ struct mutex conn_lock;
|
|
+ struct gpio_chip *chip;
|
|
+ struct list_head events;
|
|
+ struct list_head deferred_req_irqs_list_entry;
|
|
+};
|
|
+
|
|
+struct acpi_gpio_lookup {
|
|
+ struct acpi_gpio_info info;
|
|
+ int index;
|
|
+ int pin_index;
|
|
+ bool active_low;
|
|
+ struct gpio_desc *desc;
|
|
+ int n;
|
|
+};
|
|
+
|
|
+enum pwm_polarity {
|
|
+ PWM_POLARITY_NORMAL = 0,
|
|
+ PWM_POLARITY_INVERSED = 1,
|
|
+};
|
|
+
|
|
+struct pwm_args {
|
|
+ unsigned int period;
|
|
+ enum pwm_polarity polarity;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ PWMF_REQUESTED = 1,
|
|
+ PWMF_EXPORTED = 2,
|
|
+};
|
|
+
|
|
+struct pwm_state {
|
|
+ unsigned int period;
|
|
+ unsigned int duty_cycle;
|
|
+ enum pwm_polarity polarity;
|
|
+ bool enabled;
|
|
+};
|
|
+
|
|
+struct pwm_chip;
|
|
+
|
|
+struct pwm_device {
|
|
+ const char *label;
|
|
+ long unsigned int flags;
|
|
+ unsigned int hwpwm;
|
|
+ unsigned int pwm;
|
|
+ struct pwm_chip *chip;
|
|
+ void *chip_data;
|
|
+ struct pwm_args args;
|
|
+ struct pwm_state state;
|
|
+};
|
|
+
|
|
+struct pwm_ops;
|
|
+
|
|
+struct pwm_chip {
|
|
+ struct device *dev;
|
|
+ struct list_head list;
|
|
+ const struct pwm_ops *ops;
|
|
+ int base;
|
|
+ unsigned int npwm;
|
|
+ struct pwm_device *pwms;
|
|
+ struct pwm_device * (*of_xlate)(struct pwm_chip *, const struct of_phandle_args *);
|
|
+ unsigned int of_pwm_n_cells;
|
|
+};
|
|
+
|
|
+struct pwm_capture;
|
|
+
|
|
+struct pwm_ops {
|
|
+ int (*request)(struct pwm_chip *, struct pwm_device *);
|
|
+ void (*free)(struct pwm_chip *, struct pwm_device *);
|
|
+ int (*config)(struct pwm_chip *, struct pwm_device *, int, int);
|
|
+ int (*set_polarity)(struct pwm_chip *, struct pwm_device *, enum pwm_polarity);
|
|
+ int (*capture)(struct pwm_chip *, struct pwm_device *, struct pwm_capture *, long unsigned int);
|
|
+ int (*enable)(struct pwm_chip *, struct pwm_device *);
|
|
+ void (*disable)(struct pwm_chip *, struct pwm_device *);
|
|
+ int (*apply)(struct pwm_chip *, struct pwm_device *, struct pwm_state *);
|
|
+ void (*get_state)(struct pwm_chip *, struct pwm_device *, struct pwm_state *);
|
|
+ void (*dbg_show)(struct pwm_chip *, struct seq_file *);
|
|
+ struct module *owner;
|
|
+};
|
|
+
|
|
+struct pwm_capture {
|
|
+ unsigned int period;
|
|
+ unsigned int duty_cycle;
|
|
+};
|
|
+
|
|
+struct pwm_lookup {
|
|
+ struct list_head list;
|
|
+ const char *provider;
|
|
+ unsigned int index;
|
|
+ const char *dev_id;
|
|
+ const char *con_id;
|
|
+ unsigned int period;
|
|
+ enum pwm_polarity polarity;
|
|
+ const char *module;
|
|
+};
|
|
+
|
|
+struct pwm_export {
|
|
+ struct device child;
|
|
+ struct pwm_device *pwm;
|
|
+ struct mutex lock;
|
|
+};
|
|
+
|
|
+struct pci_sriov {
|
|
+ int pos;
|
|
+ int nres;
|
|
+ u32 cap;
|
|
+ u16 ctrl;
|
|
+ u16 total_VFs;
|
|
+ u16 initial_VFs;
|
|
+ u16 num_VFs;
|
|
+ u16 offset;
|
|
+ u16 stride;
|
|
+ u16 vf_device;
|
|
+ u32 pgsz;
|
|
+ u8 link;
|
|
+ u8 max_VF_buses;
|
|
+ u16 driver_max_VFs;
|
|
+ struct pci_dev *dev;
|
|
+ struct pci_dev *self;
|
|
+ u32 class;
|
|
+ u8 hdr_type;
|
|
+ u16 subsystem_vendor;
|
|
+ u16 subsystem_device;
|
|
+ resource_size_t barsz[6];
|
|
+ bool drivers_autoprobe;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+};
|
|
+
|
|
+struct pci_bus_resource {
|
|
+ struct list_head list;
|
|
+ struct resource *res;
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+typedef u64 pci_bus_addr_t;
|
|
+
|
|
+struct pci_bus_region {
|
|
+ pci_bus_addr_t start;
|
|
+ pci_bus_addr_t end;
|
|
+};
|
|
+
|
|
+enum pci_fixup_pass {
|
|
+ pci_fixup_early = 0,
|
|
+ pci_fixup_header = 1,
|
|
+ pci_fixup_final = 2,
|
|
+ pci_fixup_enable = 3,
|
|
+ pci_fixup_resume = 4,
|
|
+ pci_fixup_suspend = 5,
|
|
+ pci_fixup_resume_early = 6,
|
|
+ pci_fixup_suspend_late = 7,
|
|
+};
|
|
+
|
|
+struct hotplug_slot_ops;
|
|
+
|
|
+struct hotplug_slot_info;
|
|
+
|
|
+struct hotplug_slot {
|
|
+ struct hotplug_slot_ops *ops;
|
|
+ struct hotplug_slot_info *info;
|
|
+ void *private;
|
|
+ struct list_head slot_list;
|
|
+ struct pci_slot *pci_slot;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+};
|
|
+
|
|
+enum pci_dev_flags {
|
|
+ PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = 1,
|
|
+ PCI_DEV_FLAGS_NO_D3 = 2,
|
|
+ PCI_DEV_FLAGS_ASSIGNED = 4,
|
|
+ PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = 8,
|
|
+ PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = 32,
|
|
+ PCI_DEV_FLAGS_NO_BUS_RESET = 64,
|
|
+ PCI_DEV_FLAGS_NO_PM_RESET = 128,
|
|
+ PCI_DEV_FLAGS_VPD_REF_F0 = 256,
|
|
+ PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = 512,
|
|
+ PCI_DEV_FLAGS_NO_FLR_RESET = 1024,
|
|
+ PCI_DEV_FLAGS_NO_RELAXED_ORDERING = 2048,
|
|
+};
|
|
+
|
|
+enum pci_bus_flags {
|
|
+ PCI_BUS_FLAGS_NO_MSI = 1,
|
|
+ PCI_BUS_FLAGS_NO_MMRBC = 2,
|
|
+ PCI_BUS_FLAGS_NO_AERSID = 4,
|
|
+ PCI_BUS_FLAGS_NO_EXTCFG = 8,
|
|
+};
|
|
+
|
|
+enum pci_bus_speed {
|
|
+ PCI_SPEED_33MHz = 0,
|
|
+ PCI_SPEED_66MHz = 1,
|
|
+ PCI_SPEED_66MHz_PCIX = 2,
|
|
+ PCI_SPEED_100MHz_PCIX = 3,
|
|
+ PCI_SPEED_133MHz_PCIX = 4,
|
|
+ PCI_SPEED_66MHz_PCIX_ECC = 5,
|
|
+ PCI_SPEED_100MHz_PCIX_ECC = 6,
|
|
+ PCI_SPEED_133MHz_PCIX_ECC = 7,
|
|
+ PCI_SPEED_66MHz_PCIX_266 = 9,
|
|
+ PCI_SPEED_100MHz_PCIX_266 = 10,
|
|
+ PCI_SPEED_133MHz_PCIX_266 = 11,
|
|
+ AGP_UNKNOWN = 12,
|
|
+ AGP_1X = 13,
|
|
+ AGP_2X = 14,
|
|
+ AGP_4X = 15,
|
|
+ AGP_8X = 16,
|
|
+ PCI_SPEED_66MHz_PCIX_533 = 17,
|
|
+ PCI_SPEED_100MHz_PCIX_533 = 18,
|
|
+ PCI_SPEED_133MHz_PCIX_533 = 19,
|
|
+ PCIE_SPEED_2_5GT = 20,
|
|
+ PCIE_SPEED_5_0GT = 21,
|
|
+ PCIE_SPEED_8_0GT = 22,
|
|
+ PCIE_SPEED_16_0GT = 23,
|
|
+ PCIE_SPEED_32_0GT = 24,
|
|
+ PCI_SPEED_UNKNOWN = 255,
|
|
+};
|
|
+
|
|
+struct pci_host_bridge {
|
|
+ struct device dev;
|
|
+ struct pci_bus *bus;
|
|
+ struct pci_ops *ops;
|
|
+ void *sysdata;
|
|
+ int busnr;
|
|
+ struct list_head windows;
|
|
+ u8 (*swizzle_irq)(struct pci_dev *, u8 *);
|
|
+ int (*map_irq)(const struct pci_dev *, u8, u8);
|
|
+ void (*release_fn)(struct pci_host_bridge *);
|
|
+ void *release_data;
|
|
+ struct msi_controller *msi;
|
|
+ unsigned int ignore_reset_delay: 1;
|
|
+ unsigned int no_ext_tags: 1;
|
|
+ unsigned int native_aer: 1;
|
|
+ unsigned int native_pcie_hotplug: 1;
|
|
+ unsigned int native_shpc_hotplug: 1;
|
|
+ unsigned int native_pme: 1;
|
|
+ unsigned int native_ltr: 1;
|
|
+ unsigned int preserve_config: 1;
|
|
+ resource_size_t (*align_resource)(struct pci_dev *, const struct resource *, resource_size_t, resource_size_t, resource_size_t);
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long unsigned int private[0];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ PCI_REASSIGN_ALL_RSRC = 1,
|
|
+ PCI_REASSIGN_ALL_BUS = 2,
|
|
+ PCI_PROBE_ONLY = 4,
|
|
+ PCI_CAN_SKIP_ISA_ALIGN = 8,
|
|
+ PCI_ENABLE_PROC_DOMAINS = 16,
|
|
+ PCI_COMPAT_DOMAIN_0 = 32,
|
|
+ PCI_SCAN_ALL_PCIE_DEVS = 64,
|
|
+};
|
|
+
|
|
+struct hotplug_slot_ops {
|
|
+ struct module *owner;
|
|
+ const char *mod_name;
|
|
+ int (*enable_slot)(struct hotplug_slot *);
|
|
+ int (*disable_slot)(struct hotplug_slot *);
|
|
+ int (*set_attention_status)(struct hotplug_slot *, u8);
|
|
+ int (*hardware_test)(struct hotplug_slot *, u32);
|
|
+ int (*get_power_status)(struct hotplug_slot *, u8 *);
|
|
+ int (*get_attention_status)(struct hotplug_slot *, u8 *);
|
|
+ int (*get_latch_status)(struct hotplug_slot *, u8 *);
|
|
+ int (*get_adapter_status)(struct hotplug_slot *, u8 *);
|
|
+ int (*reset_slot)(struct hotplug_slot *, int);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int kabi_reserved7;
|
|
+ long unsigned int kabi_reserved8;
|
|
+};
|
|
+
|
|
+struct hotplug_slot_info {
|
|
+ u8 power_status;
|
|
+ u8 attention_status;
|
|
+ u8 latch_status;
|
|
+ u8 adapter_status;
|
|
+};
|
|
+
|
|
+struct hpp_type0 {
|
|
+ u32 revision;
|
|
+ u8 cache_line_size;
|
|
+ u8 latency_timer;
|
|
+ u8 enable_serr;
|
|
+ u8 enable_perr;
|
|
+};
|
|
+
|
|
+struct hpp_type1 {
|
|
+ u32 revision;
|
|
+ u8 max_mem_read;
|
|
+ u8 avg_max_split;
|
|
+ u16 tot_max_split;
|
|
+};
|
|
+
|
|
+struct hpp_type2 {
|
|
+ u32 revision;
|
|
+ u32 unc_err_mask_and;
|
|
+ u32 unc_err_mask_or;
|
|
+ u32 unc_err_sever_and;
|
|
+ u32 unc_err_sever_or;
|
|
+ u32 cor_err_mask_and;
|
|
+ u32 cor_err_mask_or;
|
|
+ u32 adv_err_cap_and;
|
|
+ u32 adv_err_cap_or;
|
|
+ u16 pci_exp_devctl_and;
|
|
+ u16 pci_exp_devctl_or;
|
|
+ u16 pci_exp_lnkctl_and;
|
|
+ u16 pci_exp_lnkctl_or;
|
|
+ u32 sec_unc_err_sever_and;
|
|
+ u32 sec_unc_err_sever_or;
|
|
+ u32 sec_unc_err_mask_and;
|
|
+ u32 sec_unc_err_mask_or;
|
|
+};
|
|
+
|
|
+struct hotplug_params {
|
|
+ struct hpp_type0 *t0;
|
|
+ struct hpp_type1 *t1;
|
|
+ struct hpp_type2 *t2;
|
|
+ struct hpp_type0 type0_data;
|
|
+ struct hpp_type1 type1_data;
|
|
+ struct hpp_type2 type2_data;
|
|
+};
|
|
+
|
|
+enum pci_bar_type {
|
|
+ pci_bar_unknown = 0,
|
|
+ pci_bar_io = 1,
|
|
+ pci_bar_mem32 = 2,
|
|
+ pci_bar_mem64 = 3,
|
|
+};
|
|
+
|
|
+struct pci_domain_busn_res {
|
|
+ struct list_head list;
|
|
+ struct resource res;
|
|
+ int domain_nr;
|
|
+};
|
|
+
|
|
+struct skip_bus_num {
|
|
+ char module_name[32];
|
|
+ char label[4];
|
|
+ int bus_num;
|
|
+ int dev_num;
|
|
+ int skip;
|
|
+};
|
|
+
|
|
+struct bus_attribute {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct bus_type *, char *);
|
|
+ ssize_t (*store)(struct bus_type *, const char *, size_t);
|
|
+};
|
|
+
|
|
+enum pcie_reset_state {
|
|
+ pcie_deassert_reset = 1,
|
|
+ pcie_warm_reset = 2,
|
|
+ pcie_hot_reset = 3,
|
|
+};
|
|
+
|
|
+enum pcie_link_width {
|
|
+ PCIE_LNK_WIDTH_RESRV = 0,
|
|
+ PCIE_LNK_X1 = 1,
|
|
+ PCIE_LNK_X2 = 2,
|
|
+ PCIE_LNK_X4 = 4,
|
|
+ PCIE_LNK_X8 = 8,
|
|
+ PCIE_LNK_X12 = 12,
|
|
+ PCIE_LNK_X16 = 16,
|
|
+ PCIE_LNK_X32 = 32,
|
|
+ PCIE_LNK_WIDTH_UNKNOWN = 255,
|
|
+};
|
|
+
|
|
+struct pci_cap_saved_data {
|
|
+ u16 cap_nr;
|
|
+ bool cap_extended;
|
|
+ unsigned int size;
|
|
+ u32 data[0];
|
|
+};
|
|
+
|
|
+struct pci_cap_saved_state {
|
|
+ struct hlist_node next;
|
|
+ struct pci_cap_saved_data cap;
|
|
+};
|
|
+
|
|
+typedef int (*arch_set_vga_state_t)(struct pci_dev *, bool, unsigned int, u32);
|
|
+
|
|
+struct pci_platform_pm_ops {
|
|
+ bool (*is_manageable)(struct pci_dev *);
|
|
+ int (*set_state)(struct pci_dev *, pci_power_t);
|
|
+ pci_power_t (*get_state)(struct pci_dev *);
|
|
+ void (*refresh_state)(struct pci_dev *);
|
|
+ pci_power_t (*choose_state)(struct pci_dev *);
|
|
+ int (*set_wakeup)(struct pci_dev *, bool);
|
|
+ bool (*need_resume)(struct pci_dev *);
|
|
+};
|
|
+
|
|
+struct pci_pme_device {
|
|
+ struct list_head list;
|
|
+ struct pci_dev *dev;
|
|
+};
|
|
+
|
|
+struct pci_saved_state {
|
|
+ u32 config_space[16];
|
|
+ struct pci_cap_saved_data cap[0];
|
|
+};
|
|
+
|
|
+struct pci_devres {
|
|
+ unsigned int enabled: 1;
|
|
+ unsigned int pinned: 1;
|
|
+ unsigned int orig_intx: 1;
|
|
+ unsigned int restore_intx: 1;
|
|
+ unsigned int mwi: 1;
|
|
+ u32 region_mask;
|
|
+};
|
|
+
|
|
+struct driver_attribute {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct device_driver *, char *);
|
|
+ ssize_t (*store)(struct device_driver *, const char *, size_t);
|
|
+};
|
|
+
|
|
+enum pci_ers_result {
|
|
+ PCI_ERS_RESULT_NONE = 1,
|
|
+ PCI_ERS_RESULT_CAN_RECOVER = 2,
|
|
+ PCI_ERS_RESULT_NEED_RESET = 3,
|
|
+ PCI_ERS_RESULT_DISCONNECT = 4,
|
|
+ PCI_ERS_RESULT_RECOVERED = 5,
|
|
+ PCI_ERS_RESULT_NO_AER_DRIVER = 6,
|
|
+};
|
|
+
|
|
+enum dev_dma_attr {
|
|
+ DEV_DMA_NOT_SUPPORTED = 0,
|
|
+ DEV_DMA_NON_COHERENT = 1,
|
|
+ DEV_DMA_COHERENT = 2,
|
|
+};
|
|
+
|
|
+struct pcie_device {
|
|
+ int irq;
|
|
+ struct pci_dev *port;
|
|
+ u32 service;
|
|
+ void *priv_data;
|
|
+ struct device device;
|
|
+};
|
|
+
|
|
+struct pcie_port_service_driver {
|
|
+ const char *name;
|
|
+ int (*probe)(struct pcie_device *);
|
|
+ void (*remove)(struct pcie_device *);
|
|
+ int (*suspend)(struct pcie_device *);
|
|
+ int (*resume_noirq)(struct pcie_device *);
|
|
+ int (*resume)(struct pcie_device *);
|
|
+ void (*error_resume)(struct pci_dev *);
|
|
+ pci_ers_result_t (*reset_link)(struct pci_dev *);
|
|
+ int port_type;
|
|
+ u32 service;
|
|
+ struct device_driver driver;
|
|
+};
|
|
+
|
|
+struct pci_dynid {
|
|
+ struct list_head node;
|
|
+ struct pci_device_id id;
|
|
+};
|
|
+
|
|
+struct drv_dev_and_id {
|
|
+ struct pci_driver *drv;
|
|
+ struct pci_dev *dev;
|
|
+ const struct pci_device_id *id;
|
|
+};
|
|
+
|
|
+enum pci_mmap_state {
|
|
+ pci_mmap_io = 0,
|
|
+ pci_mmap_mem = 1,
|
|
+};
|
|
+
|
|
+enum pci_mmap_api {
|
|
+ PCI_MMAP_SYSFS = 0,
|
|
+ PCI_MMAP_PROCFS = 1,
|
|
+};
|
|
+
|
|
+enum pci_lost_interrupt_reason {
|
|
+ PCI_LOST_IRQ_NO_INFORMATION = 0,
|
|
+ PCI_LOST_IRQ_DISABLE_MSI = 1,
|
|
+ PCI_LOST_IRQ_DISABLE_MSIX = 2,
|
|
+ PCI_LOST_IRQ_DISABLE_ACPI = 3,
|
|
+};
|
|
+
|
|
+struct pci_vpd_ops;
|
|
+
|
|
+struct pci_vpd {
|
|
+ const struct pci_vpd_ops *ops;
|
|
+ struct bin_attribute *attr;
|
|
+ struct mutex lock;
|
|
+ unsigned int len;
|
|
+ u16 flag;
|
|
+ u8 cap;
|
|
+ unsigned int busy: 1;
|
|
+ unsigned int valid: 1;
|
|
+};
|
|
+
|
|
+struct pci_vpd_ops {
|
|
+ ssize_t (*read)(struct pci_dev *, loff_t, size_t, void *);
|
|
+ ssize_t (*write)(struct pci_dev *, loff_t, size_t, const void *);
|
|
+ int (*set_size)(struct pci_dev *, size_t);
|
|
+};
|
|
+
|
|
+struct pci_dev_resource {
|
|
+ struct list_head list;
|
|
+ struct resource *res;
|
|
+ struct pci_dev *dev;
|
|
+ resource_size_t start;
|
|
+ resource_size_t end;
|
|
+ resource_size_t add_size;
|
|
+ resource_size_t min_align;
|
|
+ long unsigned int flags;
|
|
+};
|
|
+
|
|
+enum release_type {
|
|
+ leaf_only = 0,
|
|
+ whole_subtree = 1,
|
|
+};
|
|
+
|
|
+enum enable_type {
|
|
+ undefined = -1,
|
|
+ user_disabled = 0,
|
|
+ auto_disabled = 1,
|
|
+ user_enabled = 2,
|
|
+ auto_enabled = 3,
|
|
+};
|
|
+
|
|
+struct pci_filp_private {
|
|
+ enum pci_mmap_state mmap_state;
|
|
+ int write_combine;
|
|
+};
|
|
+
|
|
+struct pci_slot_attribute {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct pci_slot *, char *);
|
|
+ ssize_t (*store)(struct pci_slot *, const char *, size_t);
|
|
+};
|
|
+
|
|
+enum pci_irq_reroute_variant {
|
|
+ INTEL_IRQ_REROUTE_VARIANT = 1,
|
|
+ MAX_IRQ_REROUTE_VARIANTS = 3,
|
|
+};
|
|
+
|
|
+struct pci_fixup {
|
|
+ u16 vendor;
|
|
+ u16 device;
|
|
+ u32 class;
|
|
+ unsigned int class_shift;
|
|
+ int hook_offset;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NVME_REG_CAP = 0,
|
|
+ NVME_REG_VS = 8,
|
|
+ NVME_REG_INTMS = 12,
|
|
+ NVME_REG_INTMC = 16,
|
|
+ NVME_REG_CC = 20,
|
|
+ NVME_REG_CSTS = 28,
|
|
+ NVME_REG_NSSR = 32,
|
|
+ NVME_REG_AQA = 36,
|
|
+ NVME_REG_ASQ = 40,
|
|
+ NVME_REG_ACQ = 48,
|
|
+ NVME_REG_CMBLOC = 56,
|
|
+ NVME_REG_CMBSZ = 60,
|
|
+ NVME_REG_DBS = 4096,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NVME_CC_ENABLE = 1,
|
|
+ NVME_CC_CSS_NVM = 0,
|
|
+ NVME_CC_EN_SHIFT = 0,
|
|
+ NVME_CC_CSS_SHIFT = 4,
|
|
+ NVME_CC_MPS_SHIFT = 7,
|
|
+ NVME_CC_AMS_SHIFT = 11,
|
|
+ NVME_CC_SHN_SHIFT = 14,
|
|
+ NVME_CC_IOSQES_SHIFT = 16,
|
|
+ NVME_CC_IOCQES_SHIFT = 20,
|
|
+ NVME_CC_AMS_RR = 0,
|
|
+ NVME_CC_AMS_WRRU = 2048,
|
|
+ NVME_CC_AMS_VS = 14336,
|
|
+ NVME_CC_SHN_NONE = 0,
|
|
+ NVME_CC_SHN_NORMAL = 16384,
|
|
+ NVME_CC_SHN_ABRUPT = 32768,
|
|
+ NVME_CC_SHN_MASK = 49152,
|
|
+ NVME_CC_IOSQES = 393216,
|
|
+ NVME_CC_IOCQES = 4194304,
|
|
+ NVME_CSTS_RDY = 1,
|
|
+ NVME_CSTS_CFS = 2,
|
|
+ NVME_CSTS_NSSRO = 16,
|
|
+ NVME_CSTS_PP = 32,
|
|
+ NVME_CSTS_SHST_NORMAL = 0,
|
|
+ NVME_CSTS_SHST_OCCUR = 4,
|
|
+ NVME_CSTS_SHST_CMPLT = 8,
|
|
+ NVME_CSTS_SHST_MASK = 12,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SWITCHTEC_GAS_MRPC_OFFSET = 0,
|
|
+ SWITCHTEC_GAS_TOP_CFG_OFFSET = 4096,
|
|
+ SWITCHTEC_GAS_SW_EVENT_OFFSET = 6144,
|
|
+ SWITCHTEC_GAS_SYS_INFO_OFFSET = 8192,
|
|
+ SWITCHTEC_GAS_FLASH_INFO_OFFSET = 8704,
|
|
+ SWITCHTEC_GAS_PART_CFG_OFFSET = 16384,
|
|
+ SWITCHTEC_GAS_NTB_OFFSET = 65536,
|
|
+ SWITCHTEC_GAS_PFF_CSR_OFFSET = 1261568,
|
|
+};
|
|
+
|
|
+struct sys_info_regs {
|
|
+ u32 device_id;
|
|
+ u32 device_version;
|
|
+ u32 firmware_version;
|
|
+ u32 reserved1;
|
|
+ u32 vendor_table_revision;
|
|
+ u32 table_format_version;
|
|
+ u32 partition_id;
|
|
+ u32 cfg_file_fmt_version;
|
|
+ u16 cfg_running;
|
|
+ u16 img_running;
|
|
+ u32 reserved2[57];
|
|
+ char vendor_id[8];
|
|
+ char product_id[16];
|
|
+ char product_revision[4];
|
|
+ char component_vendor[8];
|
|
+ u16 component_id;
|
|
+ u8 component_revision;
|
|
+} __attribute__((packed));
|
|
+
|
|
+enum {
|
|
+ SWITCHTEC_NTB_REG_INFO_OFFSET = 0,
|
|
+ SWITCHTEC_NTB_REG_CTRL_OFFSET = 16384,
|
|
+ SWITCHTEC_NTB_REG_DBMSG_OFFSET = 409600,
|
|
+};
|
|
+
|
|
+struct nt_partition_info {
|
|
+ u32 xlink_enabled;
|
|
+ u32 target_part_low;
|
|
+ u32 target_part_high;
|
|
+ u32 reserved;
|
|
+};
|
|
+
|
|
+struct ntb_info_regs {
|
|
+ u8 partition_count;
|
|
+ u8 partition_id;
|
|
+ u16 reserved1;
|
|
+ u64 ep_map;
|
|
+ u16 requester_id;
|
|
+ u16 reserved2;
|
|
+ u32 reserved3[4];
|
|
+ struct nt_partition_info ntp_info[48];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct ntb_ctrl_regs {
|
|
+ u32 partition_status;
|
|
+ u32 partition_op;
|
|
+ u32 partition_ctrl;
|
|
+ u32 bar_setup;
|
|
+ u32 bar_error;
|
|
+ u16 lut_table_entries;
|
|
+ u16 lut_table_offset;
|
|
+ u32 lut_error;
|
|
+ u16 req_id_table_size;
|
|
+ u16 req_id_table_offset;
|
|
+ u32 req_id_error;
|
|
+ u32 reserved1[7];
|
|
+ struct {
|
|
+ u32 ctl;
|
|
+ u32 win_size;
|
|
+ u64 xlate_addr;
|
|
+ } bar_entry[6];
|
|
+ u32 reserved2[216];
|
|
+ u32 req_id_table[256];
|
|
+ u32 reserved3[512];
|
|
+ u64 lut_entry[512];
|
|
+};
|
|
+
|
|
+struct pci_dev_reset_methods {
|
|
+ u16 vendor;
|
|
+ u16 device;
|
|
+ int (*reset)(struct pci_dev *, int);
|
|
+};
|
|
+
|
|
+struct pci_dev_acs_enabled {
|
|
+ u16 vendor;
|
|
+ u16 device;
|
|
+ int (*acs_enabled)(struct pci_dev *, u16);
|
|
+};
|
|
+
|
|
+struct pci_dev_acs_ops {
|
|
+ u16 vendor;
|
|
+ u16 device;
|
|
+ int (*enable_acs)(struct pci_dev *);
|
|
+ int (*disable_acs_redir)(struct pci_dev *);
|
|
+};
|
|
+
|
|
+struct portdrv_service_data {
|
|
+ struct pcie_port_service_driver *drv;
|
|
+ struct device *dev;
|
|
+ u32 service;
|
|
+};
|
|
+
|
|
+typedef int (*pcie_pm_callback_t)(struct pcie_device *);
|
|
+
|
|
+struct aspm_latency {
|
|
+ u32 l0s;
|
|
+ u32 l1;
|
|
+};
|
|
+
|
|
+struct pcie_link_state {
|
|
+ struct pci_dev *pdev;
|
|
+ struct pci_dev *downstream;
|
|
+ struct pcie_link_state *root;
|
|
+ struct pcie_link_state *parent;
|
|
+ struct list_head sibling;
|
|
+ struct list_head children;
|
|
+ struct list_head link;
|
|
+ u32 aspm_support: 7;
|
|
+ u32 aspm_enabled: 7;
|
|
+ u32 aspm_capable: 7;
|
|
+ u32 aspm_default: 7;
|
|
+ char: 4;
|
|
+ u32 aspm_disable: 7;
|
|
+ u32 clkpm_capable: 1;
|
|
+ u32 clkpm_enabled: 1;
|
|
+ u32 clkpm_default: 1;
|
|
+ struct aspm_latency latency_up;
|
|
+ struct aspm_latency latency_dw;
|
|
+ struct aspm_latency acceptable[8];
|
|
+ struct {
|
|
+ u32 up_cap_ptr;
|
|
+ u32 dw_cap_ptr;
|
|
+ u32 ctl1;
|
|
+ u32 ctl2;
|
|
+ } l1ss;
|
|
+};
|
|
+
|
|
+struct aspm_register_info {
|
|
+ u32 support: 2;
|
|
+ u32 enabled: 2;
|
|
+ u32 latency_encoding_l0s;
|
|
+ u32 latency_encoding_l1;
|
|
+ u32 l1ss_cap_ptr;
|
|
+ u32 l1ss_cap;
|
|
+ u32 l1ss_ctl1;
|
|
+ u32 l1ss_ctl2;
|
|
+};
|
|
+
|
|
+struct aer_stats {
|
|
+ u64 dev_cor_errs[16];
|
|
+ u64 dev_fatal_errs[26];
|
|
+ u64 dev_nonfatal_errs[26];
|
|
+ u64 dev_total_cor_errs;
|
|
+ u64 dev_total_fatal_errs;
|
|
+ u64 dev_total_nonfatal_errs;
|
|
+ u64 rootport_total_cor_errs;
|
|
+ u64 rootport_total_fatal_errs;
|
|
+ u64 rootport_total_nonfatal_errs;
|
|
+};
|
|
+
|
|
+enum acpi_hest_types {
|
|
+ ACPI_HEST_TYPE_IA32_CHECK = 0,
|
|
+ ACPI_HEST_TYPE_IA32_CORRECTED_CHECK = 1,
|
|
+ ACPI_HEST_TYPE_IA32_NMI = 2,
|
|
+ ACPI_HEST_TYPE_NOT_USED3 = 3,
|
|
+ ACPI_HEST_TYPE_NOT_USED4 = 4,
|
|
+ ACPI_HEST_TYPE_NOT_USED5 = 5,
|
|
+ ACPI_HEST_TYPE_AER_ROOT_PORT = 6,
|
|
+ ACPI_HEST_TYPE_AER_ENDPOINT = 7,
|
|
+ ACPI_HEST_TYPE_AER_BRIDGE = 8,
|
|
+ ACPI_HEST_TYPE_GENERIC_ERROR = 9,
|
|
+ ACPI_HEST_TYPE_GENERIC_ERROR_V2 = 10,
|
|
+ ACPI_HEST_TYPE_IA32_DEFERRED_CHECK = 11,
|
|
+ ACPI_HEST_TYPE_RESERVED = 12,
|
|
+};
|
|
+
|
|
+struct acpi_hest_aer_common {
|
|
+ u16 reserved1;
|
|
+ u8 flags;
|
|
+ u8 enabled;
|
|
+ u32 records_to_preallocate;
|
|
+ u32 max_sections_per_record;
|
|
+ u32 bus;
|
|
+ u16 device;
|
|
+ u16 function;
|
|
+ u16 device_control;
|
|
+ u16 reserved2;
|
|
+ u32 uncorrectable_mask;
|
|
+ u32 uncorrectable_severity;
|
|
+ u32 correctable_mask;
|
|
+ u32 advanced_capabilities;
|
|
+};
|
|
+
|
|
+struct aer_header_log_regs {
|
|
+ unsigned int dw0;
|
|
+ unsigned int dw1;
|
|
+ unsigned int dw2;
|
|
+ unsigned int dw3;
|
|
+};
|
|
+
|
|
+struct aer_capability_regs {
|
|
+ u32 header;
|
|
+ u32 uncor_status;
|
|
+ u32 uncor_mask;
|
|
+ u32 uncor_severity;
|
|
+ u32 cor_status;
|
|
+ u32 cor_mask;
|
|
+ u32 cap_control;
|
|
+ struct aer_header_log_regs header_log;
|
|
+ u32 root_command;
|
|
+ u32 root_status;
|
|
+ u16 cor_err_source;
|
|
+ u16 uncor_err_source;
|
|
+};
|
|
+
|
|
+struct aer_err_info {
|
|
+ struct pci_dev *dev[5];
|
|
+ int error_dev_num;
|
|
+ unsigned int id: 16;
|
|
+ unsigned int severity: 2;
|
|
+ unsigned int __pad1: 5;
|
|
+ unsigned int multi_error_valid: 1;
|
|
+ unsigned int first_error: 5;
|
|
+ unsigned int __pad2: 2;
|
|
+ unsigned int tlp_header_valid: 1;
|
|
+ unsigned int status;
|
|
+ unsigned int mask;
|
|
+ struct aer_header_log_regs tlp;
|
|
+};
|
|
+
|
|
+struct aer_err_source {
|
|
+ unsigned int status;
|
|
+ unsigned int id;
|
|
+};
|
|
+
|
|
+struct aer_rpc {
|
|
+ struct pci_dev *rpd;
|
|
+ struct {
|
|
+ union {
|
|
+ struct __kfifo kfifo;
|
|
+ struct aer_err_source *type;
|
|
+ const struct aer_err_source *const_type;
|
|
+ char (*rectype)[0];
|
|
+ struct aer_err_source *ptr;
|
|
+ const struct aer_err_source *ptr_const;
|
|
+ };
|
|
+ struct aer_err_source buf[128];
|
|
+ } aer_fifo;
|
|
+};
|
|
+
|
|
+struct aer_hest_parse_info {
|
|
+ struct pci_dev *pci_dev;
|
|
+ int firmware_first;
|
|
+};
|
|
+
|
|
+struct aer_recover_entry {
|
|
+ u8 bus;
|
|
+ u8 devfn;
|
|
+ u16 domain;
|
|
+ int severity;
|
|
+ struct aer_capability_regs *regs;
|
|
+};
|
|
+
|
|
+struct pcie_pme_service_data {
|
|
+ spinlock_t lock;
|
|
+ struct pcie_device *srv;
|
|
+ struct work_struct work;
|
|
+ bool noirq;
|
|
+};
|
|
+
|
|
+struct dpc_dev {
|
|
+ struct pcie_device *dev;
|
|
+ u16 cap_pos;
|
|
+ bool rp_extensions;
|
|
+ u8 rp_log_size;
|
|
+};
|
|
+
|
|
+struct acpi_pci_root {
|
|
+ struct acpi_device *device;
|
|
+ struct pci_bus *bus;
|
|
+ u16 segment;
|
|
+ struct resource secondary;
|
|
+ u32 osc_support_set;
|
|
+ u32 osc_control_set;
|
|
+ phys_addr_t mcfg_addr;
|
|
+};
|
|
+
|
|
+struct controller;
|
|
+
|
|
+struct slot {
|
|
+ u8 state;
|
|
+ struct controller *ctrl;
|
|
+ struct hotplug_slot *hotplug_slot;
|
|
+ struct delayed_work work;
|
|
+ struct mutex lock;
|
|
+};
|
|
+
|
|
+struct controller {
|
|
+ struct mutex ctrl_lock;
|
|
+ struct pcie_device *pcie;
|
|
+ unsigned int inband_presence_disabled: 1;
|
|
+ struct rw_semaphore reset_lock;
|
|
+ struct slot *slot;
|
|
+ wait_queue_head_t queue;
|
|
+ u32 slot_cap;
|
|
+ u16 slot_ctrl;
|
|
+ struct task_struct *poll_thread;
|
|
+ long unsigned int cmd_started;
|
|
+ unsigned int cmd_busy: 1;
|
|
+ unsigned int link_active_reporting: 1;
|
|
+ unsigned int notification_enabled: 1;
|
|
+ unsigned int power_fault_detected;
|
|
+ atomic_t pending_events;
|
|
+ int request_result;
|
|
+ wait_queue_head_t requester;
|
|
+};
|
|
+
|
|
+struct controller___2;
|
|
+
|
|
+struct hpc_ops;
|
|
+
|
|
+struct slot___2 {
|
|
+ u8 bus;
|
|
+ u8 device;
|
|
+ u16 status;
|
|
+ u32 number;
|
|
+ u8 is_a_board;
|
|
+ u8 state;
|
|
+ u8 presence_save;
|
|
+ u8 pwr_save;
|
|
+ struct controller___2 *ctrl;
|
|
+ const struct hpc_ops *hpc_ops;
|
|
+ struct hotplug_slot *hotplug_slot;
|
|
+ struct list_head slot_list;
|
|
+ struct delayed_work work;
|
|
+ struct mutex lock;
|
|
+ struct workqueue_struct *wq;
|
|
+ u8 hp_slot;
|
|
+};
|
|
+
|
|
+struct controller___2 {
|
|
+ struct mutex crit_sect;
|
|
+ struct mutex cmd_lock;
|
|
+ int num_slots;
|
|
+ int slot_num_inc;
|
|
+ struct pci_dev *pci_dev;
|
|
+ struct list_head slot_list;
|
|
+ const struct hpc_ops *hpc_ops;
|
|
+ wait_queue_head_t queue;
|
|
+ u8 slot_device_offset;
|
|
+ u32 pcix_misc2_reg;
|
|
+ u32 first_slot;
|
|
+ u32 cap_offset;
|
|
+ long unsigned int mmio_base;
|
|
+ long unsigned int mmio_size;
|
|
+ void *creg;
|
|
+ struct timer_list poll_timer;
|
|
+};
|
|
+
|
|
+struct hpc_ops {
|
|
+ int (*power_on_slot)(struct slot___2 *);
|
|
+ int (*slot_enable)(struct slot___2 *);
|
|
+ int (*slot_disable)(struct slot___2 *);
|
|
+ int (*set_bus_speed_mode)(struct slot___2 *, enum pci_bus_speed);
|
|
+ int (*get_power_status)(struct slot___2 *, u8 *);
|
|
+ int (*get_attention_status)(struct slot___2 *, u8 *);
|
|
+ int (*set_attention_status)(struct slot___2 *, u8);
|
|
+ int (*get_latch_status)(struct slot___2 *, u8 *);
|
|
+ int (*get_adapter_status)(struct slot___2 *, u8 *);
|
|
+ int (*get_adapter_speed)(struct slot___2 *, enum pci_bus_speed *);
|
|
+ int (*get_mode1_ECC_cap)(struct slot___2 *, u8 *);
|
|
+ int (*get_prog_int)(struct slot___2 *, u8 *);
|
|
+ int (*query_power_fault)(struct slot___2 *);
|
|
+ void (*green_led_on)(struct slot___2 *);
|
|
+ void (*green_led_off)(struct slot___2 *);
|
|
+ void (*green_led_blink)(struct slot___2 *);
|
|
+ void (*release_ctlr)(struct controller___2 *);
|
|
+ int (*check_cmd_status)(struct controller___2 *);
|
|
+};
|
|
+
|
|
+struct event_info {
|
|
+ u32 event_type;
|
|
+ struct slot___2 *p_slot;
|
|
+ struct work_struct work;
|
|
+};
|
|
+
|
|
+struct pushbutton_work_info {
|
|
+ struct slot___2 *p_slot;
|
|
+ struct work_struct work;
|
|
+};
|
|
+
|
|
+enum ctrl_offsets {
|
|
+ BASE_OFFSET = 0,
|
|
+ SLOT_AVAIL1 = 4,
|
|
+ SLOT_AVAIL2 = 8,
|
|
+ SLOT_CONFIG = 12,
|
|
+ SEC_BUS_CONFIG = 16,
|
|
+ MSI_CTRL = 18,
|
|
+ PROG_INTERFACE = 19,
|
|
+ CMD = 20,
|
|
+ CMD_STATUS = 22,
|
|
+ INTR_LOC = 24,
|
|
+ SERR_LOC = 28,
|
|
+ SERR_INTR_ENABLE = 32,
|
|
+ SLOT1 = 36,
|
|
+};
|
|
+
|
|
+struct acpiphp_slot;
|
|
+
|
|
+struct slot___3 {
|
|
+ struct hotplug_slot *hotplug_slot;
|
|
+ struct acpiphp_slot *acpi_slot;
|
|
+ struct hotplug_slot_info info;
|
|
+ unsigned int sun;
|
|
+};
|
|
+
|
|
+struct acpiphp_slot {
|
|
+ struct list_head node;
|
|
+ struct pci_bus *bus;
|
|
+ struct list_head funcs;
|
|
+ struct slot___3 *slot;
|
|
+ u8 device;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+struct acpiphp_attention_info {
|
|
+ int (*set_attn)(struct hotplug_slot *, u8);
|
|
+ int (*get_attn)(struct hotplug_slot *, u8 *);
|
|
+ struct module *owner;
|
|
+};
|
|
+
|
|
+struct acpi_object_list {
|
|
+ u32 count;
|
|
+ union acpi_object *pointer;
|
|
+};
|
|
+
|
|
+struct acpiphp_context;
|
|
+
|
|
+struct acpiphp_bridge {
|
|
+ struct list_head list;
|
|
+ struct list_head slots;
|
|
+ struct kref ref;
|
|
+ struct acpiphp_context *context;
|
|
+ int nr_slots;
|
|
+ struct pci_bus *pci_bus;
|
|
+ struct pci_dev *pci_dev;
|
|
+ bool is_going_away;
|
|
+};
|
|
+
|
|
+struct acpiphp_func {
|
|
+ struct acpiphp_bridge *parent;
|
|
+ struct acpiphp_slot *slot;
|
|
+ struct list_head sibling;
|
|
+ u8 function;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+struct acpiphp_context {
|
|
+ struct acpi_hotplug_context hp;
|
|
+ struct acpiphp_func func;
|
|
+ struct acpiphp_bridge *bridge;
|
|
+ unsigned int refcount;
|
|
+};
|
|
+
|
|
+struct acpiphp_root_context {
|
|
+ struct acpi_hotplug_context hp;
|
|
+ struct acpiphp_bridge *root_bridge;
|
|
+};
|
|
+
|
|
+struct msix_entry {
|
|
+ u32 vector;
|
|
+ u16 entry;
|
|
+};
|
|
+
|
|
+struct acpi_bus_type {
|
|
+ struct list_head list;
|
|
+ const char *name;
|
|
+ bool (*match)(struct device *);
|
|
+ struct acpi_device * (*find_companion)(struct device *);
|
|
+ void (*setup)(struct device *);
|
|
+ void (*cleanup)(struct device *);
|
|
+};
|
|
+
|
|
+enum pm_qos_flags_status {
|
|
+ PM_QOS_FLAGS_UNDEFINED = -1,
|
|
+ PM_QOS_FLAGS_NONE = 0,
|
|
+ PM_QOS_FLAGS_SOME = 1,
|
|
+ PM_QOS_FLAGS_ALL = 2,
|
|
+};
|
|
+
|
|
+enum dmi_device_type {
|
|
+ DMI_DEV_TYPE_ANY = 0,
|
|
+ DMI_DEV_TYPE_OTHER = 1,
|
|
+ DMI_DEV_TYPE_UNKNOWN = 2,
|
|
+ DMI_DEV_TYPE_VIDEO = 3,
|
|
+ DMI_DEV_TYPE_SCSI = 4,
|
|
+ DMI_DEV_TYPE_ETHERNET = 5,
|
|
+ DMI_DEV_TYPE_TOKENRING = 6,
|
|
+ DMI_DEV_TYPE_SOUND = 7,
|
|
+ DMI_DEV_TYPE_PATA = 8,
|
|
+ DMI_DEV_TYPE_SATA = 9,
|
|
+ DMI_DEV_TYPE_SAS = 10,
|
|
+ DMI_DEV_TYPE_IPMI = -1,
|
|
+ DMI_DEV_TYPE_OEM_STRING = -2,
|
|
+ DMI_DEV_TYPE_DEV_ONBOARD = -3,
|
|
+ DMI_DEV_TYPE_DEV_SLOT = -4,
|
|
+};
|
|
+
|
|
+struct dmi_device {
|
|
+ struct list_head list;
|
|
+ int type;
|
|
+ const char *name;
|
|
+ void *device_data;
|
|
+};
|
|
+
|
|
+struct dmi_dev_onboard {
|
|
+ struct dmi_device dev;
|
|
+ int instance;
|
|
+ int segment;
|
|
+ int bus;
|
|
+ int devfn;
|
|
+};
|
|
+
|
|
+enum smbios_attr_enum {
|
|
+ SMBIOS_ATTR_NONE = 0,
|
|
+ SMBIOS_ATTR_LABEL_SHOW = 1,
|
|
+ SMBIOS_ATTR_INSTANCE_SHOW = 2,
|
|
+};
|
|
+
|
|
+enum acpi_attr_enum {
|
|
+ ACPI_ATTR_LABEL_SHOW = 0,
|
|
+ ACPI_ATTR_INDEX_SHOW = 1,
|
|
+};
|
|
+
|
|
+struct dma_domain {
|
|
+ struct list_head node;
|
|
+ const struct dma_map_ops *dma_ops;
|
|
+ int domain_nr;
|
|
+};
|
|
+
|
|
+enum vmd_features {
|
|
+ VMD_FEAT_HAS_MEMBAR_SHADOW = 1,
|
|
+ VMD_FEAT_HAS_BUS_RESTRICTIONS = 2,
|
|
+};
|
|
+
|
|
+struct vmd_irq_list;
|
|
+
|
|
+struct vmd_irq {
|
|
+ struct list_head node;
|
|
+ struct vmd_irq_list *irq;
|
|
+ bool enabled;
|
|
+ unsigned int virq;
|
|
+};
|
|
+
|
|
+struct vmd_irq_list {
|
|
+ struct list_head irq_list;
|
|
+ struct srcu_struct srcu;
|
|
+ unsigned int count;
|
|
+};
|
|
+
|
|
+struct vmd_dev {
|
|
+ struct pci_dev *dev;
|
|
+ spinlock_t cfg_lock;
|
|
+ char *cfgbar;
|
|
+ int msix_count;
|
|
+ struct vmd_irq_list *irqs;
|
|
+ struct pci_sysdata sysdata;
|
|
+ struct resource resources[3];
|
|
+ struct irq_domain *irq_domain;
|
|
+ struct pci_bus *bus;
|
|
+ u8 busn_start;
|
|
+ struct dma_map_ops dma_ops;
|
|
+ struct dma_domain dma_domain;
|
|
+};
|
|
+
|
|
+enum hdmi_infoframe_type {
|
|
+ HDMI_INFOFRAME_TYPE_VENDOR = 129,
|
|
+ HDMI_INFOFRAME_TYPE_AVI = 130,
|
|
+ HDMI_INFOFRAME_TYPE_SPD = 131,
|
|
+ HDMI_INFOFRAME_TYPE_AUDIO = 132,
|
|
+};
|
|
+
|
|
+struct hdmi_any_infoframe {
|
|
+ enum hdmi_infoframe_type type;
|
|
+ unsigned char version;
|
|
+ unsigned char length;
|
|
+};
|
|
+
|
|
+enum hdmi_colorspace {
|
|
+ HDMI_COLORSPACE_RGB = 0,
|
|
+ HDMI_COLORSPACE_YUV422 = 1,
|
|
+ HDMI_COLORSPACE_YUV444 = 2,
|
|
+ HDMI_COLORSPACE_YUV420 = 3,
|
|
+ HDMI_COLORSPACE_RESERVED4 = 4,
|
|
+ HDMI_COLORSPACE_RESERVED5 = 5,
|
|
+ HDMI_COLORSPACE_RESERVED6 = 6,
|
|
+ HDMI_COLORSPACE_IDO_DEFINED = 7,
|
|
+};
|
|
+
|
|
+enum hdmi_scan_mode {
|
|
+ HDMI_SCAN_MODE_NONE = 0,
|
|
+ HDMI_SCAN_MODE_OVERSCAN = 1,
|
|
+ HDMI_SCAN_MODE_UNDERSCAN = 2,
|
|
+ HDMI_SCAN_MODE_RESERVED = 3,
|
|
+};
|
|
+
|
|
+enum hdmi_colorimetry {
|
|
+ HDMI_COLORIMETRY_NONE = 0,
|
|
+ HDMI_COLORIMETRY_ITU_601 = 1,
|
|
+ HDMI_COLORIMETRY_ITU_709 = 2,
|
|
+ HDMI_COLORIMETRY_EXTENDED = 3,
|
|
+};
|
|
+
|
|
+enum hdmi_picture_aspect {
|
|
+ HDMI_PICTURE_ASPECT_NONE = 0,
|
|
+ HDMI_PICTURE_ASPECT_4_3 = 1,
|
|
+ HDMI_PICTURE_ASPECT_16_9 = 2,
|
|
+ HDMI_PICTURE_ASPECT_64_27 = 3,
|
|
+ HDMI_PICTURE_ASPECT_256_135 = 4,
|
|
+ HDMI_PICTURE_ASPECT_RESERVED = 5,
|
|
+};
|
|
+
|
|
+enum hdmi_active_aspect {
|
|
+ HDMI_ACTIVE_ASPECT_16_9_TOP = 2,
|
|
+ HDMI_ACTIVE_ASPECT_14_9_TOP = 3,
|
|
+ HDMI_ACTIVE_ASPECT_16_9_CENTER = 4,
|
|
+ HDMI_ACTIVE_ASPECT_PICTURE = 8,
|
|
+ HDMI_ACTIVE_ASPECT_4_3 = 9,
|
|
+ HDMI_ACTIVE_ASPECT_16_9 = 10,
|
|
+ HDMI_ACTIVE_ASPECT_14_9 = 11,
|
|
+ HDMI_ACTIVE_ASPECT_4_3_SP_14_9 = 13,
|
|
+ HDMI_ACTIVE_ASPECT_16_9_SP_14_9 = 14,
|
|
+ HDMI_ACTIVE_ASPECT_16_9_SP_4_3 = 15,
|
|
+};
|
|
+
|
|
+enum hdmi_extended_colorimetry {
|
|
+ HDMI_EXTENDED_COLORIMETRY_XV_YCC_601 = 0,
|
|
+ HDMI_EXTENDED_COLORIMETRY_XV_YCC_709 = 1,
|
|
+ HDMI_EXTENDED_COLORIMETRY_S_YCC_601 = 2,
|
|
+ HDMI_EXTENDED_COLORIMETRY_OPYCC_601 = 3,
|
|
+ HDMI_EXTENDED_COLORIMETRY_OPRGB = 4,
|
|
+ HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM = 5,
|
|
+ HDMI_EXTENDED_COLORIMETRY_BT2020 = 6,
|
|
+ HDMI_EXTENDED_COLORIMETRY_RESERVED = 7,
|
|
+};
|
|
+
|
|
+enum hdmi_quantization_range {
|
|
+ HDMI_QUANTIZATION_RANGE_DEFAULT = 0,
|
|
+ HDMI_QUANTIZATION_RANGE_LIMITED = 1,
|
|
+ HDMI_QUANTIZATION_RANGE_FULL = 2,
|
|
+ HDMI_QUANTIZATION_RANGE_RESERVED = 3,
|
|
+};
|
|
+
|
|
+enum hdmi_nups {
|
|
+ HDMI_NUPS_UNKNOWN = 0,
|
|
+ HDMI_NUPS_HORIZONTAL = 1,
|
|
+ HDMI_NUPS_VERTICAL = 2,
|
|
+ HDMI_NUPS_BOTH = 3,
|
|
+};
|
|
+
|
|
+enum hdmi_ycc_quantization_range {
|
|
+ HDMI_YCC_QUANTIZATION_RANGE_LIMITED = 0,
|
|
+ HDMI_YCC_QUANTIZATION_RANGE_FULL = 1,
|
|
+};
|
|
+
|
|
+enum hdmi_content_type {
|
|
+ HDMI_CONTENT_TYPE_GRAPHICS = 0,
|
|
+ HDMI_CONTENT_TYPE_PHOTO = 1,
|
|
+ HDMI_CONTENT_TYPE_CINEMA = 2,
|
|
+ HDMI_CONTENT_TYPE_GAME = 3,
|
|
+};
|
|
+
|
|
+struct hdmi_avi_infoframe {
|
|
+ enum hdmi_infoframe_type type;
|
|
+ unsigned char version;
|
|
+ unsigned char length;
|
|
+ enum hdmi_colorspace colorspace;
|
|
+ enum hdmi_scan_mode scan_mode;
|
|
+ enum hdmi_colorimetry colorimetry;
|
|
+ enum hdmi_picture_aspect picture_aspect;
|
|
+ enum hdmi_active_aspect active_aspect;
|
|
+ bool itc;
|
|
+ enum hdmi_extended_colorimetry extended_colorimetry;
|
|
+ enum hdmi_quantization_range quantization_range;
|
|
+ enum hdmi_nups nups;
|
|
+ unsigned char video_code;
|
|
+ enum hdmi_ycc_quantization_range ycc_quantization_range;
|
|
+ enum hdmi_content_type content_type;
|
|
+ unsigned char pixel_repeat;
|
|
+ short unsigned int top_bar;
|
|
+ short unsigned int bottom_bar;
|
|
+ short unsigned int left_bar;
|
|
+ short unsigned int right_bar;
|
|
+};
|
|
+
|
|
+enum hdmi_spd_sdi {
|
|
+ HDMI_SPD_SDI_UNKNOWN = 0,
|
|
+ HDMI_SPD_SDI_DSTB = 1,
|
|
+ HDMI_SPD_SDI_DVDP = 2,
|
|
+ HDMI_SPD_SDI_DVHS = 3,
|
|
+ HDMI_SPD_SDI_HDDVR = 4,
|
|
+ HDMI_SPD_SDI_DVC = 5,
|
|
+ HDMI_SPD_SDI_DSC = 6,
|
|
+ HDMI_SPD_SDI_VCD = 7,
|
|
+ HDMI_SPD_SDI_GAME = 8,
|
|
+ HDMI_SPD_SDI_PC = 9,
|
|
+ HDMI_SPD_SDI_BD = 10,
|
|
+ HDMI_SPD_SDI_SACD = 11,
|
|
+ HDMI_SPD_SDI_HDDVD = 12,
|
|
+ HDMI_SPD_SDI_PMP = 13,
|
|
+};
|
|
+
|
|
+struct hdmi_spd_infoframe {
|
|
+ enum hdmi_infoframe_type type;
|
|
+ unsigned char version;
|
|
+ unsigned char length;
|
|
+ char vendor[8];
|
|
+ char product[16];
|
|
+ enum hdmi_spd_sdi sdi;
|
|
+};
|
|
+
|
|
+enum hdmi_audio_coding_type {
|
|
+ HDMI_AUDIO_CODING_TYPE_STREAM = 0,
|
|
+ HDMI_AUDIO_CODING_TYPE_PCM = 1,
|
|
+ HDMI_AUDIO_CODING_TYPE_AC3 = 2,
|
|
+ HDMI_AUDIO_CODING_TYPE_MPEG1 = 3,
|
|
+ HDMI_AUDIO_CODING_TYPE_MP3 = 4,
|
|
+ HDMI_AUDIO_CODING_TYPE_MPEG2 = 5,
|
|
+ HDMI_AUDIO_CODING_TYPE_AAC_LC = 6,
|
|
+ HDMI_AUDIO_CODING_TYPE_DTS = 7,
|
|
+ HDMI_AUDIO_CODING_TYPE_ATRAC = 8,
|
|
+ HDMI_AUDIO_CODING_TYPE_DSD = 9,
|
|
+ HDMI_AUDIO_CODING_TYPE_EAC3 = 10,
|
|
+ HDMI_AUDIO_CODING_TYPE_DTS_HD = 11,
|
|
+ HDMI_AUDIO_CODING_TYPE_MLP = 12,
|
|
+ HDMI_AUDIO_CODING_TYPE_DST = 13,
|
|
+ HDMI_AUDIO_CODING_TYPE_WMA_PRO = 14,
|
|
+ HDMI_AUDIO_CODING_TYPE_CXT = 15,
|
|
+};
|
|
+
|
|
+enum hdmi_audio_sample_size {
|
|
+ HDMI_AUDIO_SAMPLE_SIZE_STREAM = 0,
|
|
+ HDMI_AUDIO_SAMPLE_SIZE_16 = 1,
|
|
+ HDMI_AUDIO_SAMPLE_SIZE_20 = 2,
|
|
+ HDMI_AUDIO_SAMPLE_SIZE_24 = 3,
|
|
+};
|
|
+
|
|
+enum hdmi_audio_sample_frequency {
|
|
+ HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM = 0,
|
|
+ HDMI_AUDIO_SAMPLE_FREQUENCY_32000 = 1,
|
|
+ HDMI_AUDIO_SAMPLE_FREQUENCY_44100 = 2,
|
|
+ HDMI_AUDIO_SAMPLE_FREQUENCY_48000 = 3,
|
|
+ HDMI_AUDIO_SAMPLE_FREQUENCY_88200 = 4,
|
|
+ HDMI_AUDIO_SAMPLE_FREQUENCY_96000 = 5,
|
|
+ HDMI_AUDIO_SAMPLE_FREQUENCY_176400 = 6,
|
|
+ HDMI_AUDIO_SAMPLE_FREQUENCY_192000 = 7,
|
|
+};
|
|
+
|
|
+enum hdmi_audio_coding_type_ext {
|
|
+ HDMI_AUDIO_CODING_TYPE_EXT_CT = 0,
|
|
+ HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC = 1,
|
|
+ HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2 = 2,
|
|
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND = 3,
|
|
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC = 4,
|
|
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2 = 5,
|
|
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC = 6,
|
|
+ HDMI_AUDIO_CODING_TYPE_EXT_DRA = 7,
|
|
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND = 8,
|
|
+ HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND = 10,
|
|
+};
|
|
+
|
|
+struct hdmi_audio_infoframe {
|
|
+ enum hdmi_infoframe_type type;
|
|
+ unsigned char version;
|
|
+ unsigned char length;
|
|
+ unsigned char channels;
|
|
+ enum hdmi_audio_coding_type coding_type;
|
|
+ enum hdmi_audio_sample_size sample_size;
|
|
+ enum hdmi_audio_sample_frequency sample_frequency;
|
|
+ enum hdmi_audio_coding_type_ext coding_type_ext;
|
|
+ unsigned char channel_allocation;
|
|
+ unsigned char level_shift_value;
|
|
+ bool downmix_inhibit;
|
|
+};
|
|
+
|
|
+enum hdmi_3d_structure {
|
|
+ HDMI_3D_STRUCTURE_INVALID = -1,
|
|
+ HDMI_3D_STRUCTURE_FRAME_PACKING = 0,
|
|
+ HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE = 1,
|
|
+ HDMI_3D_STRUCTURE_LINE_ALTERNATIVE = 2,
|
|
+ HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL = 3,
|
|
+ HDMI_3D_STRUCTURE_L_DEPTH = 4,
|
|
+ HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH = 5,
|
|
+ HDMI_3D_STRUCTURE_TOP_AND_BOTTOM = 6,
|
|
+ HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF = 8,
|
|
+};
|
|
+
|
|
+struct hdmi_vendor_infoframe {
|
|
+ enum hdmi_infoframe_type type;
|
|
+ unsigned char version;
|
|
+ unsigned char length;
|
|
+ unsigned int oui;
|
|
+ u8 vic;
|
|
+ enum hdmi_3d_structure s3d_struct;
|
|
+ unsigned int s3d_ext_data;
|
|
+};
|
|
+
|
|
+union hdmi_vendor_any_infoframe {
|
|
+ struct {
|
|
+ enum hdmi_infoframe_type type;
|
|
+ unsigned char version;
|
|
+ unsigned char length;
|
|
+ unsigned int oui;
|
|
+ } any;
|
|
+ struct hdmi_vendor_infoframe hdmi;
|
|
+};
|
|
+
|
|
+union hdmi_infoframe {
|
|
+ struct hdmi_any_infoframe any;
|
|
+ struct hdmi_avi_infoframe avi;
|
|
+ struct hdmi_spd_infoframe spd;
|
|
+ union hdmi_vendor_any_infoframe vendor;
|
|
+ struct hdmi_audio_infoframe audio;
|
|
+};
|
|
+
|
|
+struct vgastate {
|
|
+ void *vgabase;
|
|
+ long unsigned int membase;
|
|
+ __u32 memsize;
|
|
+ __u32 flags;
|
|
+ __u32 depth;
|
|
+ __u32 num_attr;
|
|
+ __u32 num_crtc;
|
|
+ __u32 num_gfx;
|
|
+ __u32 num_seq;
|
|
+ void *vidstate;
|
|
+};
|
|
+
|
|
+struct linux_logo {
|
|
+ int type;
|
|
+ unsigned int width;
|
|
+ unsigned int height;
|
|
+ unsigned int clutsize;
|
|
+ const unsigned char *clut;
|
|
+ const unsigned char *data;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ FB_BLANK_UNBLANK = 0,
|
|
+ FB_BLANK_NORMAL = 1,
|
|
+ FB_BLANK_VSYNC_SUSPEND = 2,
|
|
+ FB_BLANK_HSYNC_SUSPEND = 3,
|
|
+ FB_BLANK_POWERDOWN = 4,
|
|
+};
|
|
+
|
|
+struct fb_event {
|
|
+ struct fb_info *info;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+enum backlight_update_reason {
|
|
+ BACKLIGHT_UPDATE_HOTKEY = 0,
|
|
+ BACKLIGHT_UPDATE_SYSFS = 1,
|
|
+};
|
|
+
|
|
+enum backlight_notification {
|
|
+ BACKLIGHT_REGISTERED = 0,
|
|
+ BACKLIGHT_UNREGISTERED = 1,
|
|
+};
|
|
+
|
|
+struct fb_con2fbmap {
|
|
+ __u32 console;
|
|
+ __u32 framebuffer;
|
|
+};
|
|
+
|
|
+struct fb_cmap_user {
|
|
+ __u32 start;
|
|
+ __u32 len;
|
|
+ __u16 *red;
|
|
+ __u16 *green;
|
|
+ __u16 *blue;
|
|
+ __u16 *transp;
|
|
+};
|
|
+
|
|
+struct fb_modelist {
|
|
+ struct list_head list;
|
|
+ struct fb_videomode mode;
|
|
+};
|
|
+
|
|
+struct logo_data {
|
|
+ int depth;
|
|
+ int needs_directpalette;
|
|
+ int needs_truepalette;
|
|
+ int needs_cmapreset;
|
|
+ const struct linux_logo *logo;
|
|
+};
|
|
+
|
|
+struct fb_fix_screeninfo32 {
|
|
+ char id[16];
|
|
+ compat_caddr_t smem_start;
|
|
+ u32 smem_len;
|
|
+ u32 type;
|
|
+ u32 type_aux;
|
|
+ u32 visual;
|
|
+ u16 xpanstep;
|
|
+ u16 ypanstep;
|
|
+ u16 ywrapstep;
|
|
+ u32 line_length;
|
|
+ compat_caddr_t mmio_start;
|
|
+ u32 mmio_len;
|
|
+ u32 accel;
|
|
+ u16 reserved[3];
|
|
+};
|
|
+
|
|
+struct fb_cmap32 {
|
|
+ u32 start;
|
|
+ u32 len;
|
|
+ compat_caddr_t red;
|
|
+ compat_caddr_t green;
|
|
+ compat_caddr_t blue;
|
|
+ compat_caddr_t transp;
|
|
+};
|
|
+
|
|
+struct fb_cvt_data {
|
|
+ u32 xres;
|
|
+ u32 yres;
|
|
+ u32 refresh;
|
|
+ u32 f_refresh;
|
|
+ u32 pixclock;
|
|
+ u32 hperiod;
|
|
+ u32 hblank;
|
|
+ u32 hfreq;
|
|
+ u32 htotal;
|
|
+ u32 vtotal;
|
|
+ u32 vsync;
|
|
+ u32 hsync;
|
|
+ u32 h_front_porch;
|
|
+ u32 h_back_porch;
|
|
+ u32 v_front_porch;
|
|
+ u32 v_back_porch;
|
|
+ u32 h_margin;
|
|
+ u32 v_margin;
|
|
+ u32 interlace;
|
|
+ u32 aspect_ratio;
|
|
+ u32 active_pixels;
|
|
+ u32 flags;
|
|
+ u32 status;
|
|
+};
|
|
+
|
|
+typedef unsigned char u_char;
|
|
+
|
|
+struct display {
|
|
+ const u_char *fontdata;
|
|
+ int userfont;
|
|
+ u_short scrollmode;
|
|
+ u_short inverse;
|
|
+ short int yscroll;
|
|
+ int vrows;
|
|
+ int cursor_shape;
|
|
+ int con_rotate;
|
|
+ u32 xres_virtual;
|
|
+ u32 yres_virtual;
|
|
+ u32 height;
|
|
+ u32 width;
|
|
+ u32 bits_per_pixel;
|
|
+ u32 grayscale;
|
|
+ u32 nonstd;
|
|
+ u32 accel_flags;
|
|
+ u32 rotate;
|
|
+ struct fb_bitfield red;
|
|
+ struct fb_bitfield green;
|
|
+ struct fb_bitfield blue;
|
|
+ struct fb_bitfield transp;
|
|
+ const struct fb_videomode *mode;
|
|
+};
|
|
+
|
|
+struct fbcon_ops {
|
|
+ void (*bmove)(struct vc_data *, struct fb_info *, int, int, int, int, int, int);
|
|
+ void (*clear)(struct vc_data *, struct fb_info *, int, int, int, int);
|
|
+ void (*putcs)(struct vc_data *, struct fb_info *, const short unsigned int *, int, int, int, int, int);
|
|
+ void (*clear_margins)(struct vc_data *, struct fb_info *, int, int);
|
|
+ void (*cursor)(struct vc_data *, struct fb_info *, int, int, int);
|
|
+ int (*update_start)(struct fb_info *);
|
|
+ int (*rotate_font)(struct fb_info *, struct vc_data *);
|
|
+ struct fb_var_screeninfo var;
|
|
+ struct timer_list cursor_timer;
|
|
+ struct fb_cursor cursor_state;
|
|
+ struct display *p;
|
|
+ struct fb_info *info;
|
|
+ int currcon;
|
|
+ int cur_blink_jiffies;
|
|
+ int cursor_flash;
|
|
+ int cursor_reset;
|
|
+ int blank_state;
|
|
+ int graphics;
|
|
+ int save_graphics;
|
|
+ int flags;
|
|
+ int rotate;
|
|
+ int cur_rotate;
|
|
+ char *cursor_data;
|
|
+ u8 *fontbuffer;
|
|
+ u8 *fontdata;
|
|
+ u8 *cursor_src;
|
|
+ u32 cursor_size;
|
|
+ u32 fd_size;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ FBCON_LOGO_CANSHOW = -1,
|
|
+ FBCON_LOGO_DRAW = -2,
|
|
+ FBCON_LOGO_DONTSHOW = -3,
|
|
+};
|
|
+
|
|
+struct vesafb_par {
|
|
+ u32 pseudo_palette[256];
|
|
+ int wc_cookie;
|
|
+};
|
|
+
|
|
+enum drm_panel_orientation {
|
|
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN = -1,
|
|
+ DRM_MODE_PANEL_ORIENTATION_NORMAL = 0,
|
|
+ DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP = 1,
|
|
+ DRM_MODE_PANEL_ORIENTATION_LEFT_UP = 2,
|
|
+ DRM_MODE_PANEL_ORIENTATION_RIGHT_UP = 3,
|
|
+};
|
|
+
|
|
+struct idle_cpu {
|
|
+ struct cpuidle_state *state_table;
|
|
+ long unsigned int auto_demotion_disable_flags;
|
|
+ bool byt_auto_demotion_disable_flag;
|
|
+ bool disable_promotion_to_c1e;
|
|
+ bool use_acpi;
|
|
+};
|
|
+
|
|
+enum ipmi_addr_src {
|
|
+ SI_INVALID = 0,
|
|
+ SI_HOTMOD = 1,
|
|
+ SI_HARDCODED = 2,
|
|
+ SI_SPMI = 3,
|
|
+ SI_ACPI = 4,
|
|
+ SI_SMBIOS = 5,
|
|
+ SI_PCI = 6,
|
|
+ SI_DEVICETREE = 7,
|
|
+ SI_PLATFORM = 8,
|
|
+ SI_LAST = 9,
|
|
+};
|
|
+
|
|
+union ipmi_smi_info_union {
|
|
+ struct {
|
|
+ acpi_handle acpi_handle;
|
|
+ } acpi_info;
|
|
+};
|
|
+
|
|
+struct dmi_header {
|
|
+ u8 type;
|
|
+ u8 length;
|
|
+ u16 handle;
|
|
+};
|
|
+
|
|
+enum si_type {
|
|
+ SI_TYPE_INVALID = 0,
|
|
+ SI_KCS = 1,
|
|
+ SI_SMIC = 2,
|
|
+ SI_BT = 3,
|
|
+};
|
|
+
|
|
+struct si_sm_io {
|
|
+ unsigned char (*inputb)(const struct si_sm_io *, unsigned int);
|
|
+ void (*outputb)(const struct si_sm_io *, unsigned int, unsigned char);
|
|
+ void *addr;
|
|
+ int regspacing;
|
|
+ int regsize;
|
|
+ int regshift;
|
|
+ int addr_type;
|
|
+ long int addr_data;
|
|
+ enum ipmi_addr_src addr_source;
|
|
+ void (*addr_source_cleanup)(struct si_sm_io *);
|
|
+ void *addr_source_data;
|
|
+ union ipmi_smi_info_union addr_info;
|
|
+ int (*io_setup)(struct si_sm_io *);
|
|
+ void (*io_cleanup)(struct si_sm_io *);
|
|
+ unsigned int io_size;
|
|
+ int irq;
|
|
+ int (*irq_setup)(struct si_sm_io *);
|
|
+ void *irq_handler_data;
|
|
+ void (*irq_cleanup)(struct si_sm_io *);
|
|
+ u8 slave_addr;
|
|
+ enum si_type si_type;
|
|
+ struct device *dev;
|
|
+};
|
|
+
|
|
+enum si_sm_result {
|
|
+ SI_SM_CALL_WITHOUT_DELAY = 0,
|
|
+ SI_SM_CALL_WITH_DELAY = 1,
|
|
+ SI_SM_CALL_WITH_TICK_DELAY = 2,
|
|
+ SI_SM_TRANSACTION_COMPLETE = 3,
|
|
+ SI_SM_IDLE = 4,
|
|
+ SI_SM_HOSED = 5,
|
|
+ SI_SM_ATTN = 6,
|
|
+};
|
|
+
|
|
+struct si_sm_data;
|
|
+
|
|
+struct si_sm_handlers {
|
|
+ char *version;
|
|
+ unsigned int (*init_data)(struct si_sm_data *, struct si_sm_io *);
|
|
+ int (*start_transaction)(struct si_sm_data *, unsigned char *, unsigned int);
|
|
+ int (*get_result)(struct si_sm_data *, unsigned char *, unsigned int);
|
|
+ enum si_sm_result (*event)(struct si_sm_data *, long int);
|
|
+ int (*detect)(struct si_sm_data *);
|
|
+ void (*cleanup)(struct si_sm_data *);
|
|
+ int (*size)();
|
|
+};
|
|
+
|
|
+struct ipmi_dmi_info {
|
|
+ enum si_type si_type;
|
|
+ u32 flags;
|
|
+ long unsigned int addr;
|
|
+ u8 slave_addr;
|
|
+ struct ipmi_dmi_info *next;
|
|
+};
|
|
+
|
|
+typedef u8 acpi_owner_id;
|
|
+
|
|
+union acpi_name_union {
|
|
+ u32 integer;
|
|
+ char ascii[4];
|
|
+};
|
|
+
|
|
+struct acpi_table_desc {
|
|
+ acpi_physical_address address;
|
|
+ struct acpi_table_header *pointer;
|
|
+ u32 length;
|
|
+ union acpi_name_union signature;
|
|
+ acpi_owner_id owner_id;
|
|
+ u8 flags;
|
|
+ u16 validation_count;
|
|
+};
|
|
+
|
|
+struct acpi_madt_io_sapic {
|
|
+ struct acpi_subtable_header header;
|
|
+ u8 id;
|
|
+ u8 reserved;
|
|
+ u32 global_irq_base;
|
|
+ u64 address;
|
|
+};
|
|
+
|
|
+struct acpi_madt_interrupt_source {
|
|
+ struct acpi_subtable_header header;
|
|
+ u16 inti_flags;
|
|
+ u8 type;
|
|
+ u8 id;
|
|
+ u8 eid;
|
|
+ u8 io_sapic_vector;
|
|
+ u32 global_irq;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+struct acpi_madt_generic_interrupt {
|
|
+ struct acpi_subtable_header header;
|
|
+ u16 reserved;
|
|
+ u32 cpu_interface_number;
|
|
+ u32 uid;
|
|
+ u32 flags;
|
|
+ u32 parking_version;
|
|
+ u32 performance_interrupt;
|
|
+ u64 parked_address;
|
|
+ u64 base_address;
|
|
+ u64 gicv_base_address;
|
|
+ u64 gich_base_address;
|
|
+ u32 vgic_interrupt;
|
|
+ u64 gicr_base_address;
|
|
+ u64 arm_mpidr;
|
|
+ u8 efficiency_class;
|
|
+ u8 reserved2[1];
|
|
+ u16 spe_interrupt;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_madt_generic_distributor {
|
|
+ struct acpi_subtable_header header;
|
|
+ u16 reserved;
|
|
+ u32 gic_id;
|
|
+ u64 base_address;
|
|
+ u32 global_irq_base;
|
|
+ u8 version;
|
|
+ u8 reserved2[3];
|
|
+};
|
|
+
|
|
+typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *);
|
|
+
|
|
+struct transaction;
|
|
+
|
|
+struct acpi_ec {
|
|
+ acpi_handle handle;
|
|
+ u32 gpe;
|
|
+ long unsigned int command_addr;
|
|
+ long unsigned int data_addr;
|
|
+ bool global_lock;
|
|
+ long unsigned int flags;
|
|
+ long unsigned int reference_count;
|
|
+ struct mutex mutex;
|
|
+ wait_queue_head_t wait;
|
|
+ struct list_head list;
|
|
+ struct transaction *curr;
|
|
+ spinlock_t lock;
|
|
+ struct work_struct work;
|
|
+ long unsigned int timestamp;
|
|
+ long unsigned int nr_pending_queries;
|
|
+ bool busy_polling;
|
|
+ unsigned int polling_guard;
|
|
+};
|
|
+
|
|
+enum acpi_subtable_type {
|
|
+ ACPI_SUBTABLE_COMMON = 0,
|
|
+ ACPI_SUBTABLE_HMAT = 1,
|
|
+};
|
|
+
|
|
+struct acpi_subtable_entry {
|
|
+ union acpi_subtable_headers *hdr;
|
|
+ enum acpi_subtable_type type;
|
|
+};
|
|
+
|
|
+enum acpi_predicate {
|
|
+ all_versions = 0,
|
|
+ less_than_or_equal = 1,
|
|
+ equal = 2,
|
|
+ greater_than_or_equal = 3,
|
|
+};
|
|
+
|
|
+struct acpi_platform_list {
|
|
+ char oem_id[7];
|
|
+ char oem_table_id[9];
|
|
+ u32 oem_revision;
|
|
+ char *table;
|
|
+ enum acpi_predicate pred;
|
|
+ char *reason;
|
|
+ u32 data;
|
|
+};
|
|
+
|
|
+typedef char *acpi_string;
|
|
+
|
|
+struct acpi_osi_entry {
|
|
+ char string[64];
|
|
+ bool enable;
|
|
+};
|
|
+
|
|
+struct acpi_osi_config {
|
|
+ u8 default_disabling;
|
|
+ unsigned int linux_enable: 1;
|
|
+ unsigned int linux_dmi: 1;
|
|
+ unsigned int linux_cmdline: 1;
|
|
+ unsigned int darwin_enable: 1;
|
|
+ unsigned int darwin_dmi: 1;
|
|
+ unsigned int darwin_cmdline: 1;
|
|
+};
|
|
+
|
|
+typedef u32 acpi_name;
|
|
+
|
|
+struct acpi_predefined_names {
|
|
+ const char *name;
|
|
+ u8 type;
|
|
+ char *val;
|
|
+};
|
|
+
|
|
+typedef u32 (*acpi_osd_handler)(void *);
|
|
+
|
|
+typedef void (*acpi_osd_exec_callback)(void *);
|
|
+
|
|
+typedef u32 (*acpi_sci_handler)(void *);
|
|
+
|
|
+typedef void (*acpi_gbl_event_handler)(u32, acpi_handle, u32, void *);
|
|
+
|
|
+typedef u32 (*acpi_event_handler)(void *);
|
|
+
|
|
+typedef u32 (*acpi_gpe_handler)(acpi_handle, u32, void *);
|
|
+
|
|
+typedef void (*acpi_notify_handler)(acpi_handle, u32, void *);
|
|
+
|
|
+typedef void (*acpi_object_handler)(acpi_handle, void *);
|
|
+
|
|
+typedef acpi_status (*acpi_init_handler)(acpi_handle, u32);
|
|
+
|
|
+typedef acpi_status (*acpi_exception_handler)(acpi_status, acpi_name, u16, u32, void *);
|
|
+
|
|
+typedef acpi_status (*acpi_table_handler)(u32, void *, void *);
|
|
+
|
|
+typedef acpi_status (*acpi_adr_space_handler)(u32, acpi_physical_address, u32, u64 *, void *, void *);
|
|
+
|
|
+typedef acpi_status (*acpi_adr_space_setup)(acpi_handle, u32, void *, void **);
|
|
+
|
|
+typedef u32 (*acpi_interface_handler)(acpi_string, u32);
|
|
+
|
|
+struct acpi_pci_id {
|
|
+ u16 segment;
|
|
+ u16 bus;
|
|
+ u16 device;
|
|
+ u16 function;
|
|
+};
|
|
+
|
|
+struct acpi_mem_space_context {
|
|
+ u32 length;
|
|
+ acpi_physical_address address;
|
|
+ acpi_physical_address mapped_physical_address;
|
|
+ u8 *mapped_logical_address;
|
|
+ acpi_size mapped_length;
|
|
+};
|
|
+
|
|
+struct acpi_table_facs {
|
|
+ char signature[4];
|
|
+ u32 length;
|
|
+ u32 hardware_signature;
|
|
+ u32 firmware_waking_vector;
|
|
+ u32 global_lock;
|
|
+ u32 flags;
|
|
+ u64 xfirmware_waking_vector;
|
|
+ u8 version;
|
|
+ u8 reserved[3];
|
|
+ u32 ospm_flags;
|
|
+ u8 reserved1[24];
|
|
+};
|
|
+
|
|
+typedef enum {
|
|
+ OSL_GLOBAL_LOCK_HANDLER = 0,
|
|
+ OSL_NOTIFY_HANDLER = 1,
|
|
+ OSL_GPE_HANDLER = 2,
|
|
+ OSL_DEBUGGER_MAIN_THREAD = 3,
|
|
+ OSL_DEBUGGER_EXEC_THREAD = 4,
|
|
+ OSL_EC_POLL_HANDLER = 5,
|
|
+ OSL_EC_BURST_HANDLER = 6,
|
|
+} acpi_execute_type;
|
|
+
|
|
+struct acpi_rw_lock {
|
|
+ void *writer_mutex;
|
|
+ void *reader_mutex;
|
|
+ u32 num_readers;
|
|
+};
|
|
+
|
|
+struct acpi_mutex_info {
|
|
+ void *mutex;
|
|
+ u32 use_count;
|
|
+ u64 thread_id;
|
|
+};
|
|
+
|
|
+union acpi_operand_object;
|
|
+
|
|
+struct acpi_namespace_node {
|
|
+ union acpi_operand_object *object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u8 flags;
|
|
+ acpi_owner_id owner_id;
|
|
+ union acpi_name_union name;
|
|
+ struct acpi_namespace_node *parent;
|
|
+ struct acpi_namespace_node *child;
|
|
+ struct acpi_namespace_node *peer;
|
|
+};
|
|
+
|
|
+struct acpi_object_common {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+};
|
|
+
|
|
+struct acpi_object_integer {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ u8 fill[3];
|
|
+ u64 value;
|
|
+};
|
|
+
|
|
+struct acpi_object_string {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ char *pointer;
|
|
+ u32 length;
|
|
+};
|
|
+
|
|
+struct acpi_object_buffer {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ u8 *pointer;
|
|
+ u32 length;
|
|
+ u32 aml_length;
|
|
+ u8 *aml_start;
|
|
+ struct acpi_namespace_node *node;
|
|
+};
|
|
+
|
|
+struct acpi_object_package {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ struct acpi_namespace_node *node;
|
|
+ union acpi_operand_object **elements;
|
|
+ u8 *aml_start;
|
|
+ u32 aml_length;
|
|
+ u32 count;
|
|
+};
|
|
+
|
|
+struct acpi_object_event {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ void *os_semaphore;
|
|
+};
|
|
+
|
|
+struct acpi_walk_state;
|
|
+
|
|
+typedef acpi_status (*acpi_internal_method)(struct acpi_walk_state *);
|
|
+
|
|
+struct acpi_object_method {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ u8 info_flags;
|
|
+ u8 param_count;
|
|
+ u8 sync_level;
|
|
+ union acpi_operand_object *mutex;
|
|
+ union acpi_operand_object *node;
|
|
+ u8 *aml_start;
|
|
+ union {
|
|
+ acpi_internal_method implementation;
|
|
+ union acpi_operand_object *handler;
|
|
+ } dispatch;
|
|
+ u32 aml_length;
|
|
+ u8 thread_count;
|
|
+ acpi_owner_id owner_id;
|
|
+};
|
|
+
|
|
+struct acpi_thread_state;
|
|
+
|
|
+struct acpi_object_mutex {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ u8 sync_level;
|
|
+ u16 acquisition_depth;
|
|
+ void *os_mutex;
|
|
+ u64 thread_id;
|
|
+ struct acpi_thread_state *owner_thread;
|
|
+ union acpi_operand_object *prev;
|
|
+ union acpi_operand_object *next;
|
|
+ struct acpi_namespace_node *node;
|
|
+ u8 original_sync_level;
|
|
+};
|
|
+
|
|
+struct acpi_object_region {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ u8 space_id;
|
|
+ struct acpi_namespace_node *node;
|
|
+ union acpi_operand_object *handler;
|
|
+ union acpi_operand_object *next;
|
|
+ acpi_physical_address address;
|
|
+ u32 length;
|
|
+};
|
|
+
|
|
+struct acpi_object_notify_common {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ union acpi_operand_object *notify_list[2];
|
|
+ union acpi_operand_object *handler;
|
|
+};
|
|
+
|
|
+struct acpi_gpe_block_info;
|
|
+
|
|
+struct acpi_object_device {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ union acpi_operand_object *notify_list[2];
|
|
+ union acpi_operand_object *handler;
|
|
+ struct acpi_gpe_block_info *gpe_block;
|
|
+};
|
|
+
|
|
+struct acpi_object_power_resource {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ union acpi_operand_object *notify_list[2];
|
|
+ union acpi_operand_object *handler;
|
|
+ u32 system_level;
|
|
+ u32 resource_order;
|
|
+};
|
|
+
|
|
+struct acpi_object_processor {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ u8 proc_id;
|
|
+ u8 length;
|
|
+ union acpi_operand_object *notify_list[2];
|
|
+ union acpi_operand_object *handler;
|
|
+ acpi_io_address address;
|
|
+};
|
|
+
|
|
+struct acpi_object_thermal_zone {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ union acpi_operand_object *notify_list[2];
|
|
+ union acpi_operand_object *handler;
|
|
+};
|
|
+
|
|
+struct acpi_object_field_common {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ u8 field_flags;
|
|
+ u8 attribute;
|
|
+ u8 access_byte_width;
|
|
+ struct acpi_namespace_node *node;
|
|
+ u32 bit_length;
|
|
+ u32 base_byte_offset;
|
|
+ u32 value;
|
|
+ u8 start_field_bit_offset;
|
|
+ u8 access_length;
|
|
+ union acpi_operand_object *region_obj;
|
|
+};
|
|
+
|
|
+struct acpi_object_region_field {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ u8 field_flags;
|
|
+ u8 attribute;
|
|
+ u8 access_byte_width;
|
|
+ struct acpi_namespace_node *node;
|
|
+ u32 bit_length;
|
|
+ u32 base_byte_offset;
|
|
+ u32 value;
|
|
+ u8 start_field_bit_offset;
|
|
+ u8 access_length;
|
|
+ u16 resource_length;
|
|
+ union acpi_operand_object *region_obj;
|
|
+ u8 *resource_buffer;
|
|
+ u16 pin_number_index;
|
|
+};
|
|
+
|
|
+struct acpi_object_buffer_field {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ u8 field_flags;
|
|
+ u8 attribute;
|
|
+ u8 access_byte_width;
|
|
+ struct acpi_namespace_node *node;
|
|
+ u32 bit_length;
|
|
+ u32 base_byte_offset;
|
|
+ u32 value;
|
|
+ u8 start_field_bit_offset;
|
|
+ u8 access_length;
|
|
+ union acpi_operand_object *buffer_obj;
|
|
+};
|
|
+
|
|
+struct acpi_object_bank_field {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ u8 field_flags;
|
|
+ u8 attribute;
|
|
+ u8 access_byte_width;
|
|
+ struct acpi_namespace_node *node;
|
|
+ u32 bit_length;
|
|
+ u32 base_byte_offset;
|
|
+ u32 value;
|
|
+ u8 start_field_bit_offset;
|
|
+ u8 access_length;
|
|
+ union acpi_operand_object *region_obj;
|
|
+ union acpi_operand_object *bank_obj;
|
|
+};
|
|
+
|
|
+struct acpi_object_index_field {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ u8 field_flags;
|
|
+ u8 attribute;
|
|
+ u8 access_byte_width;
|
|
+ struct acpi_namespace_node *node;
|
|
+ u32 bit_length;
|
|
+ u32 base_byte_offset;
|
|
+ u32 value;
|
|
+ u8 start_field_bit_offset;
|
|
+ u8 access_length;
|
|
+ union acpi_operand_object *index_obj;
|
|
+ union acpi_operand_object *data_obj;
|
|
+};
|
|
+
|
|
+struct acpi_object_notify_handler {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ struct acpi_namespace_node *node;
|
|
+ u32 handler_type;
|
|
+ acpi_notify_handler handler;
|
|
+ void *context;
|
|
+ union acpi_operand_object *next[2];
|
|
+};
|
|
+
|
|
+struct acpi_object_addr_handler {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ u8 space_id;
|
|
+ u8 handler_flags;
|
|
+ acpi_adr_space_handler handler;
|
|
+ struct acpi_namespace_node *node;
|
|
+ void *context;
|
|
+ acpi_adr_space_setup setup;
|
|
+ union acpi_operand_object *region_list;
|
|
+ union acpi_operand_object *next;
|
|
+};
|
|
+
|
|
+struct acpi_object_reference {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ u8 class;
|
|
+ u8 target_type;
|
|
+ u8 resolved;
|
|
+ void *object;
|
|
+ struct acpi_namespace_node *node;
|
|
+ union acpi_operand_object **where;
|
|
+ u8 *index_pointer;
|
|
+ u8 *aml;
|
|
+ u32 value;
|
|
+};
|
|
+
|
|
+struct acpi_object_extra {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ struct acpi_namespace_node *method_REG;
|
|
+ struct acpi_namespace_node *scope_node;
|
|
+ void *region_context;
|
|
+ u8 *aml_start;
|
|
+ u32 aml_length;
|
|
+};
|
|
+
|
|
+struct acpi_object_data {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ acpi_object_handler handler;
|
|
+ void *pointer;
|
|
+};
|
|
+
|
|
+struct acpi_object_cache_list {
|
|
+ union acpi_operand_object *next_object;
|
|
+ u8 descriptor_type;
|
|
+ u8 type;
|
|
+ u16 reference_count;
|
|
+ u8 flags;
|
|
+ union acpi_operand_object *next;
|
|
+};
|
|
+
|
|
+union acpi_operand_object {
|
|
+ struct acpi_object_common common;
|
|
+ struct acpi_object_integer integer;
|
|
+ struct acpi_object_string string;
|
|
+ struct acpi_object_buffer buffer;
|
|
+ struct acpi_object_package package;
|
|
+ struct acpi_object_event event;
|
|
+ struct acpi_object_method method;
|
|
+ struct acpi_object_mutex mutex;
|
|
+ struct acpi_object_region region;
|
|
+ struct acpi_object_notify_common common_notify;
|
|
+ struct acpi_object_device device;
|
|
+ struct acpi_object_power_resource power_resource;
|
|
+ struct acpi_object_processor processor;
|
|
+ struct acpi_object_thermal_zone thermal_zone;
|
|
+ struct acpi_object_field_common common_field;
|
|
+ struct acpi_object_region_field field;
|
|
+ struct acpi_object_buffer_field buffer_field;
|
|
+ struct acpi_object_bank_field bank_field;
|
|
+ struct acpi_object_index_field index_field;
|
|
+ struct acpi_object_notify_handler notify;
|
|
+ struct acpi_object_addr_handler address_space;
|
|
+ struct acpi_object_reference reference;
|
|
+ struct acpi_object_extra extra;
|
|
+ struct acpi_object_data data;
|
|
+ struct acpi_object_cache_list cache;
|
|
+ struct acpi_namespace_node node;
|
|
+};
|
|
+
|
|
+struct acpi_table_list {
|
|
+ struct acpi_table_desc *tables;
|
|
+ u32 current_table_count;
|
|
+ u32 max_table_count;
|
|
+ u8 flags;
|
|
+};
|
|
+
|
|
+union acpi_parse_object;
|
|
+
|
|
+union acpi_generic_state;
|
|
+
|
|
+struct acpi_parse_state {
|
|
+ u8 *aml_start;
|
|
+ u8 *aml;
|
|
+ u8 *aml_end;
|
|
+ u8 *pkg_start;
|
|
+ u8 *pkg_end;
|
|
+ union acpi_parse_object *start_op;
|
|
+ struct acpi_namespace_node *start_node;
|
|
+ union acpi_generic_state *scope;
|
|
+ union acpi_parse_object *start_scope;
|
|
+ u32 aml_size;
|
|
+};
|
|
+
|
|
+typedef acpi_status (*acpi_parse_downwards)(struct acpi_walk_state *, union acpi_parse_object **);
|
|
+
|
|
+typedef acpi_status (*acpi_parse_upwards)(struct acpi_walk_state *);
|
|
+
|
|
+struct acpi_opcode_info;
|
|
+
|
|
+struct acpi_walk_state {
|
|
+ struct acpi_walk_state *next;
|
|
+ u8 descriptor_type;
|
|
+ u8 walk_type;
|
|
+ u16 opcode;
|
|
+ u8 next_op_info;
|
|
+ u8 num_operands;
|
|
+ u8 operand_index;
|
|
+ acpi_owner_id owner_id;
|
|
+ u8 last_predicate;
|
|
+ u8 current_result;
|
|
+ u8 return_used;
|
|
+ u8 scope_depth;
|
|
+ u8 pass_number;
|
|
+ u8 namespace_override;
|
|
+ u8 result_size;
|
|
+ u8 result_count;
|
|
+ u8 *aml;
|
|
+ u32 arg_types;
|
|
+ u32 method_breakpoint;
|
|
+ u32 user_breakpoint;
|
|
+ u32 parse_flags;
|
|
+ struct acpi_parse_state parser_state;
|
|
+ u32 prev_arg_types;
|
|
+ u32 arg_count;
|
|
+ struct acpi_namespace_node arguments[7];
|
|
+ struct acpi_namespace_node local_variables[8];
|
|
+ union acpi_operand_object *operands[9];
|
|
+ union acpi_operand_object **params;
|
|
+ u8 *aml_last_while;
|
|
+ union acpi_operand_object **caller_return_desc;
|
|
+ union acpi_generic_state *control_state;
|
|
+ struct acpi_namespace_node *deferred_node;
|
|
+ union acpi_operand_object *implicit_return_obj;
|
|
+ struct acpi_namespace_node *method_call_node;
|
|
+ union acpi_parse_object *method_call_op;
|
|
+ union acpi_operand_object *method_desc;
|
|
+ struct acpi_namespace_node *method_node;
|
|
+ union acpi_parse_object *op;
|
|
+ const struct acpi_opcode_info *op_info;
|
|
+ union acpi_parse_object *origin;
|
|
+ union acpi_operand_object *result_obj;
|
|
+ union acpi_generic_state *results;
|
|
+ union acpi_operand_object *return_desc;
|
|
+ union acpi_generic_state *scope_info;
|
|
+ union acpi_parse_object *prev_op;
|
|
+ union acpi_parse_object *next_op;
|
|
+ struct acpi_thread_state *thread;
|
|
+ acpi_parse_downwards descending_callback;
|
|
+ acpi_parse_upwards ascending_callback;
|
|
+};
|
|
+
|
|
+struct acpi_sci_handler_info {
|
|
+ struct acpi_sci_handler_info *next;
|
|
+ acpi_sci_handler address;
|
|
+ void *context;
|
|
+};
|
|
+
|
|
+struct acpi_gpe_handler_info {
|
|
+ acpi_gpe_handler address;
|
|
+ void *context;
|
|
+ struct acpi_namespace_node *method_node;
|
|
+ u8 original_flags;
|
|
+ u8 originally_enabled;
|
|
+};
|
|
+
|
|
+struct acpi_gpe_notify_info {
|
|
+ struct acpi_namespace_node *device_node;
|
|
+ struct acpi_gpe_notify_info *next;
|
|
+};
|
|
+
|
|
+union acpi_gpe_dispatch_info {
|
|
+ struct acpi_namespace_node *method_node;
|
|
+ struct acpi_gpe_handler_info *handler;
|
|
+ struct acpi_gpe_notify_info *notify_list;
|
|
+};
|
|
+
|
|
+struct acpi_gpe_register_info;
|
|
+
|
|
+struct acpi_gpe_event_info {
|
|
+ union acpi_gpe_dispatch_info dispatch;
|
|
+ struct acpi_gpe_register_info *register_info;
|
|
+ u8 flags;
|
|
+ u8 gpe_number;
|
|
+ u8 runtime_count;
|
|
+ u8 disable_for_dispatch;
|
|
+};
|
|
+
|
|
+struct acpi_gpe_register_info {
|
|
+ struct acpi_generic_address status_address;
|
|
+ struct acpi_generic_address enable_address;
|
|
+ u16 base_gpe_number;
|
|
+ u8 enable_for_wake;
|
|
+ u8 enable_for_run;
|
|
+ u8 mask_for_run;
|
|
+ u8 enable_mask;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_gpe_xrupt_info;
|
|
+
|
|
+struct acpi_gpe_block_info {
|
|
+ struct acpi_namespace_node *node;
|
|
+ struct acpi_gpe_block_info *previous;
|
|
+ struct acpi_gpe_block_info *next;
|
|
+ struct acpi_gpe_xrupt_info *xrupt_block;
|
|
+ struct acpi_gpe_register_info *register_info;
|
|
+ struct acpi_gpe_event_info *event_info;
|
|
+ u64 address;
|
|
+ u32 register_count;
|
|
+ u16 gpe_count;
|
|
+ u16 block_base_number;
|
|
+ u8 space_id;
|
|
+ u8 initialized;
|
|
+};
|
|
+
|
|
+struct acpi_gpe_xrupt_info {
|
|
+ struct acpi_gpe_xrupt_info *previous;
|
|
+ struct acpi_gpe_xrupt_info *next;
|
|
+ struct acpi_gpe_block_info *gpe_block_list_head;
|
|
+ u32 interrupt_number;
|
|
+};
|
|
+
|
|
+struct acpi_fixed_event_handler {
|
|
+ acpi_event_handler handler;
|
|
+ void *context;
|
|
+};
|
|
+
|
|
+struct acpi_fixed_event_info {
|
|
+ u8 status_register_id;
|
|
+ u8 enable_register_id;
|
|
+ u16 status_bit_mask;
|
|
+ u16 enable_bit_mask;
|
|
+};
|
|
+
|
|
+struct acpi_common_state {
|
|
+ void *next;
|
|
+ u8 descriptor_type;
|
|
+ u8 flags;
|
|
+ u16 value;
|
|
+ u16 state;
|
|
+};
|
|
+
|
|
+struct acpi_update_state {
|
|
+ void *next;
|
|
+ u8 descriptor_type;
|
|
+ u8 flags;
|
|
+ u16 value;
|
|
+ u16 state;
|
|
+ union acpi_operand_object *object;
|
|
+};
|
|
+
|
|
+struct acpi_pkg_state {
|
|
+ void *next;
|
|
+ u8 descriptor_type;
|
|
+ u8 flags;
|
|
+ u16 value;
|
|
+ u16 state;
|
|
+ u32 index;
|
|
+ union acpi_operand_object *source_object;
|
|
+ union acpi_operand_object *dest_object;
|
|
+ struct acpi_walk_state *walk_state;
|
|
+ void *this_target_obj;
|
|
+ u32 num_packages;
|
|
+};
|
|
+
|
|
+struct acpi_control_state {
|
|
+ void *next;
|
|
+ u8 descriptor_type;
|
|
+ u8 flags;
|
|
+ u16 value;
|
|
+ u16 state;
|
|
+ u16 opcode;
|
|
+ union acpi_parse_object *predicate_op;
|
|
+ u8 *aml_predicate_start;
|
|
+ u8 *package_end;
|
|
+ u64 loop_timeout;
|
|
+};
|
|
+
|
|
+union acpi_parse_value {
|
|
+ u64 integer;
|
|
+ u32 size;
|
|
+ char *string;
|
|
+ u8 *buffer;
|
|
+ char *name;
|
|
+ union acpi_parse_object *arg;
|
|
+};
|
|
+
|
|
+struct acpi_parse_obj_common {
|
|
+ union acpi_parse_object *parent;
|
|
+ u8 descriptor_type;
|
|
+ u8 flags;
|
|
+ u16 aml_opcode;
|
|
+ u8 *aml;
|
|
+ union acpi_parse_object *next;
|
|
+ struct acpi_namespace_node *node;
|
|
+ union acpi_parse_value value;
|
|
+ u8 arg_list_length;
|
|
+};
|
|
+
|
|
+struct acpi_parse_obj_named {
|
|
+ union acpi_parse_object *parent;
|
|
+ u8 descriptor_type;
|
|
+ u8 flags;
|
|
+ u16 aml_opcode;
|
|
+ u8 *aml;
|
|
+ union acpi_parse_object *next;
|
|
+ struct acpi_namespace_node *node;
|
|
+ union acpi_parse_value value;
|
|
+ u8 arg_list_length;
|
|
+ char *path;
|
|
+ u8 *data;
|
|
+ u32 length;
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct acpi_parse_obj_asl {
|
|
+ union acpi_parse_object *parent;
|
|
+ u8 descriptor_type;
|
|
+ u8 flags;
|
|
+ u16 aml_opcode;
|
|
+ u8 *aml;
|
|
+ union acpi_parse_object *next;
|
|
+ struct acpi_namespace_node *node;
|
|
+ union acpi_parse_value value;
|
|
+ u8 arg_list_length;
|
|
+ union acpi_parse_object *child;
|
|
+ union acpi_parse_object *parent_method;
|
|
+ char *filename;
|
|
+ u8 file_changed;
|
|
+ char *parent_filename;
|
|
+ char *external_name;
|
|
+ char *namepath;
|
|
+ char name_seg[4];
|
|
+ u32 extra_value;
|
|
+ u32 column;
|
|
+ u32 line_number;
|
|
+ u32 logical_line_number;
|
|
+ u32 logical_byte_offset;
|
|
+ u32 end_line;
|
|
+ u32 end_logical_line;
|
|
+ u32 acpi_btype;
|
|
+ u32 aml_length;
|
|
+ u32 aml_subtree_length;
|
|
+ u32 final_aml_length;
|
|
+ u32 final_aml_offset;
|
|
+ u32 compile_flags;
|
|
+ u16 parse_opcode;
|
|
+ u8 aml_opcode_length;
|
|
+ u8 aml_pkg_len_bytes;
|
|
+ u8 extra;
|
|
+ char parse_op_name[20];
|
|
+};
|
|
+
|
|
+union acpi_parse_object {
|
|
+ struct acpi_parse_obj_common common;
|
|
+ struct acpi_parse_obj_named named;
|
|
+ struct acpi_parse_obj_asl asl;
|
|
+};
|
|
+
|
|
+struct acpi_scope_state {
|
|
+ void *next;
|
|
+ u8 descriptor_type;
|
|
+ u8 flags;
|
|
+ u16 value;
|
|
+ u16 state;
|
|
+ struct acpi_namespace_node *node;
|
|
+};
|
|
+
|
|
+struct acpi_pscope_state {
|
|
+ void *next;
|
|
+ u8 descriptor_type;
|
|
+ u8 flags;
|
|
+ u16 value;
|
|
+ u16 state;
|
|
+ u32 arg_count;
|
|
+ union acpi_parse_object *op;
|
|
+ u8 *arg_end;
|
|
+ u8 *pkg_end;
|
|
+ u32 arg_list;
|
|
+};
|
|
+
|
|
+struct acpi_thread_state {
|
|
+ void *next;
|
|
+ u8 descriptor_type;
|
|
+ u8 flags;
|
|
+ u16 value;
|
|
+ u16 state;
|
|
+ u8 current_sync_level;
|
|
+ struct acpi_walk_state *walk_state_list;
|
|
+ union acpi_operand_object *acquired_mutex_list;
|
|
+ u64 thread_id;
|
|
+};
|
|
+
|
|
+struct acpi_result_values {
|
|
+ void *next;
|
|
+ u8 descriptor_type;
|
|
+ u8 flags;
|
|
+ u16 value;
|
|
+ u16 state;
|
|
+ union acpi_operand_object *obj_desc[8];
|
|
+};
|
|
+
|
|
+struct acpi_global_notify_handler {
|
|
+ acpi_notify_handler handler;
|
|
+ void *context;
|
|
+};
|
|
+
|
|
+struct acpi_notify_info {
|
|
+ void *next;
|
|
+ u8 descriptor_type;
|
|
+ u8 flags;
|
|
+ u16 value;
|
|
+ u16 state;
|
|
+ u8 handler_list_id;
|
|
+ struct acpi_namespace_node *node;
|
|
+ union acpi_operand_object *handler_list_head;
|
|
+ struct acpi_global_notify_handler *global;
|
|
+};
|
|
+
|
|
+union acpi_generic_state {
|
|
+ struct acpi_common_state common;
|
|
+ struct acpi_control_state control;
|
|
+ struct acpi_update_state update;
|
|
+ struct acpi_scope_state scope;
|
|
+ struct acpi_pscope_state parse_scope;
|
|
+ struct acpi_pkg_state pkg;
|
|
+ struct acpi_thread_state thread;
|
|
+ struct acpi_result_values results;
|
|
+ struct acpi_notify_info notify;
|
|
+};
|
|
+
|
|
+struct acpi_address_range {
|
|
+ struct acpi_address_range *next;
|
|
+ struct acpi_namespace_node *region_node;
|
|
+ acpi_physical_address start_address;
|
|
+ acpi_physical_address end_address;
|
|
+};
|
|
+
|
|
+struct acpi_opcode_info {
|
|
+ u32 parse_args;
|
|
+ u32 runtime_args;
|
|
+ u16 flags;
|
|
+ u8 object_type;
|
|
+ u8 class;
|
|
+ u8 type;
|
|
+};
|
|
+
|
|
+struct acpi_comment_node {
|
|
+ char *comment;
|
|
+ struct acpi_comment_node *next;
|
|
+};
|
|
+
|
|
+struct acpi_bit_register_info {
|
|
+ u8 parent_register;
|
|
+ u8 bit_position;
|
|
+ u16 access_bit_mask;
|
|
+};
|
|
+
|
|
+struct acpi_interface_info {
|
|
+ char *name;
|
|
+ struct acpi_interface_info *next;
|
|
+ u8 flags;
|
|
+ u8 value;
|
|
+};
|
|
+
|
|
+struct acpi_os_dpc {
|
|
+ acpi_osd_exec_callback function;
|
|
+ void *context;
|
|
+ struct work_struct work;
|
|
+};
|
|
+
|
|
+struct acpi_ioremap {
|
|
+ struct list_head list;
|
|
+ void *virt;
|
|
+ acpi_physical_address phys;
|
|
+ acpi_size size;
|
|
+ long unsigned int refcount;
|
|
+};
|
|
+
|
|
+struct acpi_hp_work {
|
|
+ struct work_struct work;
|
|
+ struct acpi_device *adev;
|
|
+ u32 src;
|
|
+};
|
|
+
|
|
+struct acpi_pld_info {
|
|
+ u8 revision;
|
|
+ u8 ignore_color;
|
|
+ u8 red;
|
|
+ u8 green;
|
|
+ u8 blue;
|
|
+ u16 width;
|
|
+ u16 height;
|
|
+ u8 user_visible;
|
|
+ u8 dock;
|
|
+ u8 lid;
|
|
+ u8 panel;
|
|
+ u8 vertical_position;
|
|
+ u8 horizontal_position;
|
|
+ u8 shape;
|
|
+ u8 group_orientation;
|
|
+ u8 group_token;
|
|
+ u8 group_position;
|
|
+ u8 bay;
|
|
+ u8 ejectable;
|
|
+ u8 ospm_eject_required;
|
|
+ u8 cabinet_number;
|
|
+ u8 card_cage_number;
|
|
+ u8 reference;
|
|
+ u8 rotation;
|
|
+ u8 order;
|
|
+ u8 reserved;
|
|
+ u16 vertical_offset;
|
|
+ u16 horizontal_offset;
|
|
+};
|
|
+
|
|
+struct acpi_handle_list {
|
|
+ u32 count;
|
|
+ acpi_handle handles[10];
|
|
+};
|
|
+
|
|
+struct acpi_device_bus_id {
|
|
+ const char *bus_id;
|
|
+ unsigned int instance_no;
|
|
+ struct list_head node;
|
|
+};
|
|
+
|
|
+struct acpi_dev_match_info {
|
|
+ const char *dev_name;
|
|
+ struct acpi_device_id hid[2];
|
|
+ const char *uid;
|
|
+ s64 hrv;
|
|
+};
|
|
+
|
|
+struct nvs_region {
|
|
+ __u64 phys_start;
|
|
+ __u64 size;
|
|
+ struct list_head node;
|
|
+};
|
|
+
|
|
+struct nvs_page {
|
|
+ long unsigned int phys_start;
|
|
+ unsigned int size;
|
|
+ void *kaddr;
|
|
+ void *data;
|
|
+ bool unmap;
|
|
+ struct list_head node;
|
|
+};
|
|
+
|
|
+typedef u32 acpi_event_status;
|
|
+
|
|
+struct lpi_device_info {
|
|
+ char *name;
|
|
+ int enabled;
|
|
+ union acpi_object *package;
|
|
+};
|
|
+
|
|
+struct lpi_device_constraint {
|
|
+ int uid;
|
|
+ int min_dstate;
|
|
+ int function_states;
|
|
+};
|
|
+
|
|
+struct lpi_constraints {
|
|
+ acpi_handle handle;
|
|
+ int min_dstate;
|
|
+};
|
|
+
|
|
+struct acpi_hardware_id {
|
|
+ struct list_head list;
|
|
+ const char *id;
|
|
+};
|
|
+
|
|
+struct acpi_data_node {
|
|
+ const char *name;
|
|
+ acpi_handle handle;
|
|
+ struct fwnode_handle fwnode;
|
|
+ struct fwnode_handle *parent;
|
|
+ struct acpi_device_data data;
|
|
+ struct list_head sibling;
|
|
+ struct kobject kobj;
|
|
+ struct completion kobj_done;
|
|
+};
|
|
+
|
|
+struct acpi_data_node_attr {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct acpi_data_node *, char *);
|
|
+ ssize_t (*store)(struct acpi_data_node *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct pm_domain_data {
|
|
+ struct list_head list_node;
|
|
+ struct device *dev;
|
|
+};
|
|
+
|
|
+struct dev_power_governor {
|
|
+ bool (*power_down_ok)(struct dev_pm_domain *);
|
|
+ bool (*suspend_ok)(struct device *);
|
|
+};
|
|
+
|
|
+struct acpi_device_physical_node {
|
|
+ unsigned int node_id;
|
|
+ struct list_head node;
|
|
+ struct device *dev;
|
|
+ bool put_online: 1;
|
|
+};
|
|
+
|
|
+enum acpi_bus_device_type {
|
|
+ ACPI_BUS_TYPE_DEVICE = 0,
|
|
+ ACPI_BUS_TYPE_POWER = 1,
|
|
+ ACPI_BUS_TYPE_PROCESSOR = 2,
|
|
+ ACPI_BUS_TYPE_THERMAL = 3,
|
|
+ ACPI_BUS_TYPE_POWER_BUTTON = 4,
|
|
+ ACPI_BUS_TYPE_SLEEP_BUTTON = 5,
|
|
+ ACPI_BUS_TYPE_ECDT_EC = 6,
|
|
+ ACPI_BUS_DEVICE_TYPE_COUNT = 7,
|
|
+};
|
|
+
|
|
+struct acpi_osc_context {
|
|
+ char *uuid_str;
|
|
+ int rev;
|
|
+ struct acpi_buffer cap;
|
|
+ struct acpi_buffer ret;
|
|
+};
|
|
+
|
|
+struct acpi_pnp_device_id {
|
|
+ u32 length;
|
|
+ char *string;
|
|
+};
|
|
+
|
|
+struct acpi_pnp_device_id_list {
|
|
+ u32 count;
|
|
+ u32 list_size;
|
|
+ struct acpi_pnp_device_id ids[1];
|
|
+};
|
|
+
|
|
+struct acpi_device_info {
|
|
+ u32 info_size;
|
|
+ u32 name;
|
|
+ acpi_object_type type;
|
|
+ u8 param_count;
|
|
+ u16 valid;
|
|
+ u8 flags;
|
|
+ u8 highest_dstates[4];
|
|
+ u8 lowest_dstates[5];
|
|
+ u64 address;
|
|
+ struct acpi_pnp_device_id hardware_id;
|
|
+ struct acpi_pnp_device_id unique_id;
|
|
+ struct acpi_pnp_device_id class_code;
|
|
+ struct acpi_pnp_device_id_list compatible_id_list;
|
|
+};
|
|
+
|
|
+struct acpi_table_spcr {
|
|
+ struct acpi_table_header header;
|
|
+ u8 interface_type;
|
|
+ u8 reserved[3];
|
|
+ struct acpi_generic_address serial_port;
|
|
+ u8 interrupt_type;
|
|
+ u8 pc_interrupt;
|
|
+ u32 interrupt;
|
|
+ u8 baud_rate;
|
|
+ u8 parity;
|
|
+ u8 stop_bits;
|
|
+ u8 flow_control;
|
|
+ u8 terminal_type;
|
|
+ u8 reserved1;
|
|
+ u16 pci_device_id;
|
|
+ u16 pci_vendor_id;
|
|
+ u8 pci_bus;
|
|
+ u8 pci_device;
|
|
+ u8 pci_function;
|
|
+ u32 pci_flags;
|
|
+ u8 pci_segment;
|
|
+ u32 reserved2;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_table_stao {
|
|
+ struct acpi_table_header header;
|
|
+ u8 ignore_uart;
|
|
+} __attribute__((packed));
|
|
+
|
|
+enum acpi_reconfig_event {
|
|
+ ACPI_RECONFIG_DEVICE_ADD = 0,
|
|
+ ACPI_RECONFIG_DEVICE_REMOVE = 1,
|
|
+};
|
|
+
|
|
+struct acpi_probe_entry;
|
|
+
|
|
+typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *, struct acpi_probe_entry *);
|
|
+
|
|
+struct acpi_probe_entry {
|
|
+ __u8 id[5];
|
|
+ __u8 type;
|
|
+ acpi_probe_entry_validate_subtbl subtable_valid;
|
|
+ union {
|
|
+ acpi_tbl_table_handler probe_table;
|
|
+ acpi_tbl_entry_handler probe_subtbl;
|
|
+ };
|
|
+ kernel_ulong_t driver_data;
|
|
+};
|
|
+
|
|
+struct acpi_dep_data {
|
|
+ struct list_head node;
|
|
+ acpi_handle master;
|
|
+ acpi_handle slave;
|
|
+};
|
|
+
|
|
+struct acpi_table_events_work {
|
|
+ struct work_struct work;
|
|
+ void *table;
|
|
+ u32 event;
|
|
+};
|
|
+
|
|
+struct resource_win {
|
|
+ struct resource res;
|
|
+ resource_size_t offset;
|
|
+};
|
|
+
|
|
+struct res_proc_context {
|
|
+ struct list_head *list;
|
|
+ int (*preproc)(struct acpi_resource *, void *);
|
|
+ void *preproc_data;
|
|
+ int count;
|
|
+ int error;
|
|
+};
|
|
+
|
|
+struct acpi_table_ecdt {
|
|
+ struct acpi_table_header header;
|
|
+ struct acpi_generic_address control;
|
|
+ struct acpi_generic_address data;
|
|
+ u32 uid;
|
|
+ u8 gpe;
|
|
+ u8 id[1];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct transaction {
|
|
+ const u8 *wdata;
|
|
+ u8 *rdata;
|
|
+ short unsigned int irq_count;
|
|
+ u8 command;
|
|
+ u8 wi;
|
|
+ u8 ri;
|
|
+ u8 wlen;
|
|
+ u8 rlen;
|
|
+ u8 flags;
|
|
+};
|
|
+
|
|
+typedef int (*acpi_ec_query_func)(void *);
|
|
+
|
|
+enum ec_command {
|
|
+ ACPI_EC_COMMAND_READ = 128,
|
|
+ ACPI_EC_COMMAND_WRITE = 129,
|
|
+ ACPI_EC_BURST_ENABLE = 130,
|
|
+ ACPI_EC_BURST_DISABLE = 131,
|
|
+ ACPI_EC_COMMAND_QUERY = 132,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ EC_FLAGS_QUERY_ENABLED = 0,
|
|
+ EC_FLAGS_QUERY_PENDING = 1,
|
|
+ EC_FLAGS_QUERY_GUARDING = 2,
|
|
+ EC_FLAGS_GPE_HANDLER_INSTALLED = 3,
|
|
+ EC_FLAGS_EC_HANDLER_INSTALLED = 4,
|
|
+ EC_FLAGS_EVT_HANDLER_INSTALLED = 5,
|
|
+ EC_FLAGS_STARTED = 6,
|
|
+ EC_FLAGS_STOPPED = 7,
|
|
+ EC_FLAGS_GPE_MASKED = 8,
|
|
+};
|
|
+
|
|
+struct acpi_ec_query_handler {
|
|
+ struct list_head node;
|
|
+ acpi_ec_query_func func;
|
|
+ acpi_handle handle;
|
|
+ void *data;
|
|
+ u8 query_bit;
|
|
+ struct kref kref;
|
|
+};
|
|
+
|
|
+struct acpi_ec_query {
|
|
+ struct transaction transaction;
|
|
+ struct work_struct work;
|
|
+ struct acpi_ec_query_handler *handler;
|
|
+};
|
|
+
|
|
+struct dock_station {
|
|
+ acpi_handle handle;
|
|
+ long unsigned int last_dock_time;
|
|
+ u32 flags;
|
|
+ struct list_head dependent_devices;
|
|
+ struct list_head sibling;
|
|
+ struct platform_device *dock_device;
|
|
+};
|
|
+
|
|
+struct dock_dependent_device {
|
|
+ struct list_head list;
|
|
+ struct acpi_device *adev;
|
|
+};
|
|
+
|
|
+enum dock_callback_type {
|
|
+ DOCK_CALL_HANDLER = 0,
|
|
+ DOCK_CALL_FIXUP = 1,
|
|
+ DOCK_CALL_UEVENT = 2,
|
|
+};
|
|
+
|
|
+struct acpi_pci_root_ops;
|
|
+
|
|
+struct acpi_pci_root_info {
|
|
+ struct acpi_pci_root *root;
|
|
+ struct acpi_device *bridge;
|
|
+ struct acpi_pci_root_ops *ops;
|
|
+ struct list_head resources;
|
|
+ char name[16];
|
|
+};
|
|
+
|
|
+struct acpi_pci_root_ops {
|
|
+ struct pci_ops *pci_ops;
|
|
+ int (*init_info)(struct acpi_pci_root_info *);
|
|
+ void (*release_info)(struct acpi_pci_root_info *);
|
|
+ int (*prepare_resources)(struct acpi_pci_root_info *);
|
|
+};
|
|
+
|
|
+struct pci_osc_bit_struct {
|
|
+ u32 bit;
|
|
+ char *desc;
|
|
+};
|
|
+
|
|
+struct acpi_handle_node {
|
|
+ struct list_head node;
|
|
+ acpi_handle handle;
|
|
+};
|
|
+
|
|
+struct acpi_pci_link_irq {
|
|
+ u32 active;
|
|
+ u8 triggering;
|
|
+ u8 polarity;
|
|
+ u8 resource_type;
|
|
+ u8 possible_count;
|
|
+ u32 possible[16];
|
|
+ u8 initialized: 1;
|
|
+ u8 reserved: 7;
|
|
+};
|
|
+
|
|
+struct acpi_pci_link {
|
|
+ struct list_head list;
|
|
+ struct acpi_device *device;
|
|
+ struct acpi_pci_link_irq irq;
|
|
+ int refcnt;
|
|
+};
|
|
+
|
|
+struct acpi_pci_routing_table {
|
|
+ u32 length;
|
|
+ u32 pin;
|
|
+ u64 address;
|
|
+ u32 source_index;
|
|
+ char source[4];
|
|
+};
|
|
+
|
|
+struct acpi_prt_entry {
|
|
+ struct acpi_pci_id id;
|
|
+ u8 pin;
|
|
+ acpi_handle link;
|
|
+ u32 index;
|
|
+};
|
|
+
|
|
+struct prt_quirk {
|
|
+ const struct dmi_system_id *system;
|
|
+ unsigned int segment;
|
|
+ unsigned int bus;
|
|
+ unsigned int device;
|
|
+ unsigned char pin;
|
|
+ const char *source;
|
|
+ const char *actual_source;
|
|
+};
|
|
+
|
|
+struct clk_core;
|
|
+
|
|
+struct clk_init_data;
|
|
+
|
|
+struct clk_hw {
|
|
+ struct clk_core *core;
|
|
+ struct clk *clk;
|
|
+ const struct clk_init_data *init;
|
|
+};
|
|
+
|
|
+struct clk_rate_request {
|
|
+ long unsigned int rate;
|
|
+ long unsigned int min_rate;
|
|
+ long unsigned int max_rate;
|
|
+ long unsigned int best_parent_rate;
|
|
+ struct clk_hw *best_parent_hw;
|
|
+};
|
|
+
|
|
+struct clk_duty {
|
|
+ unsigned int num;
|
|
+ unsigned int den;
|
|
+};
|
|
+
|
|
+struct clk_ops {
|
|
+ int (*prepare)(struct clk_hw *);
|
|
+ void (*unprepare)(struct clk_hw *);
|
|
+ int (*is_prepared)(struct clk_hw *);
|
|
+ void (*unprepare_unused)(struct clk_hw *);
|
|
+ int (*enable)(struct clk_hw *);
|
|
+ void (*disable)(struct clk_hw *);
|
|
+ int (*is_enabled)(struct clk_hw *);
|
|
+ void (*disable_unused)(struct clk_hw *);
|
|
+ long unsigned int (*recalc_rate)(struct clk_hw *, long unsigned int);
|
|
+ long int (*round_rate)(struct clk_hw *, long unsigned int, long unsigned int *);
|
|
+ int (*determine_rate)(struct clk_hw *, struct clk_rate_request *);
|
|
+ int (*set_parent)(struct clk_hw *, u8);
|
|
+ u8 (*get_parent)(struct clk_hw *);
|
|
+ int (*set_rate)(struct clk_hw *, long unsigned int, long unsigned int);
|
|
+ int (*set_rate_and_parent)(struct clk_hw *, long unsigned int, long unsigned int, u8);
|
|
+ long unsigned int (*recalc_accuracy)(struct clk_hw *, long unsigned int);
|
|
+ int (*get_phase)(struct clk_hw *);
|
|
+ int (*set_phase)(struct clk_hw *, int);
|
|
+ int (*get_duty_cycle)(struct clk_hw *, struct clk_duty *);
|
|
+ int (*set_duty_cycle)(struct clk_hw *, struct clk_duty *);
|
|
+ void (*init)(struct clk_hw *);
|
|
+ void (*debug_init)(struct clk_hw *, struct dentry *);
|
|
+};
|
|
+
|
|
+struct clk_init_data {
|
|
+ const char *name;
|
|
+ const struct clk_ops *ops;
|
|
+ const char * const *parent_names;
|
|
+ u8 num_parents;
|
|
+ long unsigned int flags;
|
|
+};
|
|
+
|
|
+struct lpss_clk_data {
|
|
+ const char *name;
|
|
+ struct clk *clk;
|
|
+};
|
|
+
|
|
+struct lpss_private_data;
|
|
+
|
|
+struct lpss_device_desc {
|
|
+ unsigned int flags;
|
|
+ const char *clk_con_id;
|
|
+ unsigned int prv_offset;
|
|
+ size_t prv_size_override;
|
|
+ struct property_entry *properties;
|
|
+ void (*setup)(struct lpss_private_data *);
|
|
+ bool resume_from_noirq;
|
|
+};
|
|
+
|
|
+struct lpss_private_data {
|
|
+ struct acpi_device *adev;
|
|
+ void *mmio_base;
|
|
+ resource_size_t mmio_size;
|
|
+ unsigned int fixed_clk_rate;
|
|
+ struct clk *clk;
|
|
+ const struct lpss_device_desc *dev_desc;
|
|
+ u32 prv_reg_ctx[9];
|
|
+};
|
|
+
|
|
+struct lpss_device_links {
|
|
+ const char *supplier_hid;
|
|
+ const char *supplier_uid;
|
|
+ const char *consumer_hid;
|
|
+ const char *consumer_uid;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+struct hid_uid {
|
|
+ const char *hid;
|
|
+ const char *uid;
|
|
+};
|
|
+
|
|
+struct st_clk_data {
|
|
+ void *base;
|
|
+};
|
|
+
|
|
+struct apd_private_data;
|
|
+
|
|
+struct apd_device_desc {
|
|
+ unsigned int flags;
|
|
+ unsigned int fixed_clk_rate;
|
|
+ struct property_entry *properties;
|
|
+ int (*setup)(struct apd_private_data *);
|
|
+};
|
|
+
|
|
+struct apd_private_data {
|
|
+ struct clk *clk;
|
|
+ struct acpi_device *adev;
|
|
+ const struct apd_device_desc *dev_desc;
|
|
+};
|
|
+
|
|
+struct acpi_power_resource {
|
|
+ struct acpi_device device;
|
|
+ struct list_head list_node;
|
|
+ char *name;
|
|
+ u32 system_level;
|
|
+ u32 order;
|
|
+ unsigned int ref_count;
|
|
+ bool wakeup_enabled;
|
|
+ struct mutex resource_lock;
|
|
+};
|
|
+
|
|
+struct acpi_power_resource_entry {
|
|
+ struct list_head node;
|
|
+ struct acpi_power_resource *resource;
|
|
+};
|
|
+
|
|
+struct acpi_bus_event {
|
|
+ struct list_head node;
|
|
+ acpi_device_class device_class;
|
|
+ acpi_bus_id bus_id;
|
|
+ u32 type;
|
|
+ u32 data;
|
|
+};
|
|
+
|
|
+struct acpi_genl_event {
|
|
+ acpi_device_class device_class;
|
|
+ char bus_id[15];
|
|
+ u32 type;
|
|
+ u32 data;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ ACPI_GENL_ATTR_UNSPEC = 0,
|
|
+ ACPI_GENL_ATTR_EVENT = 1,
|
|
+ __ACPI_GENL_ATTR_MAX = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ ACPI_GENL_CMD_UNSPEC = 0,
|
|
+ ACPI_GENL_CMD_EVENT = 1,
|
|
+ __ACPI_GENL_CMD_MAX = 2,
|
|
+};
|
|
+
|
|
+struct acpi_table_bert {
|
|
+ struct acpi_table_header header;
|
|
+ u32 region_length;
|
|
+ u64 address;
|
|
+};
|
|
+
|
|
+struct acpi_table_attr {
|
|
+ struct bin_attribute attr;
|
|
+ char name[4];
|
|
+ int instance;
|
|
+ char filename[8];
|
|
+ struct list_head node;
|
|
+};
|
|
+
|
|
+struct acpi_data_attr {
|
|
+ struct bin_attribute attr;
|
|
+ u64 addr;
|
|
+};
|
|
+
|
|
+struct acpi_data_obj {
|
|
+ char *name;
|
|
+ int (*fn)(void *, struct acpi_data_attr *);
|
|
+};
|
|
+
|
|
+struct event_counter {
|
|
+ u32 count;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+struct always_present_id {
|
|
+ struct acpi_device_id hid[2];
|
|
+ struct x86_cpu_id cpu_ids[2];
|
|
+ struct dmi_system_id dmi_ids[2];
|
|
+ const char *uid;
|
|
+};
|
|
+
|
|
+struct acpi_table_slit {
|
|
+ struct acpi_table_header header;
|
|
+ u64 locality_count;
|
|
+ u8 entry[1];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_table_srat {
|
|
+ struct acpi_table_header header;
|
|
+ u32 table_revision;
|
|
+ u64 reserved;
|
|
+};
|
|
+
|
|
+enum acpi_srat_type {
|
|
+ ACPI_SRAT_TYPE_CPU_AFFINITY = 0,
|
|
+ ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1,
|
|
+ ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2,
|
|
+ ACPI_SRAT_TYPE_GICC_AFFINITY = 3,
|
|
+ ACPI_SRAT_TYPE_GIC_ITS_AFFINITY = 4,
|
|
+ ACPI_SRAT_TYPE_RESERVED = 5,
|
|
+};
|
|
+
|
|
+struct acpi_srat_mem_affinity {
|
|
+ struct acpi_subtable_header header;
|
|
+ u32 proximity_domain;
|
|
+ u16 reserved;
|
|
+ u64 base_address;
|
|
+ u64 length;
|
|
+ u32 reserved1;
|
|
+ u32 flags;
|
|
+ u64 reserved2;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_srat_gicc_affinity {
|
|
+ struct acpi_subtable_header header;
|
|
+ u32 proximity_domain;
|
|
+ u32 acpi_processor_uid;
|
|
+ u32 flags;
|
|
+ u32 clock_domain;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_lpat {
|
|
+ int temp;
|
|
+ int raw;
|
|
+};
|
|
+
|
|
+struct acpi_lpat_conversion_table {
|
|
+ struct acpi_lpat *lpat;
|
|
+ int lpat_count;
|
|
+};
|
|
+
|
|
+struct acpi_table_lpit {
|
|
+ struct acpi_table_header header;
|
|
+};
|
|
+
|
|
+struct acpi_lpit_header {
|
|
+ u32 type;
|
|
+ u32 length;
|
|
+ u16 unique_id;
|
|
+ u16 reserved;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+struct acpi_lpit_native {
|
|
+ struct acpi_lpit_header header;
|
|
+ struct acpi_generic_address entry_trigger;
|
|
+ u32 residency;
|
|
+ u32 latency;
|
|
+ struct acpi_generic_address residency_counter;
|
|
+ u64 counter_frequency;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct lpit_residency_info {
|
|
+ struct acpi_generic_address gaddr;
|
|
+ u64 frequency;
|
|
+ void *iomem_addr;
|
|
+};
|
|
+
|
|
+struct acpi_table_wdat {
|
|
+ struct acpi_table_header header;
|
|
+ u32 header_length;
|
|
+ u16 pci_segment;
|
|
+ u8 pci_bus;
|
|
+ u8 pci_device;
|
|
+ u8 pci_function;
|
|
+ u8 reserved[3];
|
|
+ u32 timer_period;
|
|
+ u32 max_count;
|
|
+ u32 min_count;
|
|
+ u8 flags;
|
|
+ u8 reserved2[3];
|
|
+ u32 entries;
|
|
+};
|
|
+
|
|
+struct acpi_wdat_entry {
|
|
+ u8 action;
|
|
+ u8 instruction;
|
|
+ u16 reserved;
|
|
+ struct acpi_generic_address register_region;
|
|
+ u32 value;
|
|
+ u32 mask;
|
|
+} __attribute__((packed));
|
|
+
|
|
+enum {
|
|
+ ACPI_REFCLASS_LOCAL = 0,
|
|
+ ACPI_REFCLASS_ARG = 1,
|
|
+ ACPI_REFCLASS_REFOF = 2,
|
|
+ ACPI_REFCLASS_INDEX = 3,
|
|
+ ACPI_REFCLASS_TABLE = 4,
|
|
+ ACPI_REFCLASS_NAME = 5,
|
|
+ ACPI_REFCLASS_DEBUG = 6,
|
|
+ ACPI_REFCLASS_MAX = 6,
|
|
+};
|
|
+
|
|
+struct acpi_common_descriptor {
|
|
+ void *common_pointer;
|
|
+ u8 descriptor_type;
|
|
+};
|
|
+
|
|
+union acpi_descriptor {
|
|
+ struct acpi_common_descriptor common;
|
|
+ union acpi_operand_object object;
|
|
+ struct acpi_namespace_node node;
|
|
+ union acpi_parse_object op;
|
|
+};
|
|
+
|
|
+struct acpi_create_field_info {
|
|
+ struct acpi_namespace_node *region_node;
|
|
+ struct acpi_namespace_node *field_node;
|
|
+ struct acpi_namespace_node *register_node;
|
|
+ struct acpi_namespace_node *data_register_node;
|
|
+ struct acpi_namespace_node *connection_node;
|
|
+ u8 *resource_buffer;
|
|
+ u32 bank_value;
|
|
+ u32 field_bit_position;
|
|
+ u32 field_bit_length;
|
|
+ u16 resource_length;
|
|
+ u16 pin_number_index;
|
|
+ u8 field_flags;
|
|
+ u8 attribute;
|
|
+ u8 field_type;
|
|
+ u8 access_length;
|
|
+};
|
|
+
|
|
+struct acpi_init_walk_info {
|
|
+ u32 table_index;
|
|
+ u32 object_count;
|
|
+ u32 method_count;
|
|
+ u32 serial_method_count;
|
|
+ u32 non_serial_method_count;
|
|
+ u32 serialized_method_count;
|
|
+ u32 device_count;
|
|
+ u32 op_region_count;
|
|
+ u32 field_count;
|
|
+ u32 buffer_count;
|
|
+ u32 package_count;
|
|
+ u32 op_region_init;
|
|
+ u32 field_init;
|
|
+ u32 buffer_init;
|
|
+ u32 package_init;
|
|
+ acpi_owner_id owner_id;
|
|
+};
|
|
+
|
|
+struct acpi_name_info {
|
|
+ char name[4];
|
|
+ u16 argument_list;
|
|
+ u8 expected_btypes;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_package_info {
|
|
+ u8 type;
|
|
+ u8 object_type1;
|
|
+ u8 count1;
|
|
+ u8 object_type2;
|
|
+ u8 count2;
|
|
+ u16 reserved;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_package_info2 {
|
|
+ u8 type;
|
|
+ u8 count;
|
|
+ u8 object_type[4];
|
|
+ u8 reserved;
|
|
+};
|
|
+
|
|
+struct acpi_package_info3 {
|
|
+ u8 type;
|
|
+ u8 count;
|
|
+ u8 object_type[2];
|
|
+ u8 tail_object_type;
|
|
+ u16 reserved;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_package_info4 {
|
|
+ u8 type;
|
|
+ u8 object_type1;
|
|
+ u8 count1;
|
|
+ u8 sub_object_types;
|
|
+ u8 pkg_count;
|
|
+ u16 reserved;
|
|
+} __attribute__((packed));
|
|
+
|
|
+union acpi_predefined_info {
|
|
+ struct acpi_name_info info;
|
|
+ struct acpi_package_info ret_info;
|
|
+ struct acpi_package_info2 ret_info2;
|
|
+ struct acpi_package_info3 ret_info3;
|
|
+ struct acpi_package_info4 ret_info4;
|
|
+};
|
|
+
|
|
+struct acpi_evaluate_info {
|
|
+ struct acpi_namespace_node *prefix_node;
|
|
+ const char *relative_pathname;
|
|
+ union acpi_operand_object **parameters;
|
|
+ struct acpi_namespace_node *node;
|
|
+ union acpi_operand_object *obj_desc;
|
|
+ char *full_pathname;
|
|
+ const union acpi_predefined_info *predefined;
|
|
+ union acpi_operand_object *return_object;
|
|
+ union acpi_operand_object *parent_package;
|
|
+ u32 return_flags;
|
|
+ u32 return_btype;
|
|
+ u16 param_count;
|
|
+ u8 pass_number;
|
|
+ u8 return_object_type;
|
|
+ u8 node_flags;
|
|
+ u8 flags;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ AML_FIELD_ACCESS_ANY = 0,
|
|
+ AML_FIELD_ACCESS_BYTE = 1,
|
|
+ AML_FIELD_ACCESS_WORD = 2,
|
|
+ AML_FIELD_ACCESS_DWORD = 3,
|
|
+ AML_FIELD_ACCESS_QWORD = 4,
|
|
+ AML_FIELD_ACCESS_BUFFER = 5,
|
|
+};
|
|
+
|
|
+typedef enum {
|
|
+ ACPI_IMODE_LOAD_PASS1 = 1,
|
|
+ ACPI_IMODE_LOAD_PASS2 = 2,
|
|
+ ACPI_IMODE_EXECUTE = 3,
|
|
+} acpi_interpreter_mode;
|
|
+
|
|
+typedef acpi_status (*acpi_execute_op)(struct acpi_walk_state *);
|
|
+
|
|
+struct acpi_gpe_walk_info {
|
|
+ struct acpi_namespace_node *gpe_device;
|
|
+ struct acpi_gpe_block_info *gpe_block;
|
|
+ u16 count;
|
|
+ acpi_owner_id owner_id;
|
|
+ u8 execute_by_owner_id;
|
|
+};
|
|
+
|
|
+struct acpi_gpe_device_info {
|
|
+ u32 index;
|
|
+ u32 next_block_base_index;
|
|
+ acpi_status status;
|
|
+ struct acpi_namespace_node *gpe_device;
|
|
+};
|
|
+
|
|
+typedef acpi_status (*acpi_gpe_callback)(struct acpi_gpe_xrupt_info *, struct acpi_gpe_block_info *, void *);
|
|
+
|
|
+struct acpi_reg_walk_info {
|
|
+ u32 function;
|
|
+ u32 reg_run_count;
|
|
+ acpi_adr_space_type space_id;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ AML_FIELD_ATTRIB_QUICK = 2,
|
|
+ AML_FIELD_ATTRIB_SEND_RCV = 4,
|
|
+ AML_FIELD_ATTRIB_BYTE = 6,
|
|
+ AML_FIELD_ATTRIB_WORD = 8,
|
|
+ AML_FIELD_ATTRIB_BLOCK = 10,
|
|
+ AML_FIELD_ATTRIB_MULTIBYTE = 11,
|
|
+ AML_FIELD_ATTRIB_WORD_CALL = 12,
|
|
+ AML_FIELD_ATTRIB_BLOCK_CALL = 13,
|
|
+ AML_FIELD_ATTRIB_RAW_BYTES = 14,
|
|
+ AML_FIELD_ATTRIB_RAW_PROCESS = 15,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ AML_FIELD_UPDATE_PRESERVE = 0,
|
|
+ AML_FIELD_UPDATE_WRITE_AS_ONES = 32,
|
|
+ AML_FIELD_UPDATE_WRITE_AS_ZEROS = 64,
|
|
+};
|
|
+
|
|
+struct acpi_signal_fatal_info {
|
|
+ u32 type;
|
|
+ u32 code;
|
|
+ u32 argument;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ MATCH_MTR = 0,
|
|
+ MATCH_MEQ = 1,
|
|
+ MATCH_MLE = 2,
|
|
+ MATCH_MLT = 3,
|
|
+ MATCH_MGE = 4,
|
|
+ MATCH_MGT = 5,
|
|
+};
|
|
+
|
|
+typedef enum {
|
|
+ ACPI_TRACE_AML_METHOD = 0,
|
|
+ ACPI_TRACE_AML_OPCODE = 1,
|
|
+ ACPI_TRACE_AML_REGION = 2,
|
|
+} acpi_trace_event_type;
|
|
+
|
|
+struct acpi_pci_device {
|
|
+ acpi_handle device;
|
|
+ struct acpi_pci_device *next;
|
|
+};
|
|
+
|
|
+struct acpi_port_info {
|
|
+ char *name;
|
|
+ u16 start;
|
|
+ u16 end;
|
|
+ u8 osi_dependency;
|
|
+};
|
|
+
|
|
+typedef acpi_status (*acpi_sleep_function)(u8);
|
|
+
|
|
+struct acpi_sleep_functions {
|
|
+ acpi_sleep_function legacy_function;
|
|
+ acpi_sleep_function extended_function;
|
|
+};
|
|
+
|
|
+struct acpi_device_walk_info {
|
|
+ struct acpi_table_desc *table_desc;
|
|
+ struct acpi_evaluate_info *evaluate_info;
|
|
+ u32 device_count;
|
|
+ u32 num_STA;
|
|
+ u32 num_INI;
|
|
+};
|
|
+
|
|
+enum acpi_return_package_types {
|
|
+ ACPI_PTYPE1_FIXED = 1,
|
|
+ ACPI_PTYPE1_VAR = 2,
|
|
+ ACPI_PTYPE1_OPTION = 3,
|
|
+ ACPI_PTYPE2 = 4,
|
|
+ ACPI_PTYPE2_COUNT = 5,
|
|
+ ACPI_PTYPE2_PKG_COUNT = 6,
|
|
+ ACPI_PTYPE2_FIXED = 7,
|
|
+ ACPI_PTYPE2_MIN = 8,
|
|
+ ACPI_PTYPE2_REV_FIXED = 9,
|
|
+ ACPI_PTYPE2_FIX_VAR = 10,
|
|
+ ACPI_PTYPE2_VAR_VAR = 11,
|
|
+ ACPI_PTYPE2_UUID_PAIR = 12,
|
|
+ ACPI_PTYPE_CUSTOM = 13,
|
|
+};
|
|
+
|
|
+typedef acpi_status (*acpi_object_converter)(struct acpi_namespace_node *, union acpi_operand_object *, union acpi_operand_object **);
|
|
+
|
|
+struct acpi_simple_repair_info {
|
|
+ char name[4];
|
|
+ u32 unexpected_btypes;
|
|
+ u32 package_index;
|
|
+ acpi_object_converter object_converter;
|
|
+};
|
|
+
|
|
+typedef acpi_status (*acpi_repair_function)(struct acpi_evaluate_info *, union acpi_operand_object **);
|
|
+
|
|
+struct acpi_repair_info {
|
|
+ char name[4];
|
|
+ acpi_repair_function repair_function;
|
|
+};
|
|
+
|
|
+struct acpi_namestring_info {
|
|
+ const char *external_name;
|
|
+ const char *next_external_char;
|
|
+ char *internal_name;
|
|
+ u32 length;
|
|
+ u32 num_segments;
|
|
+ u32 num_carats;
|
|
+ u8 fully_qualified;
|
|
+};
|
|
+
|
|
+typedef acpi_status (*acpi_walk_callback)(acpi_handle, u32, void *, void **);
|
|
+
|
|
+struct acpi_get_devices_info {
|
|
+ acpi_walk_callback user_function;
|
|
+ void *context;
|
|
+ const char *hid;
|
|
+};
|
|
+
|
|
+struct aml_resource_small_header {
|
|
+ u8 descriptor_type;
|
|
+};
|
|
+
|
|
+struct aml_resource_irq {
|
|
+ u8 descriptor_type;
|
|
+ u16 irq_mask;
|
|
+ u8 flags;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_dma {
|
|
+ u8 descriptor_type;
|
|
+ u8 dma_channel_mask;
|
|
+ u8 flags;
|
|
+};
|
|
+
|
|
+struct aml_resource_start_dependent {
|
|
+ u8 descriptor_type;
|
|
+ u8 flags;
|
|
+};
|
|
+
|
|
+struct aml_resource_end_dependent {
|
|
+ u8 descriptor_type;
|
|
+};
|
|
+
|
|
+struct aml_resource_io {
|
|
+ u8 descriptor_type;
|
|
+ u8 flags;
|
|
+ u16 minimum;
|
|
+ u16 maximum;
|
|
+ u8 alignment;
|
|
+ u8 address_length;
|
|
+};
|
|
+
|
|
+struct aml_resource_fixed_io {
|
|
+ u8 descriptor_type;
|
|
+ u16 address;
|
|
+ u8 address_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_vendor_small {
|
|
+ u8 descriptor_type;
|
|
+};
|
|
+
|
|
+struct aml_resource_end_tag {
|
|
+ u8 descriptor_type;
|
|
+ u8 checksum;
|
|
+};
|
|
+
|
|
+struct aml_resource_fixed_dma {
|
|
+ u8 descriptor_type;
|
|
+ u16 request_lines;
|
|
+ u16 channels;
|
|
+ u8 width;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_large_header {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_memory24 {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 flags;
|
|
+ u16 minimum;
|
|
+ u16 maximum;
|
|
+ u16 alignment;
|
|
+ u16 address_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_vendor_large {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_memory32 {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 flags;
|
|
+ u32 minimum;
|
|
+ u32 maximum;
|
|
+ u32 alignment;
|
|
+ u32 address_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_fixed_memory32 {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 flags;
|
|
+ u32 address;
|
|
+ u32 address_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_address {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 resource_type;
|
|
+ u8 flags;
|
|
+ u8 specific_flags;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_extended_address64 {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 resource_type;
|
|
+ u8 flags;
|
|
+ u8 specific_flags;
|
|
+ u8 revision_ID;
|
|
+ u8 reserved;
|
|
+ u64 granularity;
|
|
+ u64 minimum;
|
|
+ u64 maximum;
|
|
+ u64 translation_offset;
|
|
+ u64 address_length;
|
|
+ u64 type_specific;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_address64 {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 resource_type;
|
|
+ u8 flags;
|
|
+ u8 specific_flags;
|
|
+ u64 granularity;
|
|
+ u64 minimum;
|
|
+ u64 maximum;
|
|
+ u64 translation_offset;
|
|
+ u64 address_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_address32 {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 resource_type;
|
|
+ u8 flags;
|
|
+ u8 specific_flags;
|
|
+ u32 granularity;
|
|
+ u32 minimum;
|
|
+ u32 maximum;
|
|
+ u32 translation_offset;
|
|
+ u32 address_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_address16 {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 resource_type;
|
|
+ u8 flags;
|
|
+ u8 specific_flags;
|
|
+ u16 granularity;
|
|
+ u16 minimum;
|
|
+ u16 maximum;
|
|
+ u16 translation_offset;
|
|
+ u16 address_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_extended_irq {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 flags;
|
|
+ u8 interrupt_count;
|
|
+ u32 interrupts[1];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_generic_register {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 address_space_id;
|
|
+ u8 bit_width;
|
|
+ u8 bit_offset;
|
|
+ u8 access_size;
|
|
+ u64 address;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_gpio {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 revision_id;
|
|
+ u8 connection_type;
|
|
+ u16 flags;
|
|
+ u16 int_flags;
|
|
+ u8 pin_config;
|
|
+ u16 drive_strength;
|
|
+ u16 debounce_timeout;
|
|
+ u16 pin_table_offset;
|
|
+ u8 res_source_index;
|
|
+ u16 res_source_offset;
|
|
+ u16 vendor_offset;
|
|
+ u16 vendor_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_common_serialbus {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 revision_id;
|
|
+ u8 res_source_index;
|
|
+ u8 type;
|
|
+ u8 flags;
|
|
+ u16 type_specific_flags;
|
|
+ u8 type_revision_id;
|
|
+ u16 type_data_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_i2c_serialbus {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 revision_id;
|
|
+ u8 res_source_index;
|
|
+ u8 type;
|
|
+ u8 flags;
|
|
+ u16 type_specific_flags;
|
|
+ u8 type_revision_id;
|
|
+ u16 type_data_length;
|
|
+ u32 connection_speed;
|
|
+ u16 slave_address;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_spi_serialbus {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 revision_id;
|
|
+ u8 res_source_index;
|
|
+ u8 type;
|
|
+ u8 flags;
|
|
+ u16 type_specific_flags;
|
|
+ u8 type_revision_id;
|
|
+ u16 type_data_length;
|
|
+ u32 connection_speed;
|
|
+ u8 data_bit_length;
|
|
+ u8 clock_phase;
|
|
+ u8 clock_polarity;
|
|
+ u16 device_selection;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_uart_serialbus {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 revision_id;
|
|
+ u8 res_source_index;
|
|
+ u8 type;
|
|
+ u8 flags;
|
|
+ u16 type_specific_flags;
|
|
+ u8 type_revision_id;
|
|
+ u16 type_data_length;
|
|
+ u32 default_baud_rate;
|
|
+ u16 rx_fifo_size;
|
|
+ u16 tx_fifo_size;
|
|
+ u8 parity;
|
|
+ u8 lines_enabled;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_pin_function {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 revision_id;
|
|
+ u16 flags;
|
|
+ u8 pin_config;
|
|
+ u16 function_number;
|
|
+ u16 pin_table_offset;
|
|
+ u8 res_source_index;
|
|
+ u16 res_source_offset;
|
|
+ u16 vendor_offset;
|
|
+ u16 vendor_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_pin_config {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 revision_id;
|
|
+ u16 flags;
|
|
+ u8 pin_config_type;
|
|
+ u32 pin_config_value;
|
|
+ u16 pin_table_offset;
|
|
+ u8 res_source_index;
|
|
+ u16 res_source_offset;
|
|
+ u16 vendor_offset;
|
|
+ u16 vendor_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_pin_group {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 revision_id;
|
|
+ u16 flags;
|
|
+ u16 pin_table_offset;
|
|
+ u16 label_offset;
|
|
+ u16 vendor_offset;
|
|
+ u16 vendor_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_pin_group_function {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 revision_id;
|
|
+ u16 flags;
|
|
+ u16 function_number;
|
|
+ u8 res_source_index;
|
|
+ u16 res_source_offset;
|
|
+ u16 res_source_label_offset;
|
|
+ u16 vendor_offset;
|
|
+ u16 vendor_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct aml_resource_pin_group_config {
|
|
+ u8 descriptor_type;
|
|
+ u16 resource_length;
|
|
+ u8 revision_id;
|
|
+ u16 flags;
|
|
+ u8 pin_config_type;
|
|
+ u32 pin_config_value;
|
|
+ u8 res_source_index;
|
|
+ u16 res_source_offset;
|
|
+ u16 res_source_label_offset;
|
|
+ u16 vendor_offset;
|
|
+ u16 vendor_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+union aml_resource {
|
|
+ u8 descriptor_type;
|
|
+ struct aml_resource_small_header small_header;
|
|
+ struct aml_resource_large_header large_header;
|
|
+ struct aml_resource_irq irq;
|
|
+ struct aml_resource_dma dma;
|
|
+ struct aml_resource_start_dependent start_dpf;
|
|
+ struct aml_resource_end_dependent end_dpf;
|
|
+ struct aml_resource_io io;
|
|
+ struct aml_resource_fixed_io fixed_io;
|
|
+ struct aml_resource_fixed_dma fixed_dma;
|
|
+ struct aml_resource_vendor_small vendor_small;
|
|
+ struct aml_resource_end_tag end_tag;
|
|
+ struct aml_resource_memory24 memory24;
|
|
+ struct aml_resource_generic_register generic_reg;
|
|
+ struct aml_resource_vendor_large vendor_large;
|
|
+ struct aml_resource_memory32 memory32;
|
|
+ struct aml_resource_fixed_memory32 fixed_memory32;
|
|
+ struct aml_resource_address16 address16;
|
|
+ struct aml_resource_address32 address32;
|
|
+ struct aml_resource_address64 address64;
|
|
+ struct aml_resource_extended_address64 ext_address64;
|
|
+ struct aml_resource_extended_irq extended_irq;
|
|
+ struct aml_resource_gpio gpio;
|
|
+ struct aml_resource_i2c_serialbus i2c_serial_bus;
|
|
+ struct aml_resource_spi_serialbus spi_serial_bus;
|
|
+ struct aml_resource_uart_serialbus uart_serial_bus;
|
|
+ struct aml_resource_common_serialbus common_serial_bus;
|
|
+ struct aml_resource_pin_function pin_function;
|
|
+ struct aml_resource_pin_config pin_config;
|
|
+ struct aml_resource_pin_group pin_group;
|
|
+ struct aml_resource_pin_group_function pin_group_function;
|
|
+ struct aml_resource_pin_group_config pin_group_config;
|
|
+ struct aml_resource_address address;
|
|
+ u32 dword_item;
|
|
+ u16 word_item;
|
|
+ u8 byte_item;
|
|
+};
|
|
+
|
|
+struct acpi_rsconvert_info {
|
|
+ u8 opcode;
|
|
+ u8 resource_offset;
|
|
+ u8 aml_offset;
|
|
+ u8 value;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ ACPI_RSC_INITGET = 0,
|
|
+ ACPI_RSC_INITSET = 1,
|
|
+ ACPI_RSC_FLAGINIT = 2,
|
|
+ ACPI_RSC_1BITFLAG = 3,
|
|
+ ACPI_RSC_2BITFLAG = 4,
|
|
+ ACPI_RSC_3BITFLAG = 5,
|
|
+ ACPI_RSC_ADDRESS = 6,
|
|
+ ACPI_RSC_BITMASK = 7,
|
|
+ ACPI_RSC_BITMASK16 = 8,
|
|
+ ACPI_RSC_COUNT = 9,
|
|
+ ACPI_RSC_COUNT16 = 10,
|
|
+ ACPI_RSC_COUNT_GPIO_PIN = 11,
|
|
+ ACPI_RSC_COUNT_GPIO_RES = 12,
|
|
+ ACPI_RSC_COUNT_GPIO_VEN = 13,
|
|
+ ACPI_RSC_COUNT_SERIAL_RES = 14,
|
|
+ ACPI_RSC_COUNT_SERIAL_VEN = 15,
|
|
+ ACPI_RSC_DATA8 = 16,
|
|
+ ACPI_RSC_EXIT_EQ = 17,
|
|
+ ACPI_RSC_EXIT_LE = 18,
|
|
+ ACPI_RSC_EXIT_NE = 19,
|
|
+ ACPI_RSC_LENGTH = 20,
|
|
+ ACPI_RSC_MOVE_GPIO_PIN = 21,
|
|
+ ACPI_RSC_MOVE_GPIO_RES = 22,
|
|
+ ACPI_RSC_MOVE_SERIAL_RES = 23,
|
|
+ ACPI_RSC_MOVE_SERIAL_VEN = 24,
|
|
+ ACPI_RSC_MOVE8 = 25,
|
|
+ ACPI_RSC_MOVE16 = 26,
|
|
+ ACPI_RSC_MOVE32 = 27,
|
|
+ ACPI_RSC_MOVE64 = 28,
|
|
+ ACPI_RSC_SET8 = 29,
|
|
+ ACPI_RSC_SOURCE = 30,
|
|
+ ACPI_RSC_SOURCEX = 31,
|
|
+};
|
|
+
|
|
+typedef u16 acpi_rs_length;
|
|
+
|
|
+typedef u32 acpi_rsdesc_size;
|
|
+
|
|
+struct acpi_vendor_uuid {
|
|
+ u8 subtype;
|
|
+ u8 data[16];
|
|
+};
|
|
+
|
|
+typedef acpi_status (*acpi_walk_resource_callback)(struct acpi_resource *, void *);
|
|
+
|
|
+struct acpi_vendor_walk_info {
|
|
+ struct acpi_vendor_uuid *uuid;
|
|
+ struct acpi_buffer *buffer;
|
|
+ acpi_status status;
|
|
+};
|
|
+
|
|
+struct acpi_fadt_info {
|
|
+ const char *name;
|
|
+ u16 address64;
|
|
+ u16 address32;
|
|
+ u16 length;
|
|
+ u8 default_length;
|
|
+ u8 flags;
|
|
+};
|
|
+
|
|
+struct acpi_fadt_pm_info {
|
|
+ struct acpi_generic_address *target;
|
|
+ u16 source;
|
|
+ u8 register_num;
|
|
+};
|
|
+
|
|
+struct acpi_table_rsdp {
|
|
+ char signature[8];
|
|
+ u8 checksum;
|
|
+ char oem_id[6];
|
|
+ u8 revision;
|
|
+ u32 rsdt_physical_address;
|
|
+ u32 length;
|
|
+ u64 xsdt_physical_address;
|
|
+ u8 extended_checksum;
|
|
+ u8 reserved[3];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_pkg_info {
|
|
+ u8 *free_space;
|
|
+ acpi_size length;
|
|
+ u32 object_space;
|
|
+ u32 num_packages;
|
|
+};
|
|
+
|
|
+struct acpi_exception_info {
|
|
+ char *name;
|
|
+};
|
|
+
|
|
+typedef acpi_status (*acpi_pkg_callback)(u8, union acpi_operand_object *, union acpi_generic_state *, void *);
|
|
+
|
|
+typedef u32 acpi_mutex_handle;
|
|
+
|
|
+typedef acpi_status (*acpi_walk_aml_callback)(u8 *, u32, u32, u8, void **);
|
|
+
|
|
+enum led_brightness {
|
|
+ LED_OFF = 0,
|
|
+ LED_ON = 1,
|
|
+ LED_HALF = 127,
|
|
+ LED_FULL = 255,
|
|
+};
|
|
+
|
|
+struct led_trigger;
|
|
+
|
|
+struct led_classdev {
|
|
+ const char *name;
|
|
+ enum led_brightness brightness;
|
|
+ enum led_brightness max_brightness;
|
|
+ int flags;
|
|
+ long unsigned int work_flags;
|
|
+ void (*brightness_set)(struct led_classdev *, enum led_brightness);
|
|
+ int (*brightness_set_blocking)(struct led_classdev *, enum led_brightness);
|
|
+ enum led_brightness (*brightness_get)(struct led_classdev *);
|
|
+ int (*blink_set)(struct led_classdev *, long unsigned int *, long unsigned int *);
|
|
+ struct device *dev;
|
|
+ const struct attribute_group **groups;
|
|
+ struct list_head node;
|
|
+ const char *default_trigger;
|
|
+ long unsigned int blink_delay_on;
|
|
+ long unsigned int blink_delay_off;
|
|
+ struct timer_list blink_timer;
|
|
+ int blink_brightness;
|
|
+ int new_blink_brightness;
|
|
+ void (*flash_resume)(struct led_classdev *);
|
|
+ struct work_struct set_brightness_work;
|
|
+ int delayed_set_value;
|
|
+ struct rw_semaphore trigger_lock;
|
|
+ struct led_trigger *trigger;
|
|
+ struct list_head trig_list;
|
|
+ void *trigger_data;
|
|
+ bool activated;
|
|
+ struct mutex led_access;
|
|
+};
|
|
+
|
|
+struct led_trigger {
|
|
+ const char *name;
|
|
+ int (*activate)(struct led_classdev *);
|
|
+ void (*deactivate)(struct led_classdev *);
|
|
+ rwlock_t leddev_list_lock;
|
|
+ struct list_head led_cdevs;
|
|
+ struct list_head next_trig;
|
|
+ const struct attribute_group **groups;
|
|
+};
|
|
+
|
|
+enum power_supply_property {
|
|
+ POWER_SUPPLY_PROP_STATUS = 0,
|
|
+ POWER_SUPPLY_PROP_CHARGE_TYPE = 1,
|
|
+ POWER_SUPPLY_PROP_HEALTH = 2,
|
|
+ POWER_SUPPLY_PROP_PRESENT = 3,
|
|
+ POWER_SUPPLY_PROP_ONLINE = 4,
|
|
+ POWER_SUPPLY_PROP_AUTHENTIC = 5,
|
|
+ POWER_SUPPLY_PROP_TECHNOLOGY = 6,
|
|
+ POWER_SUPPLY_PROP_CYCLE_COUNT = 7,
|
|
+ POWER_SUPPLY_PROP_VOLTAGE_MAX = 8,
|
|
+ POWER_SUPPLY_PROP_VOLTAGE_MIN = 9,
|
|
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN = 10,
|
|
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN = 11,
|
|
+ POWER_SUPPLY_PROP_VOLTAGE_NOW = 12,
|
|
+ POWER_SUPPLY_PROP_VOLTAGE_AVG = 13,
|
|
+ POWER_SUPPLY_PROP_VOLTAGE_OCV = 14,
|
|
+ POWER_SUPPLY_PROP_VOLTAGE_BOOT = 15,
|
|
+ POWER_SUPPLY_PROP_CURRENT_MAX = 16,
|
|
+ POWER_SUPPLY_PROP_CURRENT_NOW = 17,
|
|
+ POWER_SUPPLY_PROP_CURRENT_AVG = 18,
|
|
+ POWER_SUPPLY_PROP_CURRENT_BOOT = 19,
|
|
+ POWER_SUPPLY_PROP_POWER_NOW = 20,
|
|
+ POWER_SUPPLY_PROP_POWER_AVG = 21,
|
|
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN = 22,
|
|
+ POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN = 23,
|
|
+ POWER_SUPPLY_PROP_CHARGE_FULL = 24,
|
|
+ POWER_SUPPLY_PROP_CHARGE_EMPTY = 25,
|
|
+ POWER_SUPPLY_PROP_CHARGE_NOW = 26,
|
|
+ POWER_SUPPLY_PROP_CHARGE_AVG = 27,
|
|
+ POWER_SUPPLY_PROP_CHARGE_COUNTER = 28,
|
|
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT = 29,
|
|
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX = 30,
|
|
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE = 31,
|
|
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX = 32,
|
|
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT = 33,
|
|
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX = 34,
|
|
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT = 35,
|
|
+ POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN = 36,
|
|
+ POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN = 37,
|
|
+ POWER_SUPPLY_PROP_ENERGY_FULL = 38,
|
|
+ POWER_SUPPLY_PROP_ENERGY_EMPTY = 39,
|
|
+ POWER_SUPPLY_PROP_ENERGY_NOW = 40,
|
|
+ POWER_SUPPLY_PROP_ENERGY_AVG = 41,
|
|
+ POWER_SUPPLY_PROP_CAPACITY = 42,
|
|
+ POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN = 43,
|
|
+ POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX = 44,
|
|
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL = 45,
|
|
+ POWER_SUPPLY_PROP_TEMP = 46,
|
|
+ POWER_SUPPLY_PROP_TEMP_MAX = 47,
|
|
+ POWER_SUPPLY_PROP_TEMP_MIN = 48,
|
|
+ POWER_SUPPLY_PROP_TEMP_ALERT_MIN = 49,
|
|
+ POWER_SUPPLY_PROP_TEMP_ALERT_MAX = 50,
|
|
+ POWER_SUPPLY_PROP_TEMP_AMBIENT = 51,
|
|
+ POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN = 52,
|
|
+ POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX = 53,
|
|
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW = 54,
|
|
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG = 55,
|
|
+ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW = 56,
|
|
+ POWER_SUPPLY_PROP_TIME_TO_FULL_AVG = 57,
|
|
+ POWER_SUPPLY_PROP_TYPE = 58,
|
|
+ POWER_SUPPLY_PROP_USB_TYPE = 59,
|
|
+ POWER_SUPPLY_PROP_SCOPE = 60,
|
|
+ POWER_SUPPLY_PROP_PRECHARGE_CURRENT = 61,
|
|
+ POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT = 62,
|
|
+ POWER_SUPPLY_PROP_CALIBRATE = 63,
|
|
+ POWER_SUPPLY_PROP_MODEL_NAME = 64,
|
|
+ POWER_SUPPLY_PROP_MANUFACTURER = 65,
|
|
+ POWER_SUPPLY_PROP_SERIAL_NUMBER = 66,
|
|
+};
|
|
+
|
|
+enum power_supply_type {
|
|
+ POWER_SUPPLY_TYPE_UNKNOWN = 0,
|
|
+ POWER_SUPPLY_TYPE_BATTERY = 1,
|
|
+ POWER_SUPPLY_TYPE_UPS = 2,
|
|
+ POWER_SUPPLY_TYPE_MAINS = 3,
|
|
+ POWER_SUPPLY_TYPE_USB = 4,
|
|
+ POWER_SUPPLY_TYPE_USB_DCP = 5,
|
|
+ POWER_SUPPLY_TYPE_USB_CDP = 6,
|
|
+ POWER_SUPPLY_TYPE_USB_ACA = 7,
|
|
+ POWER_SUPPLY_TYPE_USB_TYPE_C = 8,
|
|
+ POWER_SUPPLY_TYPE_USB_PD = 9,
|
|
+ POWER_SUPPLY_TYPE_USB_PD_DRP = 10,
|
|
+ POWER_SUPPLY_TYPE_APPLE_BRICK_ID = 11,
|
|
+};
|
|
+
|
|
+enum power_supply_usb_type {
|
|
+ POWER_SUPPLY_USB_TYPE_UNKNOWN = 0,
|
|
+ POWER_SUPPLY_USB_TYPE_SDP = 1,
|
|
+ POWER_SUPPLY_USB_TYPE_DCP = 2,
|
|
+ POWER_SUPPLY_USB_TYPE_CDP = 3,
|
|
+ POWER_SUPPLY_USB_TYPE_ACA = 4,
|
|
+ POWER_SUPPLY_USB_TYPE_C = 5,
|
|
+ POWER_SUPPLY_USB_TYPE_PD = 6,
|
|
+ POWER_SUPPLY_USB_TYPE_PD_DRP = 7,
|
|
+ POWER_SUPPLY_USB_TYPE_PD_PPS = 8,
|
|
+ POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID = 9,
|
|
+};
|
|
+
|
|
+union power_supply_propval {
|
|
+ int intval;
|
|
+ const char *strval;
|
|
+};
|
|
+
|
|
+struct power_supply_config {
|
|
+ struct device_node *of_node;
|
|
+ struct fwnode_handle *fwnode;
|
|
+ void *drv_data;
|
|
+ char **supplied_to;
|
|
+ size_t num_supplicants;
|
|
+};
|
|
+
|
|
+struct power_supply;
|
|
+
|
|
+struct power_supply_desc {
|
|
+ const char *name;
|
|
+ enum power_supply_type type;
|
|
+ enum power_supply_usb_type *usb_types;
|
|
+ size_t num_usb_types;
|
|
+ enum power_supply_property *properties;
|
|
+ size_t num_properties;
|
|
+ int (*get_property)(struct power_supply *, enum power_supply_property, union power_supply_propval *);
|
|
+ int (*set_property)(struct power_supply *, enum power_supply_property, const union power_supply_propval *);
|
|
+ int (*property_is_writeable)(struct power_supply *, enum power_supply_property);
|
|
+ void (*external_power_changed)(struct power_supply *);
|
|
+ void (*set_charged)(struct power_supply *);
|
|
+ bool no_thermal;
|
|
+ int use_for_apm;
|
|
+};
|
|
+
|
|
+struct power_supply {
|
|
+ const struct power_supply_desc *desc;
|
|
+ char **supplied_to;
|
|
+ size_t num_supplicants;
|
|
+ char **supplied_from;
|
|
+ size_t num_supplies;
|
|
+ struct device_node *of_node;
|
|
+ void *drv_data;
|
|
+ struct device dev;
|
|
+ struct work_struct changed_work;
|
|
+ struct delayed_work deferred_register_work;
|
|
+ spinlock_t changed_lock;
|
|
+ bool changed;
|
|
+ bool initialized;
|
|
+ bool removing;
|
|
+ atomic_t use_cnt;
|
|
+ struct thermal_zone_device *tzd;
|
|
+ struct thermal_cooling_device *tcd;
|
|
+ struct led_trigger *charging_full_trig;
|
|
+ char *charging_full_trig_name;
|
|
+ struct led_trigger *charging_trig;
|
|
+ char *charging_trig_name;
|
|
+ struct led_trigger *full_trig;
|
|
+ char *full_trig_name;
|
|
+ struct led_trigger *online_trig;
|
|
+ char *online_trig_name;
|
|
+ struct led_trigger *charging_blink_full_solid_trig;
|
|
+ char *charging_blink_full_solid_trig_name;
|
|
+};
|
|
+
|
|
+struct acpi_ac_bl {
|
|
+ const char *hid;
|
|
+ int hrv;
|
|
+};
|
|
+
|
|
+struct acpi_ac {
|
|
+ struct power_supply *charger;
|
|
+ struct power_supply_desc charger_desc;
|
|
+ struct acpi_device *device;
|
|
+ long long unsigned int state;
|
|
+ struct notifier_block battery_nb;
|
|
+};
|
|
+
|
|
+struct input_id {
|
|
+ __u16 bustype;
|
|
+ __u16 vendor;
|
|
+ __u16 product;
|
|
+ __u16 version;
|
|
+};
|
|
+
|
|
+struct input_absinfo {
|
|
+ __s32 value;
|
|
+ __s32 minimum;
|
|
+ __s32 maximum;
|
|
+ __s32 fuzz;
|
|
+ __s32 flat;
|
|
+ __s32 resolution;
|
|
+};
|
|
+
|
|
+struct input_keymap_entry {
|
|
+ __u8 flags;
|
|
+ __u8 len;
|
|
+ __u16 index;
|
|
+ __u32 keycode;
|
|
+ __u8 scancode[32];
|
|
+};
|
|
+
|
|
+struct ff_replay {
|
|
+ __u16 length;
|
|
+ __u16 delay;
|
|
+};
|
|
+
|
|
+struct ff_trigger {
|
|
+ __u16 button;
|
|
+ __u16 interval;
|
|
+};
|
|
+
|
|
+struct ff_envelope {
|
|
+ __u16 attack_length;
|
|
+ __u16 attack_level;
|
|
+ __u16 fade_length;
|
|
+ __u16 fade_level;
|
|
+};
|
|
+
|
|
+struct ff_constant_effect {
|
|
+ __s16 level;
|
|
+ struct ff_envelope envelope;
|
|
+};
|
|
+
|
|
+struct ff_ramp_effect {
|
|
+ __s16 start_level;
|
|
+ __s16 end_level;
|
|
+ struct ff_envelope envelope;
|
|
+};
|
|
+
|
|
+struct ff_condition_effect {
|
|
+ __u16 right_saturation;
|
|
+ __u16 left_saturation;
|
|
+ __s16 right_coeff;
|
|
+ __s16 left_coeff;
|
|
+ __u16 deadband;
|
|
+ __s16 center;
|
|
+};
|
|
+
|
|
+struct ff_periodic_effect {
|
|
+ __u16 waveform;
|
|
+ __u16 period;
|
|
+ __s16 magnitude;
|
|
+ __s16 offset;
|
|
+ __u16 phase;
|
|
+ struct ff_envelope envelope;
|
|
+ __u32 custom_len;
|
|
+ __s16 *custom_data;
|
|
+};
|
|
+
|
|
+struct ff_rumble_effect {
|
|
+ __u16 strong_magnitude;
|
|
+ __u16 weak_magnitude;
|
|
+};
|
|
+
|
|
+struct ff_effect {
|
|
+ __u16 type;
|
|
+ __s16 id;
|
|
+ __u16 direction;
|
|
+ struct ff_trigger trigger;
|
|
+ struct ff_replay replay;
|
|
+ union {
|
|
+ struct ff_constant_effect constant;
|
|
+ struct ff_ramp_effect ramp;
|
|
+ struct ff_periodic_effect periodic;
|
|
+ struct ff_condition_effect condition[2];
|
|
+ struct ff_rumble_effect rumble;
|
|
+ } u;
|
|
+};
|
|
+
|
|
+struct input_device_id {
|
|
+ kernel_ulong_t flags;
|
|
+ __u16 bustype;
|
|
+ __u16 vendor;
|
|
+ __u16 product;
|
|
+ __u16 version;
|
|
+ kernel_ulong_t evbit[1];
|
|
+ kernel_ulong_t keybit[12];
|
|
+ kernel_ulong_t relbit[1];
|
|
+ kernel_ulong_t absbit[1];
|
|
+ kernel_ulong_t mscbit[1];
|
|
+ kernel_ulong_t ledbit[1];
|
|
+ kernel_ulong_t sndbit[1];
|
|
+ kernel_ulong_t ffbit[2];
|
|
+ kernel_ulong_t swbit[1];
|
|
+ kernel_ulong_t propbit[1];
|
|
+ kernel_ulong_t driver_info;
|
|
+};
|
|
+
|
|
+struct input_value {
|
|
+ __u16 type;
|
|
+ __u16 code;
|
|
+ __s32 value;
|
|
+};
|
|
+
|
|
+struct ff_device;
|
|
+
|
|
+struct input_mt;
|
|
+
|
|
+struct input_handle;
|
|
+
|
|
+struct input_dev {
|
|
+ const char *name;
|
|
+ const char *phys;
|
|
+ const char *uniq;
|
|
+ struct input_id id;
|
|
+ long unsigned int propbit[1];
|
|
+ long unsigned int evbit[1];
|
|
+ long unsigned int keybit[12];
|
|
+ long unsigned int relbit[1];
|
|
+ long unsigned int absbit[1];
|
|
+ long unsigned int mscbit[1];
|
|
+ long unsigned int ledbit[1];
|
|
+ long unsigned int sndbit[1];
|
|
+ long unsigned int ffbit[2];
|
|
+ long unsigned int swbit[1];
|
|
+ unsigned int hint_events_per_packet;
|
|
+ unsigned int keycodemax;
|
|
+ unsigned int keycodesize;
|
|
+ void *keycode;
|
|
+ int (*setkeycode)(struct input_dev *, const struct input_keymap_entry *, unsigned int *);
|
|
+ int (*getkeycode)(struct input_dev *, struct input_keymap_entry *);
|
|
+ struct ff_device *ff;
|
|
+ unsigned int repeat_key;
|
|
+ struct timer_list timer;
|
|
+ int rep[2];
|
|
+ struct input_mt *mt;
|
|
+ struct input_absinfo *absinfo;
|
|
+ long unsigned int key[12];
|
|
+ long unsigned int led[1];
|
|
+ long unsigned int snd[1];
|
|
+ long unsigned int sw[1];
|
|
+ int (*open)(struct input_dev *);
|
|
+ void (*close)(struct input_dev *);
|
|
+ int (*flush)(struct input_dev *, struct file *);
|
|
+ int (*event)(struct input_dev *, unsigned int, unsigned int, int);
|
|
+ struct input_handle *grab;
|
|
+ spinlock_t event_lock;
|
|
+ struct mutex mutex;
|
|
+ unsigned int users;
|
|
+ bool going_away;
|
|
+ struct device dev;
|
|
+ struct list_head h_list;
|
|
+ struct list_head node;
|
|
+ unsigned int num_vals;
|
|
+ unsigned int max_vals;
|
|
+ struct input_value *vals;
|
|
+ bool devres_managed;
|
|
+};
|
|
+
|
|
+struct ff_device {
|
|
+ int (*upload)(struct input_dev *, struct ff_effect *, struct ff_effect *);
|
|
+ int (*erase)(struct input_dev *, int);
|
|
+ int (*playback)(struct input_dev *, int, int);
|
|
+ void (*set_gain)(struct input_dev *, u16);
|
|
+ void (*set_autocenter)(struct input_dev *, u16);
|
|
+ void (*destroy)(struct ff_device *);
|
|
+ void *private;
|
|
+ long unsigned int ffbit[2];
|
|
+ struct mutex mutex;
|
|
+ int max_effects;
|
|
+ struct ff_effect *effects;
|
|
+ struct file *effect_owners[0];
|
|
+};
|
|
+
|
|
+struct input_handler;
|
|
+
|
|
+struct input_handle {
|
|
+ void *private;
|
|
+ int open;
|
|
+ const char *name;
|
|
+ struct input_dev *dev;
|
|
+ struct input_handler *handler;
|
|
+ struct list_head d_node;
|
|
+ struct list_head h_node;
|
|
+};
|
|
+
|
|
+struct input_handler {
|
|
+ void *private;
|
|
+ void (*event)(struct input_handle *, unsigned int, unsigned int, int);
|
|
+ void (*events)(struct input_handle *, const struct input_value *, unsigned int);
|
|
+ bool (*filter)(struct input_handle *, unsigned int, unsigned int, int);
|
|
+ bool (*match)(struct input_handler *, struct input_dev *);
|
|
+ int (*connect)(struct input_handler *, struct input_dev *, const struct input_device_id *);
|
|
+ void (*disconnect)(struct input_handle *);
|
|
+ void (*start)(struct input_handle *);
|
|
+ bool legacy_minors;
|
|
+ int minor;
|
|
+ const char *name;
|
|
+ const struct input_device_id *id_table;
|
|
+ struct list_head h_list;
|
|
+ struct list_head node;
|
|
+};
|
|
+
|
|
+struct acpi_button {
|
|
+ unsigned int type;
|
|
+ struct input_dev *input;
|
|
+ char phys[32];
|
|
+ long unsigned int pushed;
|
|
+ int last_state;
|
|
+ ktime_t last_time;
|
|
+ bool suspended;
|
|
+};
|
|
+
|
|
+struct acpi_fan_fps {
|
|
+ u64 control;
|
|
+ u64 trip_point;
|
|
+ u64 speed;
|
|
+ u64 noise_level;
|
|
+ u64 power;
|
|
+};
|
|
+
|
|
+struct acpi_fan_fif {
|
|
+ u64 revision;
|
|
+ u64 fine_grain_ctrl;
|
|
+ u64 step_size;
|
|
+ u64 low_speed_notification;
|
|
+};
|
|
+
|
|
+struct acpi_fan {
|
|
+ bool acpi4;
|
|
+ struct acpi_fan_fif fif;
|
|
+ struct acpi_fan_fps *fps;
|
|
+ int fps_count;
|
|
+ struct thermal_cooling_device *cdev;
|
|
+};
|
|
+
|
|
+struct acpi_pci_slot {
|
|
+ struct pci_slot *pci_slot;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct acpi_lpi_states_array {
|
|
+ unsigned int size;
|
|
+ unsigned int composite_states_size;
|
|
+ struct acpi_lpi_state *entries;
|
|
+ struct acpi_lpi_state *composite_states[8];
|
|
+};
|
|
+
|
|
+struct throttling_tstate {
|
|
+ unsigned int cpu;
|
|
+ int target_state;
|
|
+};
|
|
+
|
|
+struct acpi_processor_throttling_arg {
|
|
+ struct acpi_processor *pr;
|
|
+ int target_state;
|
|
+ bool force;
|
|
+};
|
|
+
|
|
+struct container_dev {
|
|
+ struct device dev;
|
|
+ int (*offline)(struct container_dev *);
|
|
+};
|
|
+
|
|
+struct acpi_thermal_state {
|
|
+ u8 critical: 1;
|
|
+ u8 hot: 1;
|
|
+ u8 passive: 1;
|
|
+ u8 active: 1;
|
|
+ u8 reserved: 4;
|
|
+ int active_index;
|
|
+};
|
|
+
|
|
+struct acpi_thermal_state_flags {
|
|
+ u8 valid: 1;
|
|
+ u8 enabled: 1;
|
|
+ u8 reserved: 6;
|
|
+};
|
|
+
|
|
+struct acpi_thermal_critical {
|
|
+ struct acpi_thermal_state_flags flags;
|
|
+ long unsigned int temperature;
|
|
+};
|
|
+
|
|
+struct acpi_thermal_hot {
|
|
+ struct acpi_thermal_state_flags flags;
|
|
+ long unsigned int temperature;
|
|
+};
|
|
+
|
|
+struct acpi_thermal_passive {
|
|
+ struct acpi_thermal_state_flags flags;
|
|
+ long unsigned int temperature;
|
|
+ long unsigned int tc1;
|
|
+ long unsigned int tc2;
|
|
+ long unsigned int tsp;
|
|
+ struct acpi_handle_list devices;
|
|
+};
|
|
+
|
|
+struct acpi_thermal_active {
|
|
+ struct acpi_thermal_state_flags flags;
|
|
+ long unsigned int temperature;
|
|
+ struct acpi_handle_list devices;
|
|
+};
|
|
+
|
|
+struct acpi_thermal_trips {
|
|
+ struct acpi_thermal_critical critical;
|
|
+ struct acpi_thermal_hot hot;
|
|
+ struct acpi_thermal_passive passive;
|
|
+ struct acpi_thermal_active active[10];
|
|
+};
|
|
+
|
|
+struct acpi_thermal_flags {
|
|
+ u8 cooling_mode: 1;
|
|
+ u8 devices: 1;
|
|
+ u8 reserved: 6;
|
|
+};
|
|
+
|
|
+struct acpi_thermal {
|
|
+ struct acpi_device *device;
|
|
+ acpi_bus_id name;
|
|
+ long unsigned int temperature;
|
|
+ long unsigned int last_temperature;
|
|
+ long unsigned int polling_frequency;
|
|
+ volatile u8 zombie;
|
|
+ struct acpi_thermal_flags flags;
|
|
+ struct acpi_thermal_state state;
|
|
+ struct acpi_thermal_trips trips;
|
|
+ struct acpi_handle_list devices;
|
|
+ struct thermal_zone_device *thermal_zone;
|
|
+ int tz_enabled;
|
|
+ int kelvin_offset;
|
|
+ struct work_struct thermal_check_work;
|
|
+};
|
|
+
|
|
+enum acpi_hmat_type {
|
|
+ ACPI_HMAT_TYPE_PROXIMITY = 0,
|
|
+ ACPI_HMAT_TYPE_LOCALITY = 1,
|
|
+ ACPI_HMAT_TYPE_CACHE = 2,
|
|
+ ACPI_HMAT_TYPE_RESERVED = 3,
|
|
+};
|
|
+
|
|
+struct acpi_hmat_proximity_domain {
|
|
+ struct acpi_hmat_structure header;
|
|
+ u16 flags;
|
|
+ u16 reserved1;
|
|
+ u32 processor_PD;
|
|
+ u32 memory_PD;
|
|
+ u32 reserved2;
|
|
+ u64 reserved3;
|
|
+ u64 reserved4;
|
|
+};
|
|
+
|
|
+struct acpi_hmat_locality {
|
|
+ struct acpi_hmat_structure header;
|
|
+ u8 flags;
|
|
+ u8 data_type;
|
|
+ u16 reserved1;
|
|
+ u32 number_of_initiator_Pds;
|
|
+ u32 number_of_target_Pds;
|
|
+ u32 reserved2;
|
|
+ u64 entry_base_unit;
|
|
+};
|
|
+
|
|
+struct acpi_hmat_cache {
|
|
+ struct acpi_hmat_structure header;
|
|
+ u32 memory_PD;
|
|
+ u32 reserved1;
|
|
+ u64 cache_size;
|
|
+ u32 cache_attributes;
|
|
+ u16 reserved2;
|
|
+ u16 number_of_SMBIOShandles;
|
|
+};
|
|
+
|
|
+struct node_hmem_attrs {
|
|
+ unsigned int read_bandwidth;
|
|
+ unsigned int write_bandwidth;
|
|
+ unsigned int read_latency;
|
|
+ unsigned int write_latency;
|
|
+};
|
|
+
|
|
+enum cache_indexing {
|
|
+ NODE_CACHE_DIRECT_MAP = 0,
|
|
+ NODE_CACHE_INDEXED = 1,
|
|
+ NODE_CACHE_OTHER = 2,
|
|
+};
|
|
+
|
|
+enum cache_write_policy {
|
|
+ NODE_CACHE_WRITE_BACK = 0,
|
|
+ NODE_CACHE_WRITE_THROUGH = 1,
|
|
+ NODE_CACHE_WRITE_OTHER = 2,
|
|
+};
|
|
+
|
|
+struct node_cache_attrs {
|
|
+ enum cache_indexing indexing;
|
|
+ enum cache_write_policy write_policy;
|
|
+ u64 size;
|
|
+ u16 line_size;
|
|
+ u8 level;
|
|
+};
|
|
+
|
|
+enum locality_types {
|
|
+ WRITE_LATENCY = 0,
|
|
+ READ_LATENCY = 1,
|
|
+ WRITE_BANDWIDTH = 2,
|
|
+ READ_BANDWIDTH = 3,
|
|
+};
|
|
+
|
|
+struct memory_locality {
|
|
+ struct list_head node;
|
|
+ struct acpi_hmat_locality *hmat_loc;
|
|
+};
|
|
+
|
|
+struct memory_target {
|
|
+ struct list_head node;
|
|
+ unsigned int memory_pxm;
|
|
+ unsigned int processor_pxm;
|
|
+ struct node_hmem_attrs hmem_attrs;
|
|
+};
|
|
+
|
|
+struct memory_initiator {
|
|
+ struct list_head node;
|
|
+ unsigned int processor_pxm;
|
|
+};
|
|
+
|
|
+struct acpi_memory_info {
|
|
+ struct list_head list;
|
|
+ u64 start_addr;
|
|
+ u64 length;
|
|
+ short unsigned int caching;
|
|
+ short unsigned int write_protect;
|
|
+ unsigned int enabled: 1;
|
|
+};
|
|
+
|
|
+struct acpi_memory_device {
|
|
+ struct acpi_device *device;
|
|
+ unsigned int state;
|
|
+ struct list_head res_list;
|
|
+};
|
|
+
|
|
+struct acpi_pci_ioapic {
|
|
+ acpi_handle root_handle;
|
|
+ acpi_handle handle;
|
|
+ u32 gsi_base;
|
|
+ struct resource res;
|
|
+ struct pci_dev *pdev;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+enum dmi_entry_type {
|
|
+ DMI_ENTRY_BIOS = 0,
|
|
+ DMI_ENTRY_SYSTEM = 1,
|
|
+ DMI_ENTRY_BASEBOARD = 2,
|
|
+ DMI_ENTRY_CHASSIS = 3,
|
|
+ DMI_ENTRY_PROCESSOR = 4,
|
|
+ DMI_ENTRY_MEM_CONTROLLER = 5,
|
|
+ DMI_ENTRY_MEM_MODULE = 6,
|
|
+ DMI_ENTRY_CACHE = 7,
|
|
+ DMI_ENTRY_PORT_CONNECTOR = 8,
|
|
+ DMI_ENTRY_SYSTEM_SLOT = 9,
|
|
+ DMI_ENTRY_ONBOARD_DEVICE = 10,
|
|
+ DMI_ENTRY_OEMSTRINGS = 11,
|
|
+ DMI_ENTRY_SYSCONF = 12,
|
|
+ DMI_ENTRY_BIOS_LANG = 13,
|
|
+ DMI_ENTRY_GROUP_ASSOC = 14,
|
|
+ DMI_ENTRY_SYSTEM_EVENT_LOG = 15,
|
|
+ DMI_ENTRY_PHYS_MEM_ARRAY = 16,
|
|
+ DMI_ENTRY_MEM_DEVICE = 17,
|
|
+ DMI_ENTRY_32_MEM_ERROR = 18,
|
|
+ DMI_ENTRY_MEM_ARRAY_MAPPED_ADDR = 19,
|
|
+ DMI_ENTRY_MEM_DEV_MAPPED_ADDR = 20,
|
|
+ DMI_ENTRY_BUILTIN_POINTING_DEV = 21,
|
|
+ DMI_ENTRY_PORTABLE_BATTERY = 22,
|
|
+ DMI_ENTRY_SYSTEM_RESET = 23,
|
|
+ DMI_ENTRY_HW_SECURITY = 24,
|
|
+ DMI_ENTRY_SYSTEM_POWER_CONTROLS = 25,
|
|
+ DMI_ENTRY_VOLTAGE_PROBE = 26,
|
|
+ DMI_ENTRY_COOLING_DEV = 27,
|
|
+ DMI_ENTRY_TEMP_PROBE = 28,
|
|
+ DMI_ENTRY_ELECTRICAL_CURRENT_PROBE = 29,
|
|
+ DMI_ENTRY_OOB_REMOTE_ACCESS = 30,
|
|
+ DMI_ENTRY_BIS_ENTRY = 31,
|
|
+ DMI_ENTRY_SYSTEM_BOOT = 32,
|
|
+ DMI_ENTRY_MGMT_DEV = 33,
|
|
+ DMI_ENTRY_MGMT_DEV_COMPONENT = 34,
|
|
+ DMI_ENTRY_MGMT_DEV_THRES = 35,
|
|
+ DMI_ENTRY_MEM_CHANNEL = 36,
|
|
+ DMI_ENTRY_IPMI_DEV = 37,
|
|
+ DMI_ENTRY_SYS_POWER_SUPPLY = 38,
|
|
+ DMI_ENTRY_ADDITIONAL = 39,
|
|
+ DMI_ENTRY_ONBOARD_DEV_EXT = 40,
|
|
+ DMI_ENTRY_MGMT_CONTROLLER_HOST = 41,
|
|
+ DMI_ENTRY_INACTIVE = 126,
|
|
+ DMI_ENTRY_END_OF_TABLE = 127,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ POWER_SUPPLY_STATUS_UNKNOWN = 0,
|
|
+ POWER_SUPPLY_STATUS_CHARGING = 1,
|
|
+ POWER_SUPPLY_STATUS_DISCHARGING = 2,
|
|
+ POWER_SUPPLY_STATUS_NOT_CHARGING = 3,
|
|
+ POWER_SUPPLY_STATUS_FULL = 4,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ POWER_SUPPLY_TECHNOLOGY_UNKNOWN = 0,
|
|
+ POWER_SUPPLY_TECHNOLOGY_NiMH = 1,
|
|
+ POWER_SUPPLY_TECHNOLOGY_LION = 2,
|
|
+ POWER_SUPPLY_TECHNOLOGY_LIPO = 3,
|
|
+ POWER_SUPPLY_TECHNOLOGY_LiFe = 4,
|
|
+ POWER_SUPPLY_TECHNOLOGY_NiCd = 5,
|
|
+ POWER_SUPPLY_TECHNOLOGY_LiMn = 6,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN = 0,
|
|
+ POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL = 1,
|
|
+ POWER_SUPPLY_CAPACITY_LEVEL_LOW = 2,
|
|
+ POWER_SUPPLY_CAPACITY_LEVEL_NORMAL = 3,
|
|
+ POWER_SUPPLY_CAPACITY_LEVEL_HIGH = 4,
|
|
+ POWER_SUPPLY_CAPACITY_LEVEL_FULL = 5,
|
|
+};
|
|
+
|
|
+struct acpi_battery_hook {
|
|
+ const char *name;
|
|
+ int (*add_battery)(struct power_supply *);
|
|
+ int (*remove_battery)(struct power_supply *);
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ ACPI_BATTERY_ALARM_PRESENT = 0,
|
|
+ ACPI_BATTERY_XINFO_PRESENT = 1,
|
|
+ ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY = 2,
|
|
+ ACPI_BATTERY_QUIRK_THINKPAD_MAH = 3,
|
|
+ ACPI_BATTERY_QUIRK_DEGRADED_FULL_CHARGE = 4,
|
|
+};
|
|
+
|
|
+struct acpi_battery {
|
|
+ struct mutex lock;
|
|
+ struct mutex sysfs_lock;
|
|
+ struct power_supply *bat;
|
|
+ struct power_supply_desc bat_desc;
|
|
+ struct acpi_device *device;
|
|
+ struct notifier_block pm_nb;
|
|
+ struct list_head list;
|
|
+ long unsigned int update_time;
|
|
+ int revision;
|
|
+ int rate_now;
|
|
+ int capacity_now;
|
|
+ int voltage_now;
|
|
+ int design_capacity;
|
|
+ int full_charge_capacity;
|
|
+ int technology;
|
|
+ int design_voltage;
|
|
+ int design_capacity_warning;
|
|
+ int design_capacity_low;
|
|
+ int cycle_count;
|
|
+ int measurement_accuracy;
|
|
+ int max_sampling_time;
|
|
+ int min_sampling_time;
|
|
+ int max_averaging_interval;
|
|
+ int min_averaging_interval;
|
|
+ int capacity_granularity_1;
|
|
+ int capacity_granularity_2;
|
|
+ int alarm;
|
|
+ char model_number[32];
|
|
+ char serial_number[32];
|
|
+ char type[32];
|
|
+ char oem_info[32];
|
|
+ int state;
|
|
+ int power_unit;
|
|
+ long unsigned int flags;
|
|
+};
|
|
+
|
|
+struct acpi_offsets {
|
|
+ size_t offset;
|
|
+ u8 mode;
|
|
+};
|
|
+
|
|
+struct acpi_pcct_hw_reduced {
|
|
+ struct acpi_subtable_header header;
|
|
+ u32 platform_interrupt;
|
|
+ u8 flags;
|
|
+ u8 reserved;
|
|
+ u64 base_address;
|
|
+ u64 length;
|
|
+ struct acpi_generic_address doorbell_register;
|
|
+ u64 preserve_mask;
|
|
+ u64 write_mask;
|
|
+ u32 latency;
|
|
+ u32 max_access_rate;
|
|
+ u16 min_turnaround_time;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_pcct_shared_memory {
|
|
+ u32 signature;
|
|
+ u16 command;
|
|
+ u16 status;
|
|
+};
|
|
+
|
|
+struct mbox_chan;
|
|
+
|
|
+struct mbox_chan_ops {
|
|
+ int (*send_data)(struct mbox_chan *, void *);
|
|
+ int (*startup)(struct mbox_chan *);
|
|
+ void (*shutdown)(struct mbox_chan *);
|
|
+ bool (*last_tx_done)(struct mbox_chan *);
|
|
+ bool (*peek_data)(struct mbox_chan *);
|
|
+};
|
|
+
|
|
+struct mbox_controller;
|
|
+
|
|
+struct mbox_client;
|
|
+
|
|
+struct mbox_chan {
|
|
+ struct mbox_controller *mbox;
|
|
+ unsigned int txdone_method;
|
|
+ struct mbox_client *cl;
|
|
+ struct completion tx_complete;
|
|
+ void *active_req;
|
|
+ unsigned int msg_count;
|
|
+ unsigned int msg_free;
|
|
+ void *msg_data[20];
|
|
+ spinlock_t lock;
|
|
+ void *con_priv;
|
|
+};
|
|
+
|
|
+struct mbox_controller {
|
|
+ struct device *dev;
|
|
+ const struct mbox_chan_ops *ops;
|
|
+ struct mbox_chan *chans;
|
|
+ int num_chans;
|
|
+ bool txdone_irq;
|
|
+ bool txdone_poll;
|
|
+ unsigned int txpoll_period;
|
|
+ struct mbox_chan * (*of_xlate)(struct mbox_controller *, const struct of_phandle_args *);
|
|
+ struct hrtimer poll_hrt;
|
|
+ struct list_head node;
|
|
+};
|
|
+
|
|
+struct mbox_client {
|
|
+ struct device *dev;
|
|
+ bool tx_block;
|
|
+ long unsigned int tx_tout;
|
|
+ bool knows_txdone;
|
|
+ void (*rx_callback)(struct mbox_client *, void *);
|
|
+ void (*tx_prepare)(struct mbox_client *, void *);
|
|
+ void (*tx_done)(struct mbox_client *, void *, int);
|
|
+};
|
|
+
|
|
+struct cpc_register_resource {
|
|
+ acpi_object_type type;
|
|
+ u64 *sys_mem_vaddr;
|
|
+ union {
|
|
+ struct cpc_reg reg;
|
|
+ u64 int_value;
|
|
+ } cpc_entry;
|
|
+};
|
|
+
|
|
+struct cpc_desc {
|
|
+ int num_entries;
|
|
+ int version;
|
|
+ int cpu_id;
|
|
+ int write_cmd_status;
|
|
+ int write_cmd_id;
|
|
+ struct cpc_register_resource cpc_regs[21];
|
|
+ struct acpi_psd_package domain_info;
|
|
+ struct kobject kobj;
|
|
+};
|
|
+
|
|
+enum cppc_regs {
|
|
+ HIGHEST_PERF = 0,
|
|
+ NOMINAL_PERF = 1,
|
|
+ LOW_NON_LINEAR_PERF = 2,
|
|
+ LOWEST_PERF = 3,
|
|
+ GUARANTEED_PERF = 4,
|
|
+ DESIRED_PERF = 5,
|
|
+ MIN_PERF = 6,
|
|
+ MAX_PERF = 7,
|
|
+ PERF_REDUC_TOLERANCE = 8,
|
|
+ TIME_WINDOW = 9,
|
|
+ CTR_WRAP_TIME = 10,
|
|
+ REFERENCE_CTR = 11,
|
|
+ DELIVERED_CTR = 12,
|
|
+ PERF_LIMITED = 13,
|
|
+ ENABLE = 14,
|
|
+ AUTO_SEL_ENABLE = 15,
|
|
+ AUTO_ACT_WINDOW = 16,
|
|
+ ENERGY_PERF = 17,
|
|
+ REFERENCE_PERF = 18,
|
|
+ LOWEST_FREQ = 19,
|
|
+ NOMINAL_FREQ = 20,
|
|
+};
|
|
+
|
|
+struct cppc_perf_caps {
|
|
+ u32 guaranteed_perf;
|
|
+ u32 highest_perf;
|
|
+ u32 nominal_perf;
|
|
+ u32 lowest_perf;
|
|
+ u32 lowest_nonlinear_perf;
|
|
+ u32 lowest_freq;
|
|
+ u32 nominal_freq;
|
|
+};
|
|
+
|
|
+struct cppc_perf_ctrls {
|
|
+ u32 max_perf;
|
|
+ u32 min_perf;
|
|
+ u32 desired_perf;
|
|
+};
|
|
+
|
|
+struct cppc_perf_fb_ctrs {
|
|
+ u64 reference;
|
|
+ u64 delivered;
|
|
+ u64 reference_perf;
|
|
+ u64 wraparound_time;
|
|
+};
|
|
+
|
|
+struct cppc_cpudata {
|
|
+ int cpu;
|
|
+ struct cppc_perf_caps perf_caps;
|
|
+ struct cppc_perf_ctrls perf_ctrls;
|
|
+ struct cppc_perf_fb_ctrs perf_fb_ctrs;
|
|
+ struct cpufreq_policy *cur_policy;
|
|
+ unsigned int shared_type;
|
|
+ cpumask_var_t shared_cpu_map;
|
|
+};
|
|
+
|
|
+struct cppc_pcc_data {
|
|
+ struct mbox_chan *pcc_channel;
|
|
+ void *pcc_comm_addr;
|
|
+ bool pcc_channel_acquired;
|
|
+ unsigned int deadline_us;
|
|
+ unsigned int pcc_mpar;
|
|
+ unsigned int pcc_mrtt;
|
|
+ unsigned int pcc_nominal;
|
|
+ bool pending_pcc_write_cmd;
|
|
+ bool platform_owns_pcc;
|
|
+ unsigned int pcc_write_cnt;
|
|
+ struct rw_semaphore pcc_lock;
|
|
+ wait_queue_head_t pcc_write_wait_q;
|
|
+ ktime_t last_cmd_cmpl_time;
|
|
+ ktime_t last_mpar_reset;
|
|
+ int mpar_count;
|
|
+ int refcount;
|
|
+};
|
|
+
|
|
+struct acpi_whea_header {
|
|
+ u8 action;
|
|
+ u8 instruction;
|
|
+ u8 flags;
|
|
+ u8 reserved;
|
|
+ struct acpi_generic_address register_region;
|
|
+ u64 value;
|
|
+ u64 mask;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct apei_exec_context;
|
|
+
|
|
+typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *, struct acpi_whea_header *);
|
|
+
|
|
+struct apei_exec_ins_type;
|
|
+
|
|
+struct apei_exec_context {
|
|
+ u32 ip;
|
|
+ u64 value;
|
|
+ u64 var1;
|
|
+ u64 var2;
|
|
+ u64 src_base;
|
|
+ u64 dst_base;
|
|
+ struct apei_exec_ins_type *ins_table;
|
|
+ u32 instructions;
|
|
+ struct acpi_whea_header *action_table;
|
|
+ u32 entries;
|
|
+};
|
|
+
|
|
+struct apei_exec_ins_type {
|
|
+ u32 flags;
|
|
+ apei_exec_ins_func_t run;
|
|
+};
|
|
+
|
|
+struct apei_resources {
|
|
+ struct list_head iomem;
|
|
+ struct list_head ioport;
|
|
+};
|
|
+
|
|
+typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *, struct acpi_whea_header *, void *);
|
|
+
|
|
+struct apei_res {
|
|
+ struct list_head list;
|
|
+ long unsigned int start;
|
|
+ long unsigned int end;
|
|
+};
|
|
+
|
|
+struct acpi_table_hest {
|
|
+ struct acpi_table_header header;
|
|
+ u32 error_source_count;
|
|
+};
|
|
+
|
|
+struct acpi_hest_ia_machine_check {
|
|
+ struct acpi_hest_header header;
|
|
+ u16 reserved1;
|
|
+ u8 flags;
|
|
+ u8 enabled;
|
|
+ u32 records_to_preallocate;
|
|
+ u32 max_sections_per_record;
|
|
+ u64 global_capability_data;
|
|
+ u64 global_control_data;
|
|
+ u8 num_hardware_banks;
|
|
+ u8 reserved3[7];
|
|
+};
|
|
+
|
|
+struct acpi_hest_generic {
|
|
+ struct acpi_hest_header header;
|
|
+ u16 related_source_id;
|
|
+ u8 reserved;
|
|
+ u8 enabled;
|
|
+ u32 records_to_preallocate;
|
|
+ u32 max_sections_per_record;
|
|
+ u32 max_raw_data_length;
|
|
+ struct acpi_generic_address error_status_address;
|
|
+ struct acpi_hest_notify notify;
|
|
+ u32 error_block_length;
|
|
+} __attribute__((packed));
|
|
+
|
|
+enum hest_status {
|
|
+ HEST_ENABLED = 0,
|
|
+ HEST_DISABLED = 1,
|
|
+ HEST_NOT_FOUND = 2,
|
|
+};
|
|
+
|
|
+typedef int (*apei_hest_func_t)(struct acpi_hest_header *, void *);
|
|
+
|
|
+struct ghes_arr {
|
|
+ struct platform_device **ghes_devs;
|
|
+ unsigned int count;
|
|
+};
|
|
+
|
|
+struct acpi_table_erst {
|
|
+ struct acpi_table_header header;
|
|
+ u32 header_length;
|
|
+ u32 reserved;
|
|
+ u32 entries;
|
|
+};
|
|
+
|
|
+enum acpi_erst_actions {
|
|
+ ACPI_ERST_BEGIN_WRITE = 0,
|
|
+ ACPI_ERST_BEGIN_READ = 1,
|
|
+ ACPI_ERST_BEGIN_CLEAR = 2,
|
|
+ ACPI_ERST_END = 3,
|
|
+ ACPI_ERST_SET_RECORD_OFFSET = 4,
|
|
+ ACPI_ERST_EXECUTE_OPERATION = 5,
|
|
+ ACPI_ERST_CHECK_BUSY_STATUS = 6,
|
|
+ ACPI_ERST_GET_COMMAND_STATUS = 7,
|
|
+ ACPI_ERST_GET_RECORD_ID = 8,
|
|
+ ACPI_ERST_SET_RECORD_ID = 9,
|
|
+ ACPI_ERST_GET_RECORD_COUNT = 10,
|
|
+ ACPI_ERST_BEGIN_DUMMY_WRIITE = 11,
|
|
+ ACPI_ERST_NOT_USED = 12,
|
|
+ ACPI_ERST_GET_ERROR_RANGE = 13,
|
|
+ ACPI_ERST_GET_ERROR_LENGTH = 14,
|
|
+ ACPI_ERST_GET_ERROR_ATTRIBUTES = 15,
|
|
+ ACPI_ERST_EXECUTE_TIMINGS = 16,
|
|
+ ACPI_ERST_ACTION_RESERVED = 17,
|
|
+};
|
|
+
|
|
+enum acpi_erst_instructions {
|
|
+ ACPI_ERST_READ_REGISTER = 0,
|
|
+ ACPI_ERST_READ_REGISTER_VALUE = 1,
|
|
+ ACPI_ERST_WRITE_REGISTER = 2,
|
|
+ ACPI_ERST_WRITE_REGISTER_VALUE = 3,
|
|
+ ACPI_ERST_NOOP = 4,
|
|
+ ACPI_ERST_LOAD_VAR1 = 5,
|
|
+ ACPI_ERST_LOAD_VAR2 = 6,
|
|
+ ACPI_ERST_STORE_VAR1 = 7,
|
|
+ ACPI_ERST_ADD = 8,
|
|
+ ACPI_ERST_SUBTRACT = 9,
|
|
+ ACPI_ERST_ADD_VALUE = 10,
|
|
+ ACPI_ERST_SUBTRACT_VALUE = 11,
|
|
+ ACPI_ERST_STALL = 12,
|
|
+ ACPI_ERST_STALL_WHILE_TRUE = 13,
|
|
+ ACPI_ERST_SKIP_NEXT_IF_TRUE = 14,
|
|
+ ACPI_ERST_GOTO = 15,
|
|
+ ACPI_ERST_SET_SRC_ADDRESS_BASE = 16,
|
|
+ ACPI_ERST_SET_DST_ADDRESS_BASE = 17,
|
|
+ ACPI_ERST_MOVE_DATA = 18,
|
|
+ ACPI_ERST_INSTRUCTION_RESERVED = 19,
|
|
+};
|
|
+
|
|
+struct erst_erange {
|
|
+ u64 base;
|
|
+ u64 size;
|
|
+ void *vaddr;
|
|
+ u32 attr;
|
|
+};
|
|
+
|
|
+struct erst_record_id_cache {
|
|
+ struct mutex lock;
|
|
+ u64 *entries;
|
|
+ int len;
|
|
+ int size;
|
|
+ int refcount;
|
|
+};
|
|
+
|
|
+struct cper_pstore_record {
|
|
+ struct cper_record_header hdr;
|
|
+ struct cper_section_descriptor sec_hdr;
|
|
+ char data[0];
|
|
+};
|
|
+
|
|
+struct acpi_bert_region {
|
|
+ u32 block_status;
|
|
+ u32 raw_data_offset;
|
|
+ u32 raw_data_length;
|
|
+ u32 data_length;
|
|
+ u32 error_severity;
|
|
+};
|
|
+
|
|
+struct acpi_hest_generic_status {
|
|
+ u32 block_status;
|
|
+ u32 raw_data_offset;
|
|
+ u32 raw_data_length;
|
|
+ u32 data_length;
|
|
+ u32 error_severity;
|
|
+};
|
|
+
|
|
+enum acpi_hest_notify_types {
|
|
+ ACPI_HEST_NOTIFY_POLLED = 0,
|
|
+ ACPI_HEST_NOTIFY_EXTERNAL = 1,
|
|
+ ACPI_HEST_NOTIFY_LOCAL = 2,
|
|
+ ACPI_HEST_NOTIFY_SCI = 3,
|
|
+ ACPI_HEST_NOTIFY_NMI = 4,
|
|
+ ACPI_HEST_NOTIFY_CMCI = 5,
|
|
+ ACPI_HEST_NOTIFY_MCE = 6,
|
|
+ ACPI_HEST_NOTIFY_GPIO = 7,
|
|
+ ACPI_HEST_NOTIFY_SEA = 8,
|
|
+ ACPI_HEST_NOTIFY_SEI = 9,
|
|
+ ACPI_HEST_NOTIFY_GSIV = 10,
|
|
+ ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED = 11,
|
|
+ ACPI_HEST_NOTIFY_RESERVED = 12,
|
|
+};
|
|
+
|
|
+struct acpi_hest_generic_v2 {
|
|
+ struct acpi_hest_header header;
|
|
+ u16 related_source_id;
|
|
+ u8 reserved;
|
|
+ u8 enabled;
|
|
+ u32 records_to_preallocate;
|
|
+ u32 max_sections_per_record;
|
|
+ u32 max_raw_data_length;
|
|
+ struct acpi_generic_address error_status_address;
|
|
+ struct acpi_hest_notify notify;
|
|
+ u32 error_block_length;
|
|
+ struct acpi_generic_address read_ack_register;
|
|
+ u64 read_ack_preserve;
|
|
+ u64 read_ack_write;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_hest_generic_data {
|
|
+ u8 section_type[16];
|
|
+ u32 error_severity;
|
|
+ u16 revision;
|
|
+ u8 validation_bits;
|
|
+ u8 flags;
|
|
+ u32 error_data_length;
|
|
+ u8 fru_id[16];
|
|
+ u8 fru_text[20];
|
|
+};
|
|
+
|
|
+struct acpi_hest_generic_data_v300 {
|
|
+ u8 section_type[16];
|
|
+ u32 error_severity;
|
|
+ u16 revision;
|
|
+ u8 validation_bits;
|
|
+ u8 flags;
|
|
+ u32 error_data_length;
|
|
+ u8 fru_id[16];
|
|
+ u8 fru_text[20];
|
|
+ u64 time_stamp;
|
|
+};
|
|
+
|
|
+struct cper_sec_proc_arm {
|
|
+ __u32 validation_bits;
|
|
+ __u16 err_info_num;
|
|
+ __u16 context_info_num;
|
|
+ __u32 section_length;
|
|
+ __u8 affinity_level;
|
|
+ __u8 reserved[3];
|
|
+ __u64 mpidr;
|
|
+ __u64 midr;
|
|
+ __u32 running_state;
|
|
+ __u32 psci_state;
|
|
+};
|
|
+
|
|
+struct cper_sec_pcie {
|
|
+ __u64 validation_bits;
|
|
+ __u32 port_type;
|
|
+ struct {
|
|
+ __u8 minor;
|
|
+ __u8 major;
|
|
+ __u8 reserved[2];
|
|
+ } version;
|
|
+ __u16 command;
|
|
+ __u16 status;
|
|
+ __u32 reserved;
|
|
+ struct {
|
|
+ __u16 vendor_id;
|
|
+ __u16 device_id;
|
|
+ __u8 class_code[3];
|
|
+ __u8 function;
|
|
+ __u8 device;
|
|
+ __u16 segment;
|
|
+ __u8 bus;
|
|
+ __u8 secondary_bus;
|
|
+ __u16 slot;
|
|
+ __u8 reserved;
|
|
+ } __attribute__((packed)) device_id;
|
|
+ struct {
|
|
+ __u32 lower;
|
|
+ __u32 upper;
|
|
+ } serial_number;
|
|
+ struct {
|
|
+ __u16 secondary_status;
|
|
+ __u16 control;
|
|
+ } bridge;
|
|
+ __u8 capability[60];
|
|
+ __u8 aer_info[96];
|
|
+};
|
|
+
|
|
+struct ghes {
|
|
+ union {
|
|
+ struct acpi_hest_generic *generic;
|
|
+ struct acpi_hest_generic_v2 *generic_v2;
|
|
+ };
|
|
+ struct acpi_hest_generic_status *estatus;
|
|
+ u64 buffer_paddr;
|
|
+ long unsigned int flags;
|
|
+ union {
|
|
+ struct list_head list;
|
|
+ struct timer_list timer;
|
|
+ unsigned int irq;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct ghes_estatus_node {
|
|
+ struct llist_node llnode;
|
|
+ struct acpi_hest_generic *generic;
|
|
+ struct ghes *ghes;
|
|
+};
|
|
+
|
|
+struct ghes_estatus_cache {
|
|
+ u32 estatus_len;
|
|
+ atomic_t count;
|
|
+ struct acpi_hest_generic *generic;
|
|
+ long long unsigned int time_in;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct ghes_mem_err {
|
|
+ int notify_type;
|
|
+ int severity;
|
|
+ struct cper_sec_mem_err *mem_err;
|
|
+};
|
|
+
|
|
+struct ghes_vendor_record_entry {
|
|
+ struct work_struct work;
|
|
+ int error_severity;
|
|
+ char vendor_record[0];
|
|
+};
|
|
+
|
|
+struct pmic_table {
|
|
+ int address;
|
|
+ int reg;
|
|
+ int bit;
|
|
+};
|
|
+
|
|
+struct regmap;
|
|
+
|
|
+struct intel_pmic_opregion_data {
|
|
+ int (*get_power)(struct regmap *, int, int, u64 *);
|
|
+ int (*update_power)(struct regmap *, int, int, bool);
|
|
+ int (*get_raw_temp)(struct regmap *, int);
|
|
+ int (*update_aux)(struct regmap *, int, int);
|
|
+ int (*get_policy)(struct regmap *, int, int, u64 *);
|
|
+ int (*update_policy)(struct regmap *, int, int, int);
|
|
+ struct pmic_table *power_table;
|
|
+ int power_table_count;
|
|
+ struct pmic_table *thermal_table;
|
|
+ int thermal_table_count;
|
|
+};
|
|
+
|
|
+struct intel_pmic_regs_handler_ctx {
|
|
+ unsigned int val;
|
|
+ u16 addr;
|
|
+};
|
|
+
|
|
+struct intel_pmic_opregion {
|
|
+ struct mutex lock;
|
|
+ struct acpi_lpat_conversion_table *lpat_table;
|
|
+ struct regmap *regmap;
|
|
+ struct intel_pmic_opregion_data *data;
|
|
+ struct intel_pmic_regs_handler_ctx ctx;
|
|
+};
|
|
+
|
|
+struct acpi_table_xsdt {
|
|
+ struct acpi_table_header header;
|
|
+ u64 table_offset_entry[1];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct sfi_table_key {
|
|
+ char *sig;
|
|
+ char *oem_id;
|
|
+ char *oem_table_id;
|
|
+};
|
|
+
|
|
+struct sfi_table_attr {
|
|
+ struct bin_attribute attr;
|
|
+ char name[8];
|
|
+};
|
|
+
|
|
+typedef int (*sfi_table_handler)(struct sfi_table_header *);
|
|
+
|
|
+struct pnp_resource {
|
|
+ struct list_head list;
|
|
+ struct resource res;
|
|
+};
|
|
+
|
|
+struct pnp_port {
|
|
+ resource_size_t min;
|
|
+ resource_size_t max;
|
|
+ resource_size_t align;
|
|
+ resource_size_t size;
|
|
+ unsigned char flags;
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ long unsigned int bits[4];
|
|
+} pnp_irq_mask_t;
|
|
+
|
|
+struct pnp_irq {
|
|
+ pnp_irq_mask_t map;
|
|
+ unsigned char flags;
|
|
+};
|
|
+
|
|
+struct pnp_dma {
|
|
+ unsigned char map;
|
|
+ unsigned char flags;
|
|
+};
|
|
+
|
|
+struct pnp_mem {
|
|
+ resource_size_t min;
|
|
+ resource_size_t max;
|
|
+ resource_size_t align;
|
|
+ resource_size_t size;
|
|
+ unsigned char flags;
|
|
+};
|
|
+
|
|
+struct pnp_option {
|
|
+ struct list_head list;
|
|
+ unsigned int flags;
|
|
+ long unsigned int type;
|
|
+ union {
|
|
+ struct pnp_port port;
|
|
+ struct pnp_irq irq;
|
|
+ struct pnp_dma dma;
|
|
+ struct pnp_mem mem;
|
|
+ } u;
|
|
+};
|
|
+
|
|
+struct pnp_info_buffer {
|
|
+ char *buffer;
|
|
+ char *curr;
|
|
+ long unsigned int size;
|
|
+ long unsigned int len;
|
|
+ int stop;
|
|
+ int error;
|
|
+};
|
|
+
|
|
+typedef struct pnp_info_buffer pnp_info_buffer_t;
|
|
+
|
|
+struct pnp_fixup {
|
|
+ char id[7];
|
|
+ void (*quirk_function)(struct pnp_dev *);
|
|
+};
|
|
+
|
|
+struct acpipnp_parse_option_s {
|
|
+ struct pnp_dev *dev;
|
|
+ unsigned int option_flags;
|
|
+};
|
|
+
|
|
+struct clk_bulk_data {
|
|
+ const char *id;
|
|
+ struct clk *clk;
|
|
+};
|
|
+
|
|
+struct clk_bulk_devres {
|
|
+ struct clk_bulk_data *clks;
|
|
+ int num_clks;
|
|
+};
|
|
+
|
|
+struct clk_lookup {
|
|
+ struct list_head node;
|
|
+ const char *dev_id;
|
|
+ const char *con_id;
|
|
+ struct clk *clk;
|
|
+ struct clk_hw *clk_hw;
|
|
+};
|
|
+
|
|
+struct clk_lookup_alloc {
|
|
+ struct clk_lookup cl;
|
|
+ char dev_id[20];
|
|
+ char con_id[16];
|
|
+};
|
|
+
|
|
+struct clk_notifier {
|
|
+ struct clk *clk;
|
|
+ struct srcu_notifier_head notifier_head;
|
|
+ struct list_head node;
|
|
+};
|
|
+
|
|
+struct clk {
|
|
+ struct clk_core *core;
|
|
+ const char *dev_id;
|
|
+ const char *con_id;
|
|
+ long unsigned int min_rate;
|
|
+ long unsigned int max_rate;
|
|
+ unsigned int exclusive_count;
|
|
+ struct hlist_node clks_node;
|
|
+};
|
|
+
|
|
+struct clk_notifier_data {
|
|
+ struct clk *clk;
|
|
+ long unsigned int old_rate;
|
|
+ long unsigned int new_rate;
|
|
+};
|
|
+
|
|
+struct clk_core {
|
|
+ const char *name;
|
|
+ const struct clk_ops *ops;
|
|
+ struct clk_hw *hw;
|
|
+ struct module *owner;
|
|
+ struct device *dev;
|
|
+ struct clk_core *parent;
|
|
+ const char **parent_names;
|
|
+ struct clk_core **parents;
|
|
+ u8 num_parents;
|
|
+ u8 new_parent_index;
|
|
+ long unsigned int rate;
|
|
+ long unsigned int req_rate;
|
|
+ long unsigned int new_rate;
|
|
+ struct clk_core *new_parent;
|
|
+ struct clk_core *new_child;
|
|
+ long unsigned int flags;
|
|
+ bool orphan;
|
|
+ unsigned int enable_count;
|
|
+ unsigned int prepare_count;
|
|
+ unsigned int protect_count;
|
|
+ long unsigned int min_rate;
|
|
+ long unsigned int max_rate;
|
|
+ long unsigned int accuracy;
|
|
+ int phase;
|
|
+ struct clk_duty duty;
|
|
+ struct hlist_head children;
|
|
+ struct hlist_node child_node;
|
|
+ struct hlist_head clks;
|
|
+ unsigned int notifier_count;
|
|
+ struct dentry *dentry;
|
|
+ struct hlist_node debug_node;
|
|
+ struct kref ref;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_clk {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_clk_rate {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ long unsigned int rate;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_clk_parent {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ u32 __data_loc_pname;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_clk_phase {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ int phase;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_clk_duty_cycle {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ unsigned int num;
|
|
+ unsigned int den;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_clk {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_clk_rate {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_clk_parent {
|
|
+ u32 name;
|
|
+ u32 pname;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_clk_phase {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_clk_duty_cycle {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct clk_div_table {
|
|
+ unsigned int val;
|
|
+ unsigned int div;
|
|
+};
|
|
+
|
|
+struct clk_divider {
|
|
+ struct clk_hw hw;
|
|
+ void *reg;
|
|
+ u8 shift;
|
|
+ u8 width;
|
|
+ u8 flags;
|
|
+ const struct clk_div_table *table;
|
|
+ spinlock_t *lock;
|
|
+};
|
|
+
|
|
+struct clk_fixed_factor {
|
|
+ struct clk_hw hw;
|
|
+ unsigned int mult;
|
|
+ unsigned int div;
|
|
+};
|
|
+
|
|
+struct clk_fixed_rate {
|
|
+ struct clk_hw hw;
|
|
+ long unsigned int fixed_rate;
|
|
+ long unsigned int fixed_accuracy;
|
|
+ u8 flags;
|
|
+};
|
|
+
|
|
+struct clk_gate {
|
|
+ struct clk_hw hw;
|
|
+ void *reg;
|
|
+ u8 bit_idx;
|
|
+ u8 flags;
|
|
+ spinlock_t *lock;
|
|
+};
|
|
+
|
|
+struct clk_multiplier {
|
|
+ struct clk_hw hw;
|
|
+ void *reg;
|
|
+ u8 shift;
|
|
+ u8 width;
|
|
+ u8 flags;
|
|
+ spinlock_t *lock;
|
|
+};
|
|
+
|
|
+struct clk_mux {
|
|
+ struct clk_hw hw;
|
|
+ void *reg;
|
|
+ u32 *table;
|
|
+ u32 mask;
|
|
+ u8 shift;
|
|
+ u8 flags;
|
|
+ spinlock_t *lock;
|
|
+};
|
|
+
|
|
+struct clk_composite {
|
|
+ struct clk_hw hw;
|
|
+ struct clk_ops ops;
|
|
+ struct clk_hw *mux_hw;
|
|
+ struct clk_hw *rate_hw;
|
|
+ struct clk_hw *gate_hw;
|
|
+ const struct clk_ops *mux_ops;
|
|
+ const struct clk_ops *rate_ops;
|
|
+ const struct clk_ops *gate_ops;
|
|
+};
|
|
+
|
|
+struct clk_fractional_divider {
|
|
+ struct clk_hw hw;
|
|
+ void *reg;
|
|
+ u8 mshift;
|
|
+ u8 mwidth;
|
|
+ u32 mmask;
|
|
+ u8 nshift;
|
|
+ u8 nwidth;
|
|
+ u32 nmask;
|
|
+ u8 flags;
|
|
+ void (*approximation)(struct clk_hw *, long unsigned int, long unsigned int *, long unsigned int *, long unsigned int *);
|
|
+ spinlock_t *lock;
|
|
+};
|
|
+
|
|
+struct gpio_desc___2;
|
|
+
|
|
+struct clk_gpio {
|
|
+ struct clk_hw hw;
|
|
+ struct gpio_desc___2 *gpiod;
|
|
+};
|
|
+
|
|
+struct pmc_clk {
|
|
+ const char *name;
|
|
+ long unsigned int freq;
|
|
+ const char *parent_name;
|
|
+};
|
|
+
|
|
+struct pmc_clk_data {
|
|
+ void *base;
|
|
+ const struct pmc_clk *clks;
|
|
+ bool critical;
|
|
+};
|
|
+
|
|
+struct clk_plt_fixed {
|
|
+ struct clk_hw *clk;
|
|
+ struct clk_lookup *lookup;
|
|
+};
|
|
+
|
|
+struct clk_plt {
|
|
+ struct clk_hw hw;
|
|
+ void *reg;
|
|
+ struct clk_lookup *lookup;
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct clk_plt_data {
|
|
+ struct clk_plt_fixed **parents;
|
|
+ u8 nparents;
|
|
+ struct clk_plt *clks[6];
|
|
+ struct clk_lookup *mclk_lookup;
|
|
+ struct clk_lookup *ether_clk_lookup;
|
|
+};
|
|
+
|
|
+typedef s32 dma_cookie_t;
|
|
+
|
|
+enum dma_status {
|
|
+ DMA_COMPLETE = 0,
|
|
+ DMA_IN_PROGRESS = 1,
|
|
+ DMA_PAUSED = 2,
|
|
+ DMA_ERROR = 3,
|
|
+};
|
|
+
|
|
+enum dma_transaction_type {
|
|
+ DMA_MEMCPY = 0,
|
|
+ DMA_XOR = 1,
|
|
+ DMA_PQ = 2,
|
|
+ DMA_XOR_VAL = 3,
|
|
+ DMA_PQ_VAL = 4,
|
|
+ DMA_MEMSET = 5,
|
|
+ DMA_MEMSET_SG = 6,
|
|
+ DMA_INTERRUPT = 7,
|
|
+ DMA_PRIVATE = 8,
|
|
+ DMA_ASYNC_TX = 9,
|
|
+ DMA_SLAVE = 10,
|
|
+ DMA_CYCLIC = 11,
|
|
+ DMA_INTERLEAVE = 12,
|
|
+ DMA_TX_TYPE_END = 13,
|
|
+};
|
|
+
|
|
+enum dma_transfer_direction {
|
|
+ DMA_MEM_TO_MEM = 0,
|
|
+ DMA_MEM_TO_DEV = 1,
|
|
+ DMA_DEV_TO_MEM = 2,
|
|
+ DMA_DEV_TO_DEV = 3,
|
|
+ DMA_TRANS_NONE = 4,
|
|
+};
|
|
+
|
|
+struct data_chunk {
|
|
+ size_t size;
|
|
+ size_t icg;
|
|
+ size_t dst_icg;
|
|
+ size_t src_icg;
|
|
+};
|
|
+
|
|
+struct dma_interleaved_template {
|
|
+ dma_addr_t src_start;
|
|
+ dma_addr_t dst_start;
|
|
+ enum dma_transfer_direction dir;
|
|
+ bool src_inc;
|
|
+ bool dst_inc;
|
|
+ bool src_sgl;
|
|
+ bool dst_sgl;
|
|
+ size_t numf;
|
|
+ size_t frame_size;
|
|
+ struct data_chunk sgl[0];
|
|
+};
|
|
+
|
|
+enum dma_ctrl_flags {
|
|
+ DMA_PREP_INTERRUPT = 1,
|
|
+ DMA_CTRL_ACK = 2,
|
|
+ DMA_PREP_PQ_DISABLE_P = 4,
|
|
+ DMA_PREP_PQ_DISABLE_Q = 8,
|
|
+ DMA_PREP_CONTINUE = 16,
|
|
+ DMA_PREP_FENCE = 32,
|
|
+ DMA_CTRL_REUSE = 64,
|
|
+ DMA_PREP_CMD = 128,
|
|
+};
|
|
+
|
|
+enum sum_check_bits {
|
|
+ SUM_CHECK_P = 0,
|
|
+ SUM_CHECK_Q = 1,
|
|
+};
|
|
+
|
|
+enum sum_check_flags {
|
|
+ SUM_CHECK_P_RESULT = 1,
|
|
+ SUM_CHECK_Q_RESULT = 2,
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ long unsigned int bits[1];
|
|
+} dma_cap_mask_t;
|
|
+
|
|
+struct dma_chan_percpu {
|
|
+ long unsigned int memcpy_count;
|
|
+ long unsigned int bytes_transferred;
|
|
+};
|
|
+
|
|
+struct dma_router {
|
|
+ struct device *dev;
|
|
+ void (*route_free)(struct device *, void *);
|
|
+};
|
|
+
|
|
+struct dma_device;
|
|
+
|
|
+struct dma_chan_dev;
|
|
+
|
|
+struct dma_chan___2 {
|
|
+ struct dma_device *device;
|
|
+ dma_cookie_t cookie;
|
|
+ dma_cookie_t completed_cookie;
|
|
+ int chan_id;
|
|
+ struct dma_chan_dev *dev;
|
|
+ struct list_head device_node;
|
|
+ struct dma_chan_percpu *local;
|
|
+ int client_count;
|
|
+ int table_count;
|
|
+ struct dma_router *router;
|
|
+ void *route_data;
|
|
+ void *private;
|
|
+};
|
|
+
|
|
+typedef bool (*dma_filter_fn)(struct dma_chan___2 *, void *);
|
|
+
|
|
+struct dma_slave_map;
|
|
+
|
|
+struct dma_filter {
|
|
+ dma_filter_fn fn;
|
|
+ int mapcnt;
|
|
+ const struct dma_slave_map *map;
|
|
+};
|
|
+
|
|
+enum dmaengine_alignment {
|
|
+ DMAENGINE_ALIGN_1_BYTE = 0,
|
|
+ DMAENGINE_ALIGN_2_BYTES = 1,
|
|
+ DMAENGINE_ALIGN_4_BYTES = 2,
|
|
+ DMAENGINE_ALIGN_8_BYTES = 3,
|
|
+ DMAENGINE_ALIGN_16_BYTES = 4,
|
|
+ DMAENGINE_ALIGN_32_BYTES = 5,
|
|
+ DMAENGINE_ALIGN_64_BYTES = 6,
|
|
+};
|
|
+
|
|
+enum dma_residue_granularity {
|
|
+ DMA_RESIDUE_GRANULARITY_DESCRIPTOR = 0,
|
|
+ DMA_RESIDUE_GRANULARITY_SEGMENT = 1,
|
|
+ DMA_RESIDUE_GRANULARITY_BURST = 2,
|
|
+};
|
|
+
|
|
+struct dma_async_tx_descriptor;
|
|
+
|
|
+struct dma_slave_config;
|
|
+
|
|
+struct dma_tx_state;
|
|
+
|
|
+struct dma_device {
|
|
+ unsigned int chancnt;
|
|
+ unsigned int privatecnt;
|
|
+ struct list_head channels;
|
|
+ struct list_head global_node;
|
|
+ struct dma_filter filter;
|
|
+ dma_cap_mask_t cap_mask;
|
|
+ short unsigned int max_xor;
|
|
+ short unsigned int max_pq;
|
|
+ enum dmaengine_alignment copy_align;
|
|
+ enum dmaengine_alignment xor_align;
|
|
+ enum dmaengine_alignment pq_align;
|
|
+ enum dmaengine_alignment fill_align;
|
|
+ int dev_id;
|
|
+ struct device *dev;
|
|
+ u32 src_addr_widths;
|
|
+ u32 dst_addr_widths;
|
|
+ u32 directions;
|
|
+ u32 max_burst;
|
|
+ bool descriptor_reuse;
|
|
+ enum dma_residue_granularity residue_granularity;
|
|
+ int (*device_alloc_chan_resources)(struct dma_chan___2 *);
|
|
+ void (*device_free_chan_resources)(struct dma_chan___2 *);
|
|
+ struct dma_async_tx_descriptor * (*device_prep_dma_memcpy)(struct dma_chan___2 *, dma_addr_t, dma_addr_t, size_t, long unsigned int);
|
|
+ struct dma_async_tx_descriptor * (*device_prep_dma_xor)(struct dma_chan___2 *, dma_addr_t, dma_addr_t *, unsigned int, size_t, long unsigned int);
|
|
+ struct dma_async_tx_descriptor * (*device_prep_dma_xor_val)(struct dma_chan___2 *, dma_addr_t *, unsigned int, size_t, enum sum_check_flags *, long unsigned int);
|
|
+ struct dma_async_tx_descriptor * (*device_prep_dma_pq)(struct dma_chan___2 *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, long unsigned int);
|
|
+ struct dma_async_tx_descriptor * (*device_prep_dma_pq_val)(struct dma_chan___2 *, dma_addr_t *, dma_addr_t *, unsigned int, const unsigned char *, size_t, enum sum_check_flags *, long unsigned int);
|
|
+ struct dma_async_tx_descriptor * (*device_prep_dma_memset)(struct dma_chan___2 *, dma_addr_t, int, size_t, long unsigned int);
|
|
+ struct dma_async_tx_descriptor * (*device_prep_dma_memset_sg)(struct dma_chan___2 *, struct scatterlist *, unsigned int, int, long unsigned int);
|
|
+ struct dma_async_tx_descriptor * (*device_prep_dma_interrupt)(struct dma_chan___2 *, long unsigned int);
|
|
+ struct dma_async_tx_descriptor * (*device_prep_slave_sg)(struct dma_chan___2 *, struct scatterlist *, unsigned int, enum dma_transfer_direction, long unsigned int, void *);
|
|
+ struct dma_async_tx_descriptor * (*device_prep_dma_cyclic)(struct dma_chan___2 *, dma_addr_t, size_t, size_t, enum dma_transfer_direction, long unsigned int);
|
|
+ struct dma_async_tx_descriptor * (*device_prep_interleaved_dma)(struct dma_chan___2 *, struct dma_interleaved_template *, long unsigned int);
|
|
+ struct dma_async_tx_descriptor * (*device_prep_dma_imm_data)(struct dma_chan___2 *, dma_addr_t, u64, long unsigned int);
|
|
+ int (*device_config)(struct dma_chan___2 *, struct dma_slave_config *);
|
|
+ int (*device_pause)(struct dma_chan___2 *);
|
|
+ int (*device_resume)(struct dma_chan___2 *);
|
|
+ int (*device_terminate_all)(struct dma_chan___2 *);
|
|
+ void (*device_synchronize)(struct dma_chan___2 *);
|
|
+ enum dma_status (*device_tx_status)(struct dma_chan___2 *, dma_cookie_t, struct dma_tx_state *);
|
|
+ void (*device_issue_pending)(struct dma_chan___2 *);
|
|
+};
|
|
+
|
|
+struct dma_chan_dev {
|
|
+ struct dma_chan___2 *chan;
|
|
+ struct device device;
|
|
+ int dev_id;
|
|
+ atomic_t *idr_ref;
|
|
+};
|
|
+
|
|
+enum dma_slave_buswidth {
|
|
+ DMA_SLAVE_BUSWIDTH_UNDEFINED = 0,
|
|
+ DMA_SLAVE_BUSWIDTH_1_BYTE = 1,
|
|
+ DMA_SLAVE_BUSWIDTH_2_BYTES = 2,
|
|
+ DMA_SLAVE_BUSWIDTH_3_BYTES = 3,
|
|
+ DMA_SLAVE_BUSWIDTH_4_BYTES = 4,
|
|
+ DMA_SLAVE_BUSWIDTH_8_BYTES = 8,
|
|
+ DMA_SLAVE_BUSWIDTH_16_BYTES = 16,
|
|
+ DMA_SLAVE_BUSWIDTH_32_BYTES = 32,
|
|
+ DMA_SLAVE_BUSWIDTH_64_BYTES = 64,
|
|
+};
|
|
+
|
|
+struct dma_slave_config {
|
|
+ enum dma_transfer_direction direction;
|
|
+ phys_addr_t src_addr;
|
|
+ phys_addr_t dst_addr;
|
|
+ enum dma_slave_buswidth src_addr_width;
|
|
+ enum dma_slave_buswidth dst_addr_width;
|
|
+ u32 src_maxburst;
|
|
+ u32 dst_maxburst;
|
|
+ u32 src_port_window_size;
|
|
+ u32 dst_port_window_size;
|
|
+ bool device_fc;
|
|
+ unsigned int slave_id;
|
|
+};
|
|
+
|
|
+struct dma_slave_caps {
|
|
+ u32 src_addr_widths;
|
|
+ u32 dst_addr_widths;
|
|
+ u32 directions;
|
|
+ u32 max_burst;
|
|
+ bool cmd_pause;
|
|
+ bool cmd_resume;
|
|
+ bool cmd_terminate;
|
|
+ enum dma_residue_granularity residue_granularity;
|
|
+ bool descriptor_reuse;
|
|
+};
|
|
+
|
|
+typedef void (*dma_async_tx_callback)(void *);
|
|
+
|
|
+enum dmaengine_tx_result {
|
|
+ DMA_TRANS_NOERROR = 0,
|
|
+ DMA_TRANS_READ_FAILED = 1,
|
|
+ DMA_TRANS_WRITE_FAILED = 2,
|
|
+ DMA_TRANS_ABORTED = 3,
|
|
+};
|
|
+
|
|
+struct dmaengine_result {
|
|
+ enum dmaengine_tx_result result;
|
|
+ u32 residue;
|
|
+};
|
|
+
|
|
+typedef void (*dma_async_tx_callback_result)(void *, const struct dmaengine_result *);
|
|
+
|
|
+struct dmaengine_unmap_data {
|
|
+ u16 map_cnt;
|
|
+ u8 to_cnt;
|
|
+ u8 from_cnt;
|
|
+ u8 bidi_cnt;
|
|
+ struct device *dev;
|
|
+ struct kref kref;
|
|
+ size_t len;
|
|
+ dma_addr_t addr[0];
|
|
+};
|
|
+
|
|
+struct dma_async_tx_descriptor {
|
|
+ dma_cookie_t cookie;
|
|
+ enum dma_ctrl_flags flags;
|
|
+ dma_addr_t phys;
|
|
+ struct dma_chan___2 *chan;
|
|
+ dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *);
|
|
+ int (*desc_free)(struct dma_async_tx_descriptor *);
|
|
+ dma_async_tx_callback callback;
|
|
+ dma_async_tx_callback_result callback_result;
|
|
+ void *callback_param;
|
|
+ struct dmaengine_unmap_data *unmap;
|
|
+};
|
|
+
|
|
+struct dma_tx_state {
|
|
+ dma_cookie_t last;
|
|
+ dma_cookie_t used;
|
|
+ u32 residue;
|
|
+};
|
|
+
|
|
+struct dma_slave_map {
|
|
+ const char *devname;
|
|
+ const char *slave;
|
|
+ void *param;
|
|
+};
|
|
+
|
|
+struct dma_chan_tbl_ent {
|
|
+ struct dma_chan___2 *chan;
|
|
+};
|
|
+
|
|
+struct dmaengine_unmap_pool {
|
|
+ struct kmem_cache *cache;
|
|
+ const char *name;
|
|
+ mempool_t *pool;
|
|
+ size_t size;
|
|
+};
|
|
+
|
|
+struct dmaengine_desc_callback {
|
|
+ dma_async_tx_callback callback;
|
|
+ dma_async_tx_callback_result callback_result;
|
|
+ void *callback_param;
|
|
+};
|
|
+
|
|
+struct virt_dma_desc {
|
|
+ struct dma_async_tx_descriptor tx;
|
|
+ struct list_head node;
|
|
+};
|
|
+
|
|
+struct virt_dma_chan {
|
|
+ struct dma_chan___2 chan;
|
|
+ struct tasklet_struct task;
|
|
+ void (*desc_free)(struct virt_dma_desc *);
|
|
+ spinlock_t lock;
|
|
+ struct list_head desc_allocated;
|
|
+ struct list_head desc_submitted;
|
|
+ struct list_head desc_issued;
|
|
+ struct list_head desc_completed;
|
|
+ struct virt_dma_desc *cyclic;
|
|
+ struct virt_dma_desc *vd_terminated;
|
|
+};
|
|
+
|
|
+struct acpi_table_csrt {
|
|
+ struct acpi_table_header header;
|
|
+};
|
|
+
|
|
+struct acpi_csrt_group {
|
|
+ u32 length;
|
|
+ u32 vendor_id;
|
|
+ u32 subvendor_id;
|
|
+ u16 device_id;
|
|
+ u16 subdevice_id;
|
|
+ u16 revision;
|
|
+ u16 reserved;
|
|
+ u32 shared_info_length;
|
|
+};
|
|
+
|
|
+struct acpi_csrt_shared_info {
|
|
+ u16 major_version;
|
|
+ u16 minor_version;
|
|
+ u32 mmio_base_low;
|
|
+ u32 mmio_base_high;
|
|
+ u32 gsi_interrupt;
|
|
+ u8 interrupt_polarity;
|
|
+ u8 interrupt_mode;
|
|
+ u8 num_channels;
|
|
+ u8 dma_address_width;
|
|
+ u16 base_request_line;
|
|
+ u16 num_handshake_signals;
|
|
+ u32 max_block_size;
|
|
+};
|
|
+
|
|
+struct acpi_dma_spec {
|
|
+ int chan_id;
|
|
+ int slave_id;
|
|
+ struct device *dev;
|
|
+};
|
|
+
|
|
+struct acpi_dma {
|
|
+ struct list_head dma_controllers;
|
|
+ struct device *dev;
|
|
+ struct dma_chan___2 * (*acpi_dma_xlate)(struct acpi_dma_spec *, struct acpi_dma *);
|
|
+ void *data;
|
|
+ short unsigned int base_request_line;
|
|
+ short unsigned int end_request_line;
|
|
+};
|
|
+
|
|
+struct acpi_dma_filter_info {
|
|
+ dma_cap_mask_t dma_cap;
|
|
+ dma_filter_fn filter_fn;
|
|
+};
|
|
+
|
|
+struct acpi_dma_parser_data {
|
|
+ struct acpi_dma_spec dma_spec;
|
|
+ size_t index;
|
|
+ size_t n;
|
|
+};
|
|
+
|
|
+struct dw_dma_slave {
|
|
+ struct device *dma_dev;
|
|
+ u8 src_id;
|
|
+ u8 dst_id;
|
|
+ u8 m_master;
|
|
+ u8 p_master;
|
|
+ bool hs_polarity;
|
|
+};
|
|
+
|
|
+struct dw_dma_platform_data {
|
|
+ unsigned int nr_channels;
|
|
+ bool is_private;
|
|
+ bool is_memcpy;
|
|
+ bool is_idma32;
|
|
+ unsigned char chan_allocation_order;
|
|
+ unsigned char chan_priority;
|
|
+ unsigned int block_size;
|
|
+ unsigned char nr_masters;
|
|
+ unsigned char data_width[4];
|
|
+ unsigned char multi_block[8];
|
|
+ unsigned char protctl;
|
|
+};
|
|
+
|
|
+struct dw_dma;
|
|
+
|
|
+struct dw_dma_chip {
|
|
+ struct device *dev;
|
|
+ int id;
|
|
+ int irq;
|
|
+ void *regs;
|
|
+ struct clk *clk;
|
|
+ struct dw_dma *dw;
|
|
+ const struct dw_dma_platform_data *pdata;
|
|
+};
|
|
+
|
|
+struct dma_pool___2;
|
|
+
|
|
+struct dw_dma_chan;
|
|
+
|
|
+struct dw_dma {
|
|
+ struct dma_device dma;
|
|
+ char name[20];
|
|
+ void *regs;
|
|
+ struct dma_pool___2 *desc_pool;
|
|
+ struct tasklet_struct tasklet;
|
|
+ struct dw_dma_chan *chan;
|
|
+ u8 all_chan_mask;
|
|
+ u8 in_use;
|
|
+ struct dw_dma_platform_data *pdata;
|
|
+};
|
|
+
|
|
+enum dw_dma_fc {
|
|
+ DW_DMA_FC_D_M2M = 0,
|
|
+ DW_DMA_FC_D_M2P = 1,
|
|
+ DW_DMA_FC_D_P2M = 2,
|
|
+ DW_DMA_FC_D_P2P = 3,
|
|
+ DW_DMA_FC_P_P2M = 4,
|
|
+ DW_DMA_FC_SP_P2P = 5,
|
|
+ DW_DMA_FC_P_M2P = 6,
|
|
+ DW_DMA_FC_DP_P2P = 7,
|
|
+};
|
|
+
|
|
+struct dw_dma_chan_regs {
|
|
+ u32 SAR;
|
|
+ u32 __pad_SAR;
|
|
+ u32 DAR;
|
|
+ u32 __pad_DAR;
|
|
+ u32 LLP;
|
|
+ u32 __pad_LLP;
|
|
+ u32 CTL_LO;
|
|
+ u32 CTL_HI;
|
|
+ u32 SSTAT;
|
|
+ u32 __pad_SSTAT;
|
|
+ u32 DSTAT;
|
|
+ u32 __pad_DSTAT;
|
|
+ u32 SSTATAR;
|
|
+ u32 __pad_SSTATAR;
|
|
+ u32 DSTATAR;
|
|
+ u32 __pad_DSTATAR;
|
|
+ u32 CFG_LO;
|
|
+ u32 CFG_HI;
|
|
+ u32 SGR;
|
|
+ u32 __pad_SGR;
|
|
+ u32 DSR;
|
|
+ u32 __pad_DSR;
|
|
+};
|
|
+
|
|
+struct dw_dma_irq_regs {
|
|
+ u32 XFER;
|
|
+ u32 __pad_XFER;
|
|
+ u32 BLOCK;
|
|
+ u32 __pad_BLOCK;
|
|
+ u32 SRC_TRAN;
|
|
+ u32 __pad_SRC_TRAN;
|
|
+ u32 DST_TRAN;
|
|
+ u32 __pad_DST_TRAN;
|
|
+ u32 ERROR;
|
|
+ u32 __pad_ERROR;
|
|
+};
|
|
+
|
|
+struct dw_dma_regs {
|
|
+ struct dw_dma_chan_regs CHAN[8];
|
|
+ struct dw_dma_irq_regs RAW;
|
|
+ struct dw_dma_irq_regs STATUS;
|
|
+ struct dw_dma_irq_regs MASK;
|
|
+ struct dw_dma_irq_regs CLEAR;
|
|
+ u32 STATUS_INT;
|
|
+ u32 __pad_STATUS_INT;
|
|
+ u32 REQ_SRC;
|
|
+ u32 __pad_REQ_SRC;
|
|
+ u32 REQ_DST;
|
|
+ u32 __pad_REQ_DST;
|
|
+ u32 SGL_REQ_SRC;
|
|
+ u32 __pad_SGL_REQ_SRC;
|
|
+ u32 SGL_REQ_DST;
|
|
+ u32 __pad_SGL_REQ_DST;
|
|
+ u32 LAST_SRC;
|
|
+ u32 __pad_LAST_SRC;
|
|
+ u32 LAST_DST;
|
|
+ u32 __pad_LAST_DST;
|
|
+ u32 CFG;
|
|
+ u32 __pad_CFG;
|
|
+ u32 CH_EN;
|
|
+ u32 __pad_CH_EN;
|
|
+ u32 ID;
|
|
+ u32 __pad_ID;
|
|
+ u32 TEST;
|
|
+ u32 __pad_TEST;
|
|
+ u32 CLASS_PRIORITY0;
|
|
+ u32 __pad_CLASS_PRIORITY0;
|
|
+ u32 CLASS_PRIORITY1;
|
|
+ u32 __pad_CLASS_PRIORITY1;
|
|
+ u32 __reserved;
|
|
+ u32 DWC_PARAMS[8];
|
|
+ u32 MULTI_BLK_TYPE;
|
|
+ u32 MAX_BLK_SIZE;
|
|
+ u32 DW_PARAMS;
|
|
+ u32 COMP_TYPE;
|
|
+ u32 COMP_VERSION;
|
|
+ u32 FIFO_PARTITION0;
|
|
+ u32 __pad_FIFO_PARTITION0;
|
|
+ u32 FIFO_PARTITION1;
|
|
+ u32 __pad_FIFO_PARTITION1;
|
|
+ u32 SAI_ERR;
|
|
+ u32 __pad_SAI_ERR;
|
|
+ u32 GLOBAL_CFG;
|
|
+ u32 __pad_GLOBAL_CFG;
|
|
+};
|
|
+
|
|
+enum dw_dma_msize {
|
|
+ DW_DMA_MSIZE_1 = 0,
|
|
+ DW_DMA_MSIZE_4 = 1,
|
|
+ DW_DMA_MSIZE_8 = 2,
|
|
+ DW_DMA_MSIZE_16 = 3,
|
|
+ DW_DMA_MSIZE_32 = 4,
|
|
+ DW_DMA_MSIZE_64 = 5,
|
|
+ DW_DMA_MSIZE_128 = 6,
|
|
+ DW_DMA_MSIZE_256 = 7,
|
|
+};
|
|
+
|
|
+enum dw_dmac_flags {
|
|
+ DW_DMA_IS_CYCLIC = 0,
|
|
+ DW_DMA_IS_SOFT_LLP = 1,
|
|
+ DW_DMA_IS_PAUSED = 2,
|
|
+ DW_DMA_IS_INITIALIZED = 3,
|
|
+};
|
|
+
|
|
+struct dw_dma_chan {
|
|
+ struct dma_chan___2 chan;
|
|
+ void *ch_regs;
|
|
+ u8 mask;
|
|
+ u8 priority;
|
|
+ enum dma_transfer_direction direction;
|
|
+ struct list_head *tx_node_active;
|
|
+ spinlock_t lock;
|
|
+ long unsigned int flags;
|
|
+ struct list_head active_list;
|
|
+ struct list_head queue;
|
|
+ unsigned int descs_allocated;
|
|
+ unsigned int block_size;
|
|
+ bool nollp;
|
|
+ struct dw_dma_slave dws;
|
|
+ struct dma_slave_config dma_sconfig;
|
|
+};
|
|
+
|
|
+struct dw_lli {
|
|
+ __le32 sar;
|
|
+ __le32 dar;
|
|
+ __le32 llp;
|
|
+ __le32 ctllo;
|
|
+ __le32 ctlhi;
|
|
+ __le32 sstat;
|
|
+ __le32 dstat;
|
|
+};
|
|
+
|
|
+struct dw_desc {
|
|
+ struct dw_lli lli;
|
|
+ struct list_head desc_node;
|
|
+ struct list_head tx_list;
|
|
+ struct dma_async_tx_descriptor txd;
|
|
+ size_t len;
|
|
+ size_t total_len;
|
|
+ u32 residue;
|
|
+};
|
|
+
|
|
+struct hsu_dma;
|
|
+
|
|
+struct hsu_dma_chip {
|
|
+ struct device *dev;
|
|
+ int irq;
|
|
+ void *regs;
|
|
+ unsigned int length;
|
|
+ unsigned int offset;
|
|
+ struct hsu_dma *hsu;
|
|
+};
|
|
+
|
|
+struct hsu_dma_chan;
|
|
+
|
|
+struct hsu_dma {
|
|
+ struct dma_device dma;
|
|
+ struct hsu_dma_chan *chan;
|
|
+ short unsigned int nr_channels;
|
|
+};
|
|
+
|
|
+struct hsu_dma_sg {
|
|
+ dma_addr_t addr;
|
|
+ unsigned int len;
|
|
+};
|
|
+
|
|
+struct hsu_dma_desc {
|
|
+ struct virt_dma_desc vdesc;
|
|
+ enum dma_transfer_direction direction;
|
|
+ struct hsu_dma_sg *sg;
|
|
+ unsigned int nents;
|
|
+ size_t length;
|
|
+ unsigned int active;
|
|
+ enum dma_status status;
|
|
+};
|
|
+
|
|
+struct hsu_dma_chan {
|
|
+ struct virt_dma_chan vchan;
|
|
+ void *reg;
|
|
+ enum dma_transfer_direction direction;
|
|
+ struct dma_slave_config config;
|
|
+ struct hsu_dma_desc *desc;
|
|
+};
|
|
+
|
|
+struct virtio_driver {
|
|
+ struct device_driver driver;
|
|
+ const struct virtio_device_id *id_table;
|
|
+ const unsigned int *feature_table;
|
|
+ unsigned int feature_table_size;
|
|
+ const unsigned int *feature_table_legacy;
|
|
+ unsigned int feature_table_size_legacy;
|
|
+ int (*validate)(struct virtio_device *);
|
|
+ int (*probe)(struct virtio_device *);
|
|
+ void (*scan)(struct virtio_device *);
|
|
+ void (*remove)(struct virtio_device *);
|
|
+ void (*config_changed)(struct virtio_device *);
|
|
+ int (*freeze)(struct virtio_device *);
|
|
+ int (*restore)(struct virtio_device *);
|
|
+};
|
|
+
|
|
+struct vring_desc_state {
|
|
+ void *data;
|
|
+ struct vring_desc *indir_desc;
|
|
+};
|
|
+
|
|
+struct vring_virtqueue {
|
|
+ struct virtqueue vq;
|
|
+ struct vring vring;
|
|
+ bool weak_barriers;
|
|
+ bool broken;
|
|
+ bool indirect;
|
|
+ bool event;
|
|
+ unsigned int free_head;
|
|
+ unsigned int num_added;
|
|
+ u16 last_used_idx;
|
|
+ u16 avail_flags_shadow;
|
|
+ u16 avail_idx_shadow;
|
|
+ bool (*notify)(struct virtqueue *);
|
|
+ bool we_own_ring;
|
|
+ size_t queue_size_in_bytes;
|
|
+ dma_addr_t queue_dma_addr;
|
|
+ struct vring_desc_state desc_state[0];
|
|
+};
|
|
+
|
|
+struct virtio_pci_common_cfg {
|
|
+ __le32 device_feature_select;
|
|
+ __le32 device_feature;
|
|
+ __le32 guest_feature_select;
|
|
+ __le32 guest_feature;
|
|
+ __le16 msix_config;
|
|
+ __le16 num_queues;
|
|
+ __u8 device_status;
|
|
+ __u8 config_generation;
|
|
+ __le16 queue_select;
|
|
+ __le16 queue_size;
|
|
+ __le16 queue_msix_vector;
|
|
+ __le16 queue_enable;
|
|
+ __le16 queue_notify_off;
|
|
+ __le32 queue_desc_lo;
|
|
+ __le32 queue_desc_hi;
|
|
+ __le32 queue_avail_lo;
|
|
+ __le32 queue_avail_hi;
|
|
+ __le32 queue_used_lo;
|
|
+ __le32 queue_used_hi;
|
|
+};
|
|
+
|
|
+struct virtio_pci_vq_info {
|
|
+ struct virtqueue *vq;
|
|
+ struct list_head node;
|
|
+ unsigned int msix_vector;
|
|
+};
|
|
+
|
|
+struct virtio_pci_device {
|
|
+ struct virtio_device vdev;
|
|
+ struct pci_dev *pci_dev;
|
|
+ u8 *isr;
|
|
+ struct virtio_pci_common_cfg *common;
|
|
+ void *device;
|
|
+ void *notify_base;
|
|
+ size_t notify_len;
|
|
+ size_t device_len;
|
|
+ int notify_map_cap;
|
|
+ u32 notify_offset_multiplier;
|
|
+ int modern_bars;
|
|
+ void *ioaddr;
|
|
+ spinlock_t lock;
|
|
+ struct list_head virtqueues;
|
|
+ struct virtio_pci_vq_info **vqs;
|
|
+ int msix_enabled;
|
|
+ int intx_enabled;
|
|
+ cpumask_var_t *msix_affinity_masks;
|
|
+ char (*msix_names)[256];
|
|
+ unsigned int msix_vectors;
|
|
+ unsigned int msix_used_vectors;
|
|
+ bool per_vq_vectors;
|
|
+ struct virtqueue * (*setup_vq)(struct virtio_pci_device *, struct virtio_pci_vq_info *, unsigned int, void (*)(struct virtqueue *), const char *, bool, u16);
|
|
+ void (*del_vq)(struct virtio_pci_vq_info *);
|
|
+ u16 (*config_vector)(struct virtio_pci_device *, u16);
|
|
+};
|
|
+
|
|
+enum {
|
|
+ VP_MSIX_CONFIG_VECTOR = 0,
|
|
+ VP_MSIX_VQ_VECTOR = 1,
|
|
+};
|
|
+
|
|
+struct xsd_errors {
|
|
+ int errnum;
|
|
+ const char *errstring;
|
|
+};
|
|
+
|
|
+typedef uint32_t XENSTORE_RING_IDX;
|
|
+
|
|
+struct xenstore_domain_interface {
|
|
+ char req[1024];
|
|
+ char rsp[1024];
|
|
+ XENSTORE_RING_IDX req_cons;
|
|
+ XENSTORE_RING_IDX req_prod;
|
|
+ XENSTORE_RING_IDX rsp_cons;
|
|
+ XENSTORE_RING_IDX rsp_prod;
|
|
+};
|
|
+
|
|
+struct xenbus_watch {
|
|
+ struct list_head list;
|
|
+ const char *node;
|
|
+ unsigned int nr_pending;
|
|
+ bool (*will_handle)(struct xenbus_watch *, const char *, const char *);
|
|
+ void (*callback)(struct xenbus_watch *, const char *, const char *);
|
|
+};
|
|
+
|
|
+struct xenbus_transaction {
|
|
+ u32 id;
|
|
+};
|
|
+
|
|
+typedef uint32_t evtchn_port_t;
|
|
+
|
|
+struct evtchn_alloc_unbound {
|
|
+ domid_t dom;
|
|
+ domid_t remote_dom;
|
|
+ evtchn_port_t port;
|
|
+};
|
|
+
|
|
+struct evtchn_bind_interdomain {
|
|
+ domid_t remote_dom;
|
|
+ evtchn_port_t remote_port;
|
|
+ evtchn_port_t local_port;
|
|
+};
|
|
+
|
|
+struct evtchn_bind_virq {
|
|
+ uint32_t virq;
|
|
+ uint32_t vcpu;
|
|
+ evtchn_port_t port;
|
|
+};
|
|
+
|
|
+struct evtchn_bind_pirq {
|
|
+ uint32_t pirq;
|
|
+ uint32_t flags;
|
|
+ evtchn_port_t port;
|
|
+};
|
|
+
|
|
+struct evtchn_bind_ipi {
|
|
+ uint32_t vcpu;
|
|
+ evtchn_port_t port;
|
|
+};
|
|
+
|
|
+struct evtchn_close {
|
|
+ evtchn_port_t port;
|
|
+};
|
|
+
|
|
+struct evtchn_send {
|
|
+ evtchn_port_t port;
|
|
+};
|
|
+
|
|
+struct evtchn_status {
|
|
+ domid_t dom;
|
|
+ evtchn_port_t port;
|
|
+ uint32_t status;
|
|
+ uint32_t vcpu;
|
|
+ union {
|
|
+ struct {
|
|
+ domid_t dom;
|
|
+ } unbound;
|
|
+ struct {
|
|
+ domid_t dom;
|
|
+ evtchn_port_t port;
|
|
+ } interdomain;
|
|
+ uint32_t pirq;
|
|
+ uint32_t virq;
|
|
+ } u;
|
|
+};
|
|
+
|
|
+struct evtchn_bind_vcpu {
|
|
+ evtchn_port_t port;
|
|
+ uint32_t vcpu;
|
|
+};
|
|
+
|
|
+struct evtchn_unmask {
|
|
+ evtchn_port_t port;
|
|
+};
|
|
+
|
|
+struct evtchn_op {
|
|
+ uint32_t cmd;
|
|
+ union {
|
|
+ struct evtchn_alloc_unbound alloc_unbound;
|
|
+ struct evtchn_bind_interdomain bind_interdomain;
|
|
+ struct evtchn_bind_virq bind_virq;
|
|
+ struct evtchn_bind_pirq bind_pirq;
|
|
+ struct evtchn_bind_ipi bind_ipi;
|
|
+ struct evtchn_close close;
|
|
+ struct evtchn_send send;
|
|
+ struct evtchn_status status;
|
|
+ struct evtchn_bind_vcpu bind_vcpu;
|
|
+ struct evtchn_unmask unmask;
|
|
+ } u;
|
|
+};
|
|
+
|
|
+struct physdev_irq_status_query {
|
|
+ uint32_t irq;
|
|
+ uint32_t flags;
|
|
+};
|
|
+
|
|
+struct physdev_set_iopl {
|
|
+ uint32_t iopl;
|
|
+};
|
|
+
|
|
+struct physdev_set_iobitmap {
|
|
+ uint8_t *bitmap;
|
|
+ uint32_t nr_ports;
|
|
+};
|
|
+
|
|
+struct physdev_apic {
|
|
+ long unsigned int apic_physbase;
|
|
+ uint32_t reg;
|
|
+ uint32_t value;
|
|
+};
|
|
+
|
|
+struct physdev_irq {
|
|
+ uint32_t irq;
|
|
+ uint32_t vector;
|
|
+};
|
|
+
|
|
+struct physdev_op {
|
|
+ uint32_t cmd;
|
|
+ union {
|
|
+ struct physdev_irq_status_query irq_status_query;
|
|
+ struct physdev_set_iopl set_iopl;
|
|
+ struct physdev_set_iobitmap set_iobitmap;
|
|
+ struct physdev_apic apic_op;
|
|
+ struct physdev_irq irq_op;
|
|
+ } u;
|
|
+};
|
|
+
|
|
+typedef xen_pfn_t *__guest_handle_xen_pfn_t;
|
|
+
|
|
+typedef uint32_t grant_ref_t;
|
|
+
|
|
+struct grant_entry_v1 {
|
|
+ uint16_t flags;
|
|
+ domid_t domid;
|
|
+ uint32_t frame;
|
|
+};
|
|
+
|
|
+struct grant_entry_header {
|
|
+ uint16_t flags;
|
|
+ domid_t domid;
|
|
+};
|
|
+
|
|
+union grant_entry_v2 {
|
|
+ struct grant_entry_header hdr;
|
|
+ struct {
|
|
+ struct grant_entry_header hdr;
|
|
+ uint32_t pad0;
|
|
+ uint64_t frame;
|
|
+ } full_page;
|
|
+ struct {
|
|
+ struct grant_entry_header hdr;
|
|
+ uint16_t page_off;
|
|
+ uint16_t length;
|
|
+ uint64_t frame;
|
|
+ } sub_page;
|
|
+ struct {
|
|
+ struct grant_entry_header hdr;
|
|
+ domid_t trans_domid;
|
|
+ uint16_t pad0;
|
|
+ grant_ref_t gref;
|
|
+ } transitive;
|
|
+ uint32_t __spacer[4];
|
|
+};
|
|
+
|
|
+typedef uint32_t grant_handle_t;
|
|
+
|
|
+struct gnttab_map_grant_ref {
|
|
+ uint64_t host_addr;
|
|
+ uint32_t flags;
|
|
+ grant_ref_t ref;
|
|
+ domid_t dom;
|
|
+ int16_t status;
|
|
+ grant_handle_t handle;
|
|
+ uint64_t dev_bus_addr;
|
|
+};
|
|
+
|
|
+struct gnttab_unmap_grant_ref {
|
|
+ uint64_t host_addr;
|
|
+ uint64_t dev_bus_addr;
|
|
+ grant_handle_t handle;
|
|
+ int16_t status;
|
|
+};
|
|
+
|
|
+struct gnttab_setup_table {
|
|
+ domid_t dom;
|
|
+ uint32_t nr_frames;
|
|
+ int16_t status;
|
|
+ __guest_handle_xen_pfn_t frame_list;
|
|
+};
|
|
+
|
|
+struct gnttab_copy {
|
|
+ struct {
|
|
+ union {
|
|
+ grant_ref_t ref;
|
|
+ xen_pfn_t gmfn;
|
|
+ } u;
|
|
+ domid_t domid;
|
|
+ uint16_t offset;
|
|
+ } source;
|
|
+ struct {
|
|
+ union {
|
|
+ grant_ref_t ref;
|
|
+ xen_pfn_t gmfn;
|
|
+ } u;
|
|
+ domid_t domid;
|
|
+ uint16_t offset;
|
|
+ } dest;
|
|
+ uint16_t len;
|
|
+ uint16_t flags;
|
|
+ int16_t status;
|
|
+};
|
|
+
|
|
+struct gnttab_query_size {
|
|
+ domid_t dom;
|
|
+ uint32_t nr_frames;
|
|
+ uint32_t max_nr_frames;
|
|
+ int16_t status;
|
|
+};
|
|
+
|
|
+struct gnttab_set_version {
|
|
+ uint32_t version;
|
|
+};
|
|
+
|
|
+struct gnttab_get_status_frames {
|
|
+ uint32_t nr_frames;
|
|
+ domid_t dom;
|
|
+ int16_t status;
|
|
+ __guest_handle_uint64_t frame_list;
|
|
+};
|
|
+
|
|
+struct gnttab_free_callback {
|
|
+ struct gnttab_free_callback *next;
|
|
+ void (*fn)(void *);
|
|
+ void *arg;
|
|
+ u16 count;
|
|
+};
|
|
+
|
|
+struct gntab_unmap_queue_data;
|
|
+
|
|
+typedef void (*gnttab_unmap_refs_done)(int, struct gntab_unmap_queue_data *);
|
|
+
|
|
+struct gntab_unmap_queue_data {
|
|
+ struct delayed_work gnttab_work;
|
|
+ void *data;
|
|
+ gnttab_unmap_refs_done done;
|
|
+ struct gnttab_unmap_grant_ref *unmap_ops;
|
|
+ struct gnttab_unmap_grant_ref *kunmap_ops;
|
|
+ struct page **pages;
|
|
+ unsigned int count;
|
|
+ unsigned int age;
|
|
+};
|
|
+
|
|
+struct xen_page_foreign {
|
|
+ domid_t domid;
|
|
+ grant_ref_t gref;
|
|
+};
|
|
+
|
|
+typedef void (*xen_grant_fn_t)(long unsigned int, unsigned int, unsigned int, void *);
|
|
+
|
|
+struct balloon_stats {
|
|
+ long unsigned int current_pages;
|
|
+ long unsigned int target_pages;
|
|
+ long unsigned int target_unpopulated;
|
|
+ long unsigned int balloon_low;
|
|
+ long unsigned int balloon_high;
|
|
+ long unsigned int total_pages;
|
|
+ long unsigned int schedule_delay;
|
|
+ long unsigned int max_schedule_delay;
|
|
+ long unsigned int retry_count;
|
|
+ long unsigned int max_retry_count;
|
|
+};
|
|
+
|
|
+struct gnttab_ops {
|
|
+ unsigned int version;
|
|
+ unsigned int grefs_per_grant_frame;
|
|
+ int (*map_frames)(xen_pfn_t *, unsigned int);
|
|
+ void (*unmap_frames)();
|
|
+ void (*update_entry)(grant_ref_t, domid_t, long unsigned int, unsigned int);
|
|
+ int (*end_foreign_access_ref)(grant_ref_t, int);
|
|
+ long unsigned int (*end_foreign_transfer_ref)(grant_ref_t);
|
|
+ int (*query_foreign_access)(grant_ref_t);
|
|
+};
|
|
+
|
|
+struct unmap_refs_callback_data {
|
|
+ struct completion completion;
|
|
+ int result;
|
|
+};
|
|
+
|
|
+struct deferred_entry {
|
|
+ struct list_head list;
|
|
+ grant_ref_t ref;
|
|
+ bool ro;
|
|
+ uint16_t warn_delay;
|
|
+ struct page *page;
|
|
+};
|
|
+
|
|
+struct xen_feature_info {
|
|
+ unsigned int submap_idx;
|
|
+ uint32_t submap;
|
|
+};
|
|
+
|
|
+enum bp_state {
|
|
+ BP_DONE = 0,
|
|
+ BP_WAIT = 1,
|
|
+ BP_EAGAIN = 2,
|
|
+ BP_ECANCELED = 3,
|
|
+};
|
|
+
|
|
+enum shutdown_state {
|
|
+ SHUTDOWN_INVALID = -1,
|
|
+ SHUTDOWN_POWEROFF = 0,
|
|
+ SHUTDOWN_SUSPEND = 2,
|
|
+ SHUTDOWN_HALT = 4,
|
|
+};
|
|
+
|
|
+struct suspend_info {
|
|
+ int cancelled;
|
|
+};
|
|
+
|
|
+struct shutdown_handler {
|
|
+ const char command[11];
|
|
+ bool flag;
|
|
+ void (*cb)();
|
|
+};
|
|
+
|
|
+struct vcpu_runstate_info {
|
|
+ int state;
|
|
+ uint64_t state_entry_time;
|
|
+ uint64_t time[4];
|
|
+};
|
|
+
|
|
+typedef struct vcpu_runstate_info *__guest_handle_vcpu_runstate_info;
|
|
+
|
|
+struct vcpu_register_runstate_memory_area {
|
|
+ union {
|
|
+ __guest_handle_vcpu_runstate_info h;
|
|
+ struct vcpu_runstate_info *v;
|
|
+ uint64_t p;
|
|
+ } addr;
|
|
+};
|
|
+
|
|
+struct xen_memory_reservation {
|
|
+ __guest_handle_xen_pfn_t extent_start;
|
|
+ xen_ulong_t nr_extents;
|
|
+ unsigned int extent_order;
|
|
+ unsigned int address_bits;
|
|
+ domid_t domid;
|
|
+};
|
|
+
|
|
+struct xen_pci_frontend_ops {
|
|
+ int (*enable_msi)(struct pci_dev *, int *);
|
|
+ void (*disable_msi)(struct pci_dev *);
|
|
+ int (*enable_msix)(struct pci_dev *, int *, int);
|
|
+ void (*disable_msix)(struct pci_dev *);
|
|
+};
|
|
+
|
|
+typedef evtchn_port_t *__guest_handle_evtchn_port_t;
|
|
+
|
|
+struct evtchn_set_priority {
|
|
+ uint32_t port;
|
|
+ uint32_t priority;
|
|
+};
|
|
+
|
|
+struct sched_poll {
|
|
+ __guest_handle_evtchn_port_t ports;
|
|
+ unsigned int nr_ports;
|
|
+ uint64_t timeout;
|
|
+};
|
|
+
|
|
+struct physdev_eoi {
|
|
+ uint32_t irq;
|
|
+};
|
|
+
|
|
+struct physdev_pirq_eoi_gmfn {
|
|
+ xen_ulong_t gmfn;
|
|
+};
|
|
+
|
|
+struct physdev_map_pirq {
|
|
+ domid_t domid;
|
|
+ int type;
|
|
+ int index;
|
|
+ int pirq;
|
|
+ int bus;
|
|
+ int devfn;
|
|
+ int entry_nr;
|
|
+ uint64_t table_base;
|
|
+};
|
|
+
|
|
+struct physdev_unmap_pirq {
|
|
+ domid_t domid;
|
|
+ int pirq;
|
|
+};
|
|
+
|
|
+struct physdev_get_free_pirq {
|
|
+ int type;
|
|
+ uint32_t pirq;
|
|
+};
|
|
+
|
|
+struct xen_hvm_param {
|
|
+ domid_t domid;
|
|
+ uint32_t index;
|
|
+ uint64_t value;
|
|
+};
|
|
+
|
|
+enum xen_irq_type {
|
|
+ IRQT_UNBOUND = 0,
|
|
+ IRQT_PIRQ = 1,
|
|
+ IRQT_VIRQ = 2,
|
|
+ IRQT_IPI = 3,
|
|
+ IRQT_EVTCHN = 4,
|
|
+};
|
|
+
|
|
+struct irq_info {
|
|
+ struct list_head list;
|
|
+ struct list_head eoi_list;
|
|
+ int refcnt;
|
|
+ enum xen_irq_type type;
|
|
+ unsigned int irq;
|
|
+ unsigned int evtchn;
|
|
+ short unsigned int cpu;
|
|
+ short unsigned int eoi_cpu;
|
|
+ unsigned int irq_epoch;
|
|
+ u64 eoi_time;
|
|
+ union {
|
|
+ short unsigned int virq;
|
|
+ enum ipi_vector ipi;
|
|
+ struct {
|
|
+ short unsigned int pirq;
|
|
+ short unsigned int gsi;
|
|
+ unsigned char vector;
|
|
+ unsigned char flags;
|
|
+ uint16_t domid;
|
|
+ } pirq;
|
|
+ } u;
|
|
+};
|
|
+
|
|
+struct evtchn_loop_ctrl;
|
|
+
|
|
+struct evtchn_ops {
|
|
+ unsigned int (*max_channels)();
|
|
+ unsigned int (*nr_channels)();
|
|
+ int (*setup)(struct irq_info *);
|
|
+ void (*bind_to_cpu)(struct irq_info *, unsigned int);
|
|
+ void (*clear_pending)(unsigned int);
|
|
+ void (*set_pending)(unsigned int);
|
|
+ bool (*is_pending)(unsigned int);
|
|
+ bool (*test_and_set_mask)(unsigned int);
|
|
+ void (*mask)(unsigned int);
|
|
+ void (*unmask)(unsigned int);
|
|
+ void (*handle_events)(unsigned int, struct evtchn_loop_ctrl *);
|
|
+ void (*resume)();
|
|
+ int (*percpu_init)(unsigned int);
|
|
+ int (*percpu_deinit)(unsigned int);
|
|
+};
|
|
+
|
|
+struct evtchn_loop_ctrl {
|
|
+ ktime_t timeout;
|
|
+ unsigned int count;
|
|
+ bool defer_eoi;
|
|
+};
|
|
+
|
|
+struct lateeoi_work {
|
|
+ struct delayed_work delayed;
|
|
+ spinlock_t eoi_list_lock;
|
|
+ struct list_head eoi_list;
|
|
+};
|
|
+
|
|
+struct evtchn_init_control {
|
|
+ uint64_t control_gfn;
|
|
+ uint32_t offset;
|
|
+ uint32_t vcpu;
|
|
+ uint8_t link_bits;
|
|
+ uint8_t _pad[7];
|
|
+};
|
|
+
|
|
+struct evtchn_expand_array {
|
|
+ uint64_t array_gfn;
|
|
+};
|
|
+
|
|
+typedef uint32_t event_word_t;
|
|
+
|
|
+struct evtchn_fifo_control_block {
|
|
+ uint32_t ready;
|
|
+ uint32_t _rsvd;
|
|
+ event_word_t head[16];
|
|
+};
|
|
+
|
|
+struct evtchn_fifo_queue {
|
|
+ uint32_t head[16];
|
|
+};
|
|
+
|
|
+enum xenbus_state {
|
|
+ XenbusStateUnknown = 0,
|
|
+ XenbusStateInitialising = 1,
|
|
+ XenbusStateInitWait = 2,
|
|
+ XenbusStateInitialised = 3,
|
|
+ XenbusStateConnected = 4,
|
|
+ XenbusStateClosing = 5,
|
|
+ XenbusStateClosed = 6,
|
|
+ XenbusStateReconfiguring = 7,
|
|
+ XenbusStateReconfigured = 8,
|
|
+};
|
|
+
|
|
+struct xenbus_device {
|
|
+ const char *devicetype;
|
|
+ const char *nodename;
|
|
+ const char *otherend;
|
|
+ int otherend_id;
|
|
+ struct xenbus_watch otherend_watch;
|
|
+ struct device dev;
|
|
+ enum xenbus_state state;
|
|
+ struct completion down;
|
|
+ struct work_struct work;
|
|
+};
|
|
+
|
|
+enum xenstore_init {
|
|
+ XS_UNKNOWN = 0,
|
|
+ XS_PV = 1,
|
|
+ XS_HVM = 2,
|
|
+ XS_LOCAL = 3,
|
|
+};
|
|
+
|
|
+struct xenbus_map_node {
|
|
+ struct list_head next;
|
|
+ union {
|
|
+ struct {
|
|
+ struct vm_struct *area;
|
|
+ } pv;
|
|
+ struct {
|
|
+ struct page *pages[16];
|
|
+ long unsigned int addrs[16];
|
|
+ void *addr;
|
|
+ } hvm;
|
|
+ };
|
|
+ grant_handle_t handles[16];
|
|
+ unsigned int nr_handles;
|
|
+};
|
|
+
|
|
+struct xenbus_ring_ops {
|
|
+ int (*map)(struct xenbus_device *, grant_ref_t *, unsigned int, void **);
|
|
+ int (*unmap)(struct xenbus_device *, void *);
|
|
+};
|
|
+
|
|
+struct map_ring_valloc_hvm {
|
|
+ unsigned int idx;
|
|
+ phys_addr_t phys_addrs[16];
|
|
+ long unsigned int addrs[16];
|
|
+};
|
|
+
|
|
+struct unmap_ring_vfree_hvm {
|
|
+ unsigned int idx;
|
|
+ long unsigned int addrs[16];
|
|
+};
|
|
+
|
|
+enum xsd_sockmsg_type {
|
|
+ XS_DEBUG = 0,
|
|
+ XS_DIRECTORY = 1,
|
|
+ XS_READ = 2,
|
|
+ XS_GET_PERMS = 3,
|
|
+ XS_WATCH = 4,
|
|
+ XS_UNWATCH = 5,
|
|
+ XS_TRANSACTION_START = 6,
|
|
+ XS_TRANSACTION_END = 7,
|
|
+ XS_INTRODUCE = 8,
|
|
+ XS_RELEASE = 9,
|
|
+ XS_GET_DOMAIN_PATH = 10,
|
|
+ XS_WRITE = 11,
|
|
+ XS_MKDIR = 12,
|
|
+ XS_RM = 13,
|
|
+ XS_SET_PERMS = 14,
|
|
+ XS_WATCH_EVENT = 15,
|
|
+ XS_ERROR = 16,
|
|
+ XS_IS_DOMAIN_INTRODUCED = 17,
|
|
+ XS_RESUME = 18,
|
|
+ XS_SET_TARGET = 19,
|
|
+ XS_RESTRICT = 20,
|
|
+ XS_RESET_WATCHES = 21,
|
|
+};
|
|
+
|
|
+struct xsd_sockmsg {
|
|
+ uint32_t type;
|
|
+ uint32_t req_id;
|
|
+ uint32_t tx_id;
|
|
+ uint32_t len;
|
|
+};
|
|
+
|
|
+struct xs_watch_event {
|
|
+ struct list_head list;
|
|
+ unsigned int len;
|
|
+ struct xenbus_watch *handle;
|
|
+ const char *path;
|
|
+ const char *token;
|
|
+ char body[0];
|
|
+};
|
|
+
|
|
+enum xb_req_state {
|
|
+ xb_req_state_queued = 0,
|
|
+ xb_req_state_wait_reply = 1,
|
|
+ xb_req_state_got_reply = 2,
|
|
+ xb_req_state_aborted = 3,
|
|
+};
|
|
+
|
|
+struct xb_req_data {
|
|
+ struct list_head list;
|
|
+ wait_queue_head_t wq;
|
|
+ struct xsd_sockmsg msg;
|
|
+ uint32_t caller_req_id;
|
|
+ enum xsd_sockmsg_type type;
|
|
+ char *body;
|
|
+ const struct kvec *vec;
|
|
+ int num_vecs;
|
|
+ int err;
|
|
+ enum xb_req_state state;
|
|
+ bool user_req;
|
|
+ void (*cb)(struct xb_req_data *);
|
|
+ void *par;
|
|
+};
|
|
+
|
|
+struct xenbus_device_id {
|
|
+ char devicetype[32];
|
|
+};
|
|
+
|
|
+struct xenbus_driver {
|
|
+ const char *name;
|
|
+ const struct xenbus_device_id *ids;
|
|
+ int (*probe)(struct xenbus_device *, const struct xenbus_device_id *);
|
|
+ void (*otherend_changed)(struct xenbus_device *, enum xenbus_state);
|
|
+ int (*remove)(struct xenbus_device *);
|
|
+ int (*suspend)(struct xenbus_device *);
|
|
+ int (*resume)(struct xenbus_device *);
|
|
+ int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *);
|
|
+ struct device_driver driver;
|
|
+ int (*read_otherend_details)(struct xenbus_device *);
|
|
+ int (*is_ready)(struct xenbus_device *);
|
|
+};
|
|
+
|
|
+struct xen_bus_type {
|
|
+ char *root;
|
|
+ unsigned int levels;
|
|
+ int (*get_bus_id)(char *, const char *);
|
|
+ int (*probe)(struct xen_bus_type *, const char *, const char *);
|
|
+ bool (*otherend_will_handle)(struct xenbus_watch *, const char *, const char *);
|
|
+ void (*otherend_changed)(struct xenbus_watch *, const char *, const char *);
|
|
+ struct bus_type bus;
|
|
+};
|
|
+
|
|
+struct xb_find_info {
|
|
+ struct xenbus_device *dev;
|
|
+ const char *nodename;
|
|
+};
|
|
+
|
|
+struct xenbus_transaction_holder {
|
|
+ struct list_head list;
|
|
+ struct xenbus_transaction handle;
|
|
+ unsigned int generation_id;
|
|
+};
|
|
+
|
|
+struct read_buffer {
|
|
+ struct list_head list;
|
|
+ unsigned int cons;
|
|
+ unsigned int len;
|
|
+ char msg[0];
|
|
+};
|
|
+
|
|
+struct xenbus_file_priv {
|
|
+ struct mutex msgbuffer_mutex;
|
|
+ struct list_head transactions;
|
|
+ struct list_head watches;
|
|
+ unsigned int len;
|
|
+ union {
|
|
+ struct xsd_sockmsg msg;
|
|
+ char buffer[4096];
|
|
+ } u;
|
|
+ struct mutex reply_mutex;
|
|
+ struct list_head read_buffers;
|
|
+ wait_queue_head_t read_waitq;
|
|
+ struct kref kref;
|
|
+ struct work_struct wq;
|
|
+};
|
|
+
|
|
+struct watch_adapter {
|
|
+ struct list_head list;
|
|
+ struct xenbus_watch watch;
|
|
+ struct xenbus_file_priv *dev_data;
|
|
+ char *token;
|
|
+};
|
|
+
|
|
+typedef uint8_t xen_domain_handle_t[16];
|
|
+
|
|
+struct xen_compile_info {
|
|
+ char compiler[64];
|
|
+ char compile_by[16];
|
|
+ char compile_domain[32];
|
|
+ char compile_date[32];
|
|
+};
|
|
+
|
|
+struct xen_platform_parameters {
|
|
+ xen_ulong_t virt_start;
|
|
+};
|
|
+
|
|
+struct xen_build_id {
|
|
+ uint32_t len;
|
|
+ unsigned char buf[0];
|
|
+};
|
|
+
|
|
+struct hyp_sysfs_attr {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct hyp_sysfs_attr *, char *);
|
|
+ ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t);
|
|
+ void *hyp_attr_data;
|
|
+};
|
|
+
|
|
+enum xen_swiotlb_err {
|
|
+ XEN_SWIOTLB_UNKNOWN = 0,
|
|
+ XEN_SWIOTLB_ENOMEM = 1,
|
|
+ XEN_SWIOTLB_EFIXUP = 2,
|
|
+};
|
|
+
|
|
+typedef int *__guest_handle_int;
|
|
+
|
|
+typedef xen_ulong_t *__guest_handle_xen_ulong_t;
|
|
+
|
|
+struct xen_add_to_physmap_range {
|
|
+ domid_t domid;
|
|
+ uint16_t space;
|
|
+ uint16_t size;
|
|
+ domid_t foreign_domid;
|
|
+ __guest_handle_xen_ulong_t idxs;
|
|
+ __guest_handle_xen_pfn_t gpfns;
|
|
+ __guest_handle_int errs;
|
|
+};
|
|
+
|
|
+struct xen_remove_from_physmap {
|
|
+ domid_t domid;
|
|
+ xen_pfn_t gpfn;
|
|
+};
|
|
+
|
|
+typedef void (*xen_gfn_fn_t)(long unsigned int, void *);
|
|
+
|
|
+struct xen_remap_gfn_info;
|
|
+
|
|
+struct remap_data___2 {
|
|
+ xen_pfn_t *fgfn;
|
|
+ int nr_fgfn;
|
|
+ pgprot_t prot;
|
|
+ domid_t domid;
|
|
+ struct vm_area_struct *vma;
|
|
+ int index;
|
|
+ struct page **pages;
|
|
+ struct xen_remap_gfn_info *info;
|
|
+ int *err_ptr;
|
|
+ int mapped;
|
|
+ int h_errs[1];
|
|
+ xen_ulong_t h_idxs[1];
|
|
+ xen_pfn_t h_gpfns[1];
|
|
+ int h_iter;
|
|
+};
|
|
+
|
|
+struct map_balloon_pages {
|
|
+ xen_pfn_t *pfns;
|
|
+ unsigned int idx;
|
|
+};
|
|
+
|
|
+struct n_tty_data {
|
|
+ size_t read_head;
|
|
+ size_t commit_head;
|
|
+ size_t canon_head;
|
|
+ size_t echo_head;
|
|
+ size_t echo_commit;
|
|
+ size_t echo_mark;
|
|
+ long unsigned int char_map[4];
|
|
+ long unsigned int overrun_time;
|
|
+ int num_overrun;
|
|
+ bool no_room;
|
|
+ unsigned char lnext: 1;
|
|
+ unsigned char erasing: 1;
|
|
+ unsigned char raw: 1;
|
|
+ unsigned char real_raw: 1;
|
|
+ unsigned char icanon: 1;
|
|
+ unsigned char push: 1;
|
|
+ char read_buf[4096];
|
|
+ long unsigned int read_flags[64];
|
|
+ unsigned char echo_buf[4096];
|
|
+ size_t read_tail;
|
|
+ size_t line_start;
|
|
+ unsigned int column;
|
|
+ unsigned int canon_column;
|
|
+ size_t echo_tail;
|
|
+ struct mutex atomic_read_lock;
|
|
+ struct mutex output_lock;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ ERASE = 0,
|
|
+ WERASE = 1,
|
|
+ KILL = 2,
|
|
+};
|
|
+
|
|
+struct termios {
|
|
+ tcflag_t c_iflag;
|
|
+ tcflag_t c_oflag;
|
|
+ tcflag_t c_cflag;
|
|
+ tcflag_t c_lflag;
|
|
+ cc_t c_line;
|
|
+ cc_t c_cc[19];
|
|
+};
|
|
+
|
|
+struct termios2 {
|
|
+ tcflag_t c_iflag;
|
|
+ tcflag_t c_oflag;
|
|
+ tcflag_t c_cflag;
|
|
+ tcflag_t c_lflag;
|
|
+ cc_t c_line;
|
|
+ cc_t c_cc[19];
|
|
+ speed_t c_ispeed;
|
|
+ speed_t c_ospeed;
|
|
+};
|
|
+
|
|
+struct termio {
|
|
+ short unsigned int c_iflag;
|
|
+ short unsigned int c_oflag;
|
|
+ short unsigned int c_cflag;
|
|
+ short unsigned int c_lflag;
|
|
+ unsigned char c_line;
|
|
+ unsigned char c_cc[8];
|
|
+};
|
|
+
|
|
+struct ldsem_waiter {
|
|
+ struct list_head list;
|
|
+ struct task_struct *task;
|
|
+};
|
|
+
|
|
+struct pts_fs_info___2;
|
|
+
|
|
+struct tty_audit_buf {
|
|
+ struct mutex mutex;
|
|
+ dev_t dev;
|
|
+ unsigned int icanon: 1;
|
|
+ size_t valid;
|
|
+ unsigned char *data;
|
|
+};
|
|
+
|
|
+struct sysrq_state {
|
|
+ struct input_handle handle;
|
|
+ struct work_struct reinject_work;
|
|
+ long unsigned int key_down[12];
|
|
+ unsigned int alt;
|
|
+ unsigned int alt_use;
|
|
+ bool active;
|
|
+ bool need_reinject;
|
|
+ bool reinjecting;
|
|
+ bool reset_canceled;
|
|
+ bool reset_requested;
|
|
+ long unsigned int reset_keybit[12];
|
|
+ int reset_seq_len;
|
|
+ int reset_seq_cnt;
|
|
+ int reset_seq_version;
|
|
+ struct timer_list keyreset_timer;
|
|
+};
|
|
+
|
|
+struct consolefontdesc {
|
|
+ short unsigned int charcount;
|
|
+ short unsigned int charheight;
|
|
+ char *chardata;
|
|
+};
|
|
+
|
|
+struct unipair {
|
|
+ short unsigned int unicode;
|
|
+ short unsigned int fontpos;
|
|
+};
|
|
+
|
|
+struct unimapdesc {
|
|
+ short unsigned int entry_ct;
|
|
+ struct unipair *entries;
|
|
+};
|
|
+
|
|
+struct kbdiacruc {
|
|
+ unsigned int diacr;
|
|
+ unsigned int base;
|
|
+ unsigned int result;
|
|
+};
|
|
+
|
|
+struct kbd_repeat {
|
|
+ int delay;
|
|
+ int period;
|
|
+};
|
|
+
|
|
+struct console_font_op {
|
|
+ unsigned int op;
|
|
+ unsigned int flags;
|
|
+ unsigned int width;
|
|
+ unsigned int height;
|
|
+ unsigned int charcount;
|
|
+ unsigned char *data;
|
|
+};
|
|
+
|
|
+struct vt_stat {
|
|
+ short unsigned int v_active;
|
|
+ short unsigned int v_signal;
|
|
+ short unsigned int v_state;
|
|
+};
|
|
+
|
|
+struct vt_sizes {
|
|
+ short unsigned int v_rows;
|
|
+ short unsigned int v_cols;
|
|
+ short unsigned int v_scrollsize;
|
|
+};
|
|
+
|
|
+struct vt_consize {
|
|
+ short unsigned int v_rows;
|
|
+ short unsigned int v_cols;
|
|
+ short unsigned int v_vlin;
|
|
+ short unsigned int v_clin;
|
|
+ short unsigned int v_vcol;
|
|
+ short unsigned int v_ccol;
|
|
+};
|
|
+
|
|
+struct vt_event {
|
|
+ unsigned int event;
|
|
+ unsigned int oldev;
|
|
+ unsigned int newev;
|
|
+ unsigned int pad[4];
|
|
+};
|
|
+
|
|
+struct vt_setactivate {
|
|
+ unsigned int console;
|
|
+ struct vt_mode mode;
|
|
+};
|
|
+
|
|
+struct vt_event_wait {
|
|
+ struct list_head list;
|
|
+ struct vt_event event;
|
|
+ int done;
|
|
+};
|
|
+
|
|
+struct compat_consolefontdesc {
|
|
+ short unsigned int charcount;
|
|
+ short unsigned int charheight;
|
|
+ compat_caddr_t chardata;
|
|
+};
|
|
+
|
|
+struct compat_console_font_op {
|
|
+ compat_uint_t op;
|
|
+ compat_uint_t flags;
|
|
+ compat_uint_t width;
|
|
+ compat_uint_t height;
|
|
+ compat_uint_t charcount;
|
|
+ compat_caddr_t data;
|
|
+};
|
|
+
|
|
+struct compat_unimapdesc {
|
|
+ short unsigned int entry_ct;
|
|
+ compat_caddr_t entries;
|
|
+};
|
|
+
|
|
+struct vt_notifier_param {
|
|
+ struct vc_data *vc;
|
|
+ unsigned int c;
|
|
+};
|
|
+
|
|
+struct vcs_poll_data {
|
|
+ struct notifier_block notifier;
|
|
+ unsigned int cons_num;
|
|
+ bool seen_last_update;
|
|
+ wait_queue_head_t waitq;
|
|
+ struct fasync_struct *fasync;
|
|
+};
|
|
+
|
|
+struct tiocl_selection {
|
|
+ short unsigned int xs;
|
|
+ short unsigned int ys;
|
|
+ short unsigned int xe;
|
|
+ short unsigned int ye;
|
|
+ short unsigned int sel_mode;
|
|
+};
|
|
+
|
|
+struct keyboard_notifier_param {
|
|
+ struct vc_data *vc;
|
|
+ int down;
|
|
+ int shift;
|
|
+ int ledstate;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+struct kbd_struct {
|
|
+ unsigned char lockstate;
|
|
+ unsigned char slockstate;
|
|
+ unsigned char ledmode: 1;
|
|
+ unsigned char ledflagstate: 4;
|
|
+ char: 3;
|
|
+ unsigned char default_ledflagstate: 4;
|
|
+ unsigned char kbdmode: 3;
|
|
+ char: 1;
|
|
+ unsigned char modeflags: 5;
|
|
+};
|
|
+
|
|
+struct kbentry {
|
|
+ unsigned char kb_table;
|
|
+ unsigned char kb_index;
|
|
+ short unsigned int kb_value;
|
|
+};
|
|
+
|
|
+struct kbsentry {
|
|
+ unsigned char kb_func;
|
|
+ unsigned char kb_string[512];
|
|
+};
|
|
+
|
|
+struct kbdiacr {
|
|
+ unsigned char diacr;
|
|
+ unsigned char base;
|
|
+ unsigned char result;
|
|
+};
|
|
+
|
|
+struct kbdiacrs {
|
|
+ unsigned int kb_cnt;
|
|
+ struct kbdiacr kbdiacr[256];
|
|
+};
|
|
+
|
|
+struct kbdiacrsuc {
|
|
+ unsigned int kb_cnt;
|
|
+ struct kbdiacruc kbdiacruc[256];
|
|
+};
|
|
+
|
|
+struct kbkeycode {
|
|
+ unsigned int scancode;
|
|
+ unsigned int keycode;
|
|
+};
|
|
+
|
|
+typedef void k_handler_fn(struct vc_data *, unsigned char, char);
|
|
+
|
|
+typedef void fn_handler_fn(struct vc_data *);
|
|
+
|
|
+struct getset_keycode_data {
|
|
+ struct input_keymap_entry ke;
|
|
+ int error;
|
|
+};
|
|
+
|
|
+struct kbd_led_trigger {
|
|
+ struct led_trigger trigger;
|
|
+ unsigned int mask;
|
|
+};
|
|
+
|
|
+struct uni_pagedir {
|
|
+ u16 **uni_pgdir[32];
|
|
+ long unsigned int refcount;
|
|
+ long unsigned int sum;
|
|
+ unsigned char *inverse_translations[4];
|
|
+ u16 *inverse_trans_unicode;
|
|
+};
|
|
+
|
|
+typedef uint32_t char32_t;
|
|
+
|
|
+struct uni_screen {
|
|
+ char32_t *lines[0];
|
|
+};
|
|
+
|
|
+struct con_driver {
|
|
+ const struct consw *con;
|
|
+ const char *desc;
|
|
+ struct device *dev;
|
|
+ int node;
|
|
+ int first;
|
|
+ int last;
|
|
+ int flag;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ blank_off = 0,
|
|
+ blank_normal_wait = 1,
|
|
+ blank_vesa_wait = 2,
|
|
+};
|
|
+
|
|
+struct rgb {
|
|
+ u8 r;
|
|
+ u8 g;
|
|
+ u8 b;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ ESnormal = 0,
|
|
+ ESesc = 1,
|
|
+ ESsquare = 2,
|
|
+ ESgetpars = 3,
|
|
+ ESfunckey = 4,
|
|
+ EShash = 5,
|
|
+ ESsetG0 = 6,
|
|
+ ESsetG1 = 7,
|
|
+ ESpercent = 8,
|
|
+ ESignore = 9,
|
|
+ ESnonstd = 10,
|
|
+ ESpalette = 11,
|
|
+ ESosc = 12,
|
|
+};
|
|
+
|
|
+struct interval {
|
|
+ uint32_t first;
|
|
+ uint32_t last;
|
|
+};
|
|
+
|
|
+struct hv_ops;
|
|
+
|
|
+struct hvc_struct {
|
|
+ struct tty_port port;
|
|
+ spinlock_t lock;
|
|
+ int index;
|
|
+ int do_wakeup;
|
|
+ char *outbuf;
|
|
+ int outbuf_size;
|
|
+ int n_outbuf;
|
|
+ uint32_t vtermno;
|
|
+ const struct hv_ops *ops;
|
|
+ int irq_requested;
|
|
+ int data;
|
|
+ struct winsize ws;
|
|
+ struct work_struct tty_resize;
|
|
+ struct list_head next;
|
|
+ long unsigned int flags;
|
|
+};
|
|
+
|
|
+struct hv_ops {
|
|
+ int (*get_chars)(uint32_t, char *, int);
|
|
+ int (*put_chars)(uint32_t, const char *, int);
|
|
+ int (*flush)(uint32_t, bool);
|
|
+ int (*notifier_add)(struct hvc_struct *, int);
|
|
+ void (*notifier_del)(struct hvc_struct *, int);
|
|
+ void (*notifier_hangup)(struct hvc_struct *, int);
|
|
+ int (*tiocmget)(struct hvc_struct *);
|
|
+ int (*tiocmset)(struct hvc_struct *, unsigned int, unsigned int);
|
|
+ void (*dtr_rts)(struct hvc_struct *, int);
|
|
+};
|
|
+
|
|
+typedef uint32_t XENCONS_RING_IDX;
|
|
+
|
|
+struct xencons_interface {
|
|
+ char in[1024];
|
|
+ char out[2048];
|
|
+ XENCONS_RING_IDX in_cons;
|
|
+ XENCONS_RING_IDX in_prod;
|
|
+ XENCONS_RING_IDX out_cons;
|
|
+ XENCONS_RING_IDX out_prod;
|
|
+};
|
|
+
|
|
+struct xencons_info {
|
|
+ struct list_head list;
|
|
+ struct xenbus_device *xbdev;
|
|
+ struct xencons_interface *intf;
|
|
+ unsigned int evtchn;
|
|
+ struct hvc_struct *hvc;
|
|
+ int irq;
|
|
+ int vtermno;
|
|
+ grant_ref_t gntref;
|
|
+};
|
|
+
|
|
+struct uart_driver {
|
|
+ struct module *owner;
|
|
+ const char *driver_name;
|
|
+ const char *dev_name;
|
|
+ int major;
|
|
+ int minor;
|
|
+ int nr;
|
|
+ struct console *cons;
|
|
+ struct uart_state *state;
|
|
+ struct tty_driver *tty_driver;
|
|
+};
|
|
+
|
|
+struct uart_match {
|
|
+ struct uart_port *port;
|
|
+ struct uart_driver *driver;
|
|
+};
|
|
+
|
|
+enum hwparam_type {
|
|
+ hwparam_ioport = 0,
|
|
+ hwparam_iomem = 1,
|
|
+ hwparam_ioport_or_iomem = 2,
|
|
+ hwparam_irq = 3,
|
|
+ hwparam_dma = 4,
|
|
+ hwparam_dma_addr = 5,
|
|
+ hwparam_other = 6,
|
|
+};
|
|
+
|
|
+struct plat_serial8250_port {
|
|
+ long unsigned int iobase;
|
|
+ void *membase;
|
|
+ resource_size_t mapbase;
|
|
+ unsigned int irq;
|
|
+ long unsigned int irqflags;
|
|
+ unsigned int uartclk;
|
|
+ void *private_data;
|
|
+ unsigned char regshift;
|
|
+ unsigned char iotype;
|
|
+ unsigned char hub6;
|
|
+ upf_t flags;
|
|
+ unsigned int type;
|
|
+ unsigned int (*serial_in)(struct uart_port *, int);
|
|
+ void (*serial_out)(struct uart_port *, int, int);
|
|
+ void (*set_termios)(struct uart_port *, struct ktermios *, struct ktermios *);
|
|
+ void (*set_ldisc)(struct uart_port *, struct ktermios *);
|
|
+ unsigned int (*get_mctrl)(struct uart_port *);
|
|
+ int (*handle_irq)(struct uart_port *);
|
|
+ void (*pm)(struct uart_port *, unsigned int, unsigned int);
|
|
+ void (*handle_break)(struct uart_port *);
|
|
+};
|
|
+
|
|
+enum {
|
|
+ PLAT8250_DEV_LEGACY = -1,
|
|
+ PLAT8250_DEV_PLATFORM = 0,
|
|
+ PLAT8250_DEV_PLATFORM1 = 1,
|
|
+ PLAT8250_DEV_PLATFORM2 = 2,
|
|
+ PLAT8250_DEV_FOURPORT = 3,
|
|
+ PLAT8250_DEV_ACCENT = 4,
|
|
+ PLAT8250_DEV_BOCA = 5,
|
|
+ PLAT8250_DEV_EXAR_ST16C554 = 6,
|
|
+ PLAT8250_DEV_HUB6 = 7,
|
|
+ PLAT8250_DEV_AU1X00 = 8,
|
|
+ PLAT8250_DEV_SM501 = 9,
|
|
+};
|
|
+
|
|
+struct uart_8250_port;
|
|
+
|
|
+struct uart_8250_ops {
|
|
+ int (*setup_irq)(struct uart_8250_port *);
|
|
+ void (*release_irq)(struct uart_8250_port *);
|
|
+};
|
|
+
|
|
+struct uart_8250_dma;
|
|
+
|
|
+struct uart_8250_em485;
|
|
+
|
|
+struct uart_8250_port {
|
|
+ struct uart_port port;
|
|
+ struct timer_list timer;
|
|
+ struct list_head list;
|
|
+ u32 capabilities;
|
|
+ short unsigned int bugs;
|
|
+ bool fifo_bug;
|
|
+ unsigned int tx_loadsz;
|
|
+ unsigned char acr;
|
|
+ unsigned char fcr;
|
|
+ unsigned char ier;
|
|
+ unsigned char lcr;
|
|
+ unsigned char mcr;
|
|
+ unsigned char mcr_mask;
|
|
+ unsigned char mcr_force;
|
|
+ unsigned char cur_iotype;
|
|
+ unsigned int rpm_tx_active;
|
|
+ unsigned char canary;
|
|
+ unsigned char probe;
|
|
+ unsigned char lsr_saved_flags;
|
|
+ unsigned char msr_saved_flags;
|
|
+ struct uart_8250_dma *dma;
|
|
+ const struct uart_8250_ops *ops;
|
|
+ int (*dl_read)(struct uart_8250_port *);
|
|
+ void (*dl_write)(struct uart_8250_port *, int);
|
|
+ struct uart_8250_em485 *em485;
|
|
+ struct delayed_work overrun_backoff;
|
|
+ u32 overrun_backoff_time_ms;
|
|
+};
|
|
+
|
|
+struct uart_8250_em485 {
|
|
+ struct hrtimer start_tx_timer;
|
|
+ struct hrtimer stop_tx_timer;
|
|
+ struct hrtimer *active_timer;
|
|
+ struct uart_8250_port *port;
|
|
+};
|
|
+
|
|
+struct uart_8250_dma {
|
|
+ int (*tx_dma)(struct uart_8250_port *);
|
|
+ int (*rx_dma)(struct uart_8250_port *);
|
|
+ dma_filter_fn fn;
|
|
+ void *rx_param;
|
|
+ void *tx_param;
|
|
+ struct dma_slave_config rxconf;
|
|
+ struct dma_slave_config txconf;
|
|
+ struct dma_chan___2 *rxchan;
|
|
+ struct dma_chan___2 *txchan;
|
|
+ phys_addr_t rx_dma_addr;
|
|
+ phys_addr_t tx_dma_addr;
|
|
+ dma_addr_t rx_addr;
|
|
+ dma_addr_t tx_addr;
|
|
+ dma_cookie_t rx_cookie;
|
|
+ dma_cookie_t tx_cookie;
|
|
+ void *rx_buf;
|
|
+ size_t rx_size;
|
|
+ size_t tx_size;
|
|
+ unsigned char tx_running;
|
|
+ unsigned char tx_err;
|
|
+ unsigned char rx_running;
|
|
+};
|
|
+
|
|
+struct old_serial_port {
|
|
+ unsigned int uart;
|
|
+ unsigned int baud_base;
|
|
+ unsigned int port;
|
|
+ unsigned int irq;
|
|
+ upf_t flags;
|
|
+ unsigned char io_type;
|
|
+ unsigned char *iomem_base;
|
|
+ short unsigned int iomem_reg_shift;
|
|
+};
|
|
+
|
|
+struct irq_info___2 {
|
|
+ struct hlist_node node;
|
|
+ int irq;
|
|
+ spinlock_t lock;
|
|
+ struct list_head *head;
|
|
+};
|
|
+
|
|
+struct serial8250_config {
|
|
+ const char *name;
|
|
+ short unsigned int fifo_size;
|
|
+ short unsigned int tx_loadsz;
|
|
+ unsigned char fcr;
|
|
+ unsigned char rxtrig_bytes[4];
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+struct pciserial_board {
|
|
+ unsigned int flags;
|
|
+ unsigned int num_ports;
|
|
+ unsigned int base_baud;
|
|
+ unsigned int uart_offset;
|
|
+ unsigned int reg_shift;
|
|
+ unsigned int first_offset;
|
|
+};
|
|
+
|
|
+struct serial_private;
|
|
+
|
|
+struct pci_serial_quirk {
|
|
+ u32 vendor;
|
|
+ u32 device;
|
|
+ u32 subvendor;
|
|
+ u32 subdevice;
|
|
+ int (*probe)(struct pci_dev *);
|
|
+ int (*init)(struct pci_dev *);
|
|
+ int (*setup)(struct serial_private *, const struct pciserial_board *, struct uart_8250_port *, int);
|
|
+ void (*exit)(struct pci_dev *);
|
|
+};
|
|
+
|
|
+struct serial_private {
|
|
+ struct pci_dev *dev;
|
|
+ unsigned int nr;
|
|
+ struct pci_serial_quirk *quirk;
|
|
+ const struct pciserial_board *board;
|
|
+ int line[0];
|
|
+};
|
|
+
|
|
+struct timedia_struct {
|
|
+ int num;
|
|
+ const short unsigned int *ids;
|
|
+};
|
|
+
|
|
+struct quatech_feature {
|
|
+ u16 devid;
|
|
+ bool amcc;
|
|
+};
|
|
+
|
|
+enum pci_board_num_t {
|
|
+ pbn_default = 0,
|
|
+ pbn_b0_1_115200 = 1,
|
|
+ pbn_b0_2_115200 = 2,
|
|
+ pbn_b0_4_115200 = 3,
|
|
+ pbn_b0_5_115200 = 4,
|
|
+ pbn_b0_8_115200 = 5,
|
|
+ pbn_b0_1_921600 = 6,
|
|
+ pbn_b0_2_921600 = 7,
|
|
+ pbn_b0_4_921600 = 8,
|
|
+ pbn_b0_2_1130000 = 9,
|
|
+ pbn_b0_4_1152000 = 10,
|
|
+ pbn_b0_4_1250000 = 11,
|
|
+ pbn_b0_2_1843200 = 12,
|
|
+ pbn_b0_4_1843200 = 13,
|
|
+ pbn_b0_1_4000000 = 14,
|
|
+ pbn_b0_bt_1_115200 = 15,
|
|
+ pbn_b0_bt_2_115200 = 16,
|
|
+ pbn_b0_bt_4_115200 = 17,
|
|
+ pbn_b0_bt_8_115200 = 18,
|
|
+ pbn_b0_bt_1_460800 = 19,
|
|
+ pbn_b0_bt_2_460800 = 20,
|
|
+ pbn_b0_bt_4_460800 = 21,
|
|
+ pbn_b0_bt_1_921600 = 22,
|
|
+ pbn_b0_bt_2_921600 = 23,
|
|
+ pbn_b0_bt_4_921600 = 24,
|
|
+ pbn_b0_bt_8_921600 = 25,
|
|
+ pbn_b1_1_115200 = 26,
|
|
+ pbn_b1_2_115200 = 27,
|
|
+ pbn_b1_4_115200 = 28,
|
|
+ pbn_b1_8_115200 = 29,
|
|
+ pbn_b1_16_115200 = 30,
|
|
+ pbn_b1_1_921600 = 31,
|
|
+ pbn_b1_2_921600 = 32,
|
|
+ pbn_b1_4_921600 = 33,
|
|
+ pbn_b1_8_921600 = 34,
|
|
+ pbn_b1_2_1250000 = 35,
|
|
+ pbn_b1_bt_1_115200 = 36,
|
|
+ pbn_b1_bt_2_115200 = 37,
|
|
+ pbn_b1_bt_4_115200 = 38,
|
|
+ pbn_b1_bt_2_921600 = 39,
|
|
+ pbn_b1_1_1382400 = 40,
|
|
+ pbn_b1_2_1382400 = 41,
|
|
+ pbn_b1_4_1382400 = 42,
|
|
+ pbn_b1_8_1382400 = 43,
|
|
+ pbn_b2_1_115200 = 44,
|
|
+ pbn_b2_2_115200 = 45,
|
|
+ pbn_b2_4_115200 = 46,
|
|
+ pbn_b2_8_115200 = 47,
|
|
+ pbn_b2_1_460800 = 48,
|
|
+ pbn_b2_4_460800 = 49,
|
|
+ pbn_b2_8_460800 = 50,
|
|
+ pbn_b2_16_460800 = 51,
|
|
+ pbn_b2_1_921600 = 52,
|
|
+ pbn_b2_4_921600 = 53,
|
|
+ pbn_b2_8_921600 = 54,
|
|
+ pbn_b2_8_1152000 = 55,
|
|
+ pbn_b2_bt_1_115200 = 56,
|
|
+ pbn_b2_bt_2_115200 = 57,
|
|
+ pbn_b2_bt_4_115200 = 58,
|
|
+ pbn_b2_bt_2_921600 = 59,
|
|
+ pbn_b2_bt_4_921600 = 60,
|
|
+ pbn_b3_2_115200 = 61,
|
|
+ pbn_b3_4_115200 = 62,
|
|
+ pbn_b3_8_115200 = 63,
|
|
+ pbn_b4_bt_2_921600 = 64,
|
|
+ pbn_b4_bt_4_921600 = 65,
|
|
+ pbn_b4_bt_8_921600 = 66,
|
|
+ pbn_panacom = 67,
|
|
+ pbn_panacom2 = 68,
|
|
+ pbn_panacom4 = 69,
|
|
+ pbn_plx_romulus = 70,
|
|
+ pbn_endrun_2_4000000 = 71,
|
|
+ pbn_oxsemi = 72,
|
|
+ pbn_oxsemi_1_4000000 = 73,
|
|
+ pbn_oxsemi_2_4000000 = 74,
|
|
+ pbn_oxsemi_4_4000000 = 75,
|
|
+ pbn_oxsemi_8_4000000 = 76,
|
|
+ pbn_intel_i960 = 77,
|
|
+ pbn_sgi_ioc3 = 78,
|
|
+ pbn_computone_4 = 79,
|
|
+ pbn_computone_6 = 80,
|
|
+ pbn_computone_8 = 81,
|
|
+ pbn_sbsxrsio = 82,
|
|
+ pbn_pasemi_1682M = 83,
|
|
+ pbn_ni8430_2 = 84,
|
|
+ pbn_ni8430_4 = 85,
|
|
+ pbn_ni8430_8 = 86,
|
|
+ pbn_ni8430_16 = 87,
|
|
+ pbn_ADDIDATA_PCIe_1_3906250 = 88,
|
|
+ pbn_ADDIDATA_PCIe_2_3906250 = 89,
|
|
+ pbn_ADDIDATA_PCIe_4_3906250 = 90,
|
|
+ pbn_ADDIDATA_PCIe_8_3906250 = 91,
|
|
+ pbn_ce4100_1_115200 = 92,
|
|
+ pbn_omegapci = 93,
|
|
+ pbn_NETMOS9900_2s_115200 = 94,
|
|
+ pbn_brcm_trumanage = 95,
|
|
+ pbn_fintek_4 = 96,
|
|
+ pbn_fintek_8 = 97,
|
|
+ pbn_fintek_12 = 98,
|
|
+ pbn_wch382_2 = 99,
|
|
+ pbn_wch384_4 = 100,
|
|
+ pbn_pericom_PI7C9X7951 = 101,
|
|
+ pbn_pericom_PI7C9X7952 = 102,
|
|
+ pbn_pericom_PI7C9X7954 = 103,
|
|
+ pbn_pericom_PI7C9X7958 = 104,
|
|
+};
|
|
+
|
|
+struct exar8250_platform {
|
|
+ int (*rs485_config)(struct uart_port *, struct serial_rs485 *);
|
|
+ int (*register_gpio)(struct pci_dev *, struct uart_8250_port *);
|
|
+};
|
|
+
|
|
+struct exar8250;
|
|
+
|
|
+struct exar8250_board {
|
|
+ unsigned int num_ports;
|
|
+ unsigned int reg_shift;
|
|
+ int (*setup)(struct exar8250 *, struct pci_dev *, struct uart_8250_port *, int);
|
|
+ void (*exit)(struct pci_dev *);
|
|
+};
|
|
+
|
|
+struct exar8250 {
|
|
+ unsigned int nr;
|
|
+ struct exar8250_board *board;
|
|
+ void *virt;
|
|
+ int line[0];
|
|
+};
|
|
+
|
|
+struct reset_control;
|
|
+
|
|
+struct dw8250_data {
|
|
+ u8 usr_reg;
|
|
+ u8 dlf_size;
|
|
+ int line;
|
|
+ int msr_mask_on;
|
|
+ int msr_mask_off;
|
|
+ struct clk *clk;
|
|
+ struct clk *pclk;
|
|
+ struct reset_control *rst;
|
|
+ struct uart_8250_dma dma;
|
|
+ unsigned int skip_autocfg: 1;
|
|
+ unsigned int uart_16550_compatible: 1;
|
|
+};
|
|
+
|
|
+struct lpss8250;
|
|
+
|
|
+struct lpss8250_board {
|
|
+ long unsigned int freq;
|
|
+ unsigned int base_baud;
|
|
+ int (*setup)(struct lpss8250 *, struct uart_port *);
|
|
+ void (*exit)(struct lpss8250 *);
|
|
+};
|
|
+
|
|
+struct lpss8250 {
|
|
+ int line;
|
|
+ struct lpss8250_board *board;
|
|
+ struct uart_8250_dma dma;
|
|
+ struct dw_dma_chip dma_chip;
|
|
+ struct dw_dma_slave dma_param;
|
|
+ u8 dma_maxburst;
|
|
+};
|
|
+
|
|
+struct hsu_dma_slave {
|
|
+ struct device *dma_dev;
|
|
+ int chan_id;
|
|
+};
|
|
+
|
|
+struct mid8250;
|
|
+
|
|
+struct mid8250_board {
|
|
+ unsigned int flags;
|
|
+ long unsigned int freq;
|
|
+ unsigned int base_baud;
|
|
+ int (*setup)(struct mid8250 *, struct uart_port *);
|
|
+ void (*exit)(struct mid8250 *);
|
|
+};
|
|
+
|
|
+struct mid8250 {
|
|
+ int line;
|
|
+ int dma_index;
|
|
+ struct pci_dev *dma_dev;
|
|
+ struct uart_8250_dma dma;
|
|
+ struct mid8250_board *board;
|
|
+ struct hsu_dma_chip dma_chip;
|
|
+};
|
|
+
|
|
+struct memdev {
|
|
+ const char *name;
|
|
+ umode_t mode;
|
|
+ const struct file_operations *fops;
|
|
+ fmode_t fmode;
|
|
+};
|
|
+
|
|
+struct timer_rand_state {
|
|
+ cycles_t last_time;
|
|
+ long int last_delta;
|
|
+ long int last_delta2;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_add_device_randomness {
|
|
+ struct trace_entry ent;
|
|
+ int bytes;
|
|
+ long unsigned int IP;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_random__mix_pool_bytes {
|
|
+ struct trace_entry ent;
|
|
+ const char *pool_name;
|
|
+ int bytes;
|
|
+ long unsigned int IP;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_credit_entropy_bits {
|
|
+ struct trace_entry ent;
|
|
+ const char *pool_name;
|
|
+ int bits;
|
|
+ int entropy_count;
|
|
+ long unsigned int IP;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_push_to_pool {
|
|
+ struct trace_entry ent;
|
|
+ const char *pool_name;
|
|
+ int pool_bits;
|
|
+ int input_bits;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_debit_entropy {
|
|
+ struct trace_entry ent;
|
|
+ const char *pool_name;
|
|
+ int debit_bits;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_add_input_randomness {
|
|
+ struct trace_entry ent;
|
|
+ int input_bits;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_add_disk_randomness {
|
|
+ struct trace_entry ent;
|
|
+ dev_t dev;
|
|
+ int input_bits;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xfer_secondary_pool {
|
|
+ struct trace_entry ent;
|
|
+ const char *pool_name;
|
|
+ int xfer_bits;
|
|
+ int request_bits;
|
|
+ int pool_entropy;
|
|
+ int input_entropy;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_random__get_random_bytes {
|
|
+ struct trace_entry ent;
|
|
+ int nbytes;
|
|
+ long unsigned int IP;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_random__extract_entropy {
|
|
+ struct trace_entry ent;
|
|
+ const char *pool_name;
|
|
+ int nbytes;
|
|
+ int entropy_count;
|
|
+ long unsigned int IP;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_random_read {
|
|
+ struct trace_entry ent;
|
|
+ int got_bits;
|
|
+ int need_bits;
|
|
+ int pool_left;
|
|
+ int input_left;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_urandom_read {
|
|
+ struct trace_entry ent;
|
|
+ int got_bits;
|
|
+ int pool_left;
|
|
+ int input_left;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_add_device_randomness {};
|
|
+
|
|
+struct trace_event_data_offsets_random__mix_pool_bytes {};
|
|
+
|
|
+struct trace_event_data_offsets_credit_entropy_bits {};
|
|
+
|
|
+struct trace_event_data_offsets_push_to_pool {};
|
|
+
|
|
+struct trace_event_data_offsets_debit_entropy {};
|
|
+
|
|
+struct trace_event_data_offsets_add_input_randomness {};
|
|
+
|
|
+struct trace_event_data_offsets_add_disk_randomness {};
|
|
+
|
|
+struct trace_event_data_offsets_xfer_secondary_pool {};
|
|
+
|
|
+struct trace_event_data_offsets_random__get_random_bytes {};
|
|
+
|
|
+struct trace_event_data_offsets_random__extract_entropy {};
|
|
+
|
|
+struct trace_event_data_offsets_random_read {};
|
|
+
|
|
+struct trace_event_data_offsets_urandom_read {};
|
|
+
|
|
+struct poolinfo {
|
|
+ int poolbitshift;
|
|
+ int poolwords;
|
|
+ int poolbytes;
|
|
+ int poolbits;
|
|
+ int poolfracbits;
|
|
+ int tap1;
|
|
+ int tap2;
|
|
+ int tap3;
|
|
+ int tap4;
|
|
+ int tap5;
|
|
+};
|
|
+
|
|
+struct crng_state {
|
|
+ __u32 state[16];
|
|
+ long unsigned int init_time;
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct entropy_store {
|
|
+ const struct poolinfo *poolinfo;
|
|
+ __u32 *pool;
|
|
+ const char *name;
|
|
+ struct entropy_store *pull;
|
|
+ struct work_struct push_work;
|
|
+ long unsigned int last_pulled;
|
|
+ spinlock_t lock;
|
|
+ short unsigned int add_ptr;
|
|
+ short unsigned int input_rotate;
|
|
+ int entropy_count;
|
|
+ unsigned int initialized: 1;
|
|
+ unsigned int last_data_init: 1;
|
|
+ __u8 last_data[10];
|
|
+};
|
|
+
|
|
+struct fast_pool {
|
|
+ __u32 pool[4];
|
|
+ long unsigned int last;
|
|
+ short unsigned int reg_idx;
|
|
+ unsigned char count;
|
|
+};
|
|
+
|
|
+struct batched_entropy {
|
|
+ union {
|
|
+ u64 entropy_u64[8];
|
|
+ u32 entropy_u32[16];
|
|
+ };
|
|
+ unsigned int position;
|
|
+ spinlock_t batch_lock;
|
|
+};
|
|
+
|
|
+struct raw_config_request {
|
|
+ int raw_minor;
|
|
+ __u64 block_major;
|
|
+ __u64 block_minor;
|
|
+};
|
|
+
|
|
+struct raw_device_data {
|
|
+ struct block_device *binding;
|
|
+ int inuse;
|
|
+};
|
|
+
|
|
+struct raw32_config_request {
|
|
+ compat_int_t raw_minor;
|
|
+ compat_u64 block_major;
|
|
+ compat_u64 block_minor;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct hpet_info {
|
|
+ long unsigned int hi_ireqfreq;
|
|
+ long unsigned int hi_flags;
|
|
+ short unsigned int hi_hpet;
|
|
+ short unsigned int hi_timer;
|
|
+};
|
|
+
|
|
+struct hpets;
|
|
+
|
|
+struct hpet_dev___3 {
|
|
+ struct hpets *hd_hpets;
|
|
+ struct hpet *hd_hpet;
|
|
+ struct hpet_timer *hd_timer;
|
|
+ long unsigned int hd_ireqfreq;
|
|
+ long unsigned int hd_irqdata;
|
|
+ wait_queue_head_t hd_waitqueue;
|
|
+ struct fasync_struct *hd_async_queue;
|
|
+ unsigned int hd_flags;
|
|
+ unsigned int hd_irq;
|
|
+ unsigned int hd_hdwirq;
|
|
+ char hd_name[7];
|
|
+};
|
|
+
|
|
+struct hpets {
|
|
+ struct hpets *hp_next;
|
|
+ struct hpet *hp_hpet;
|
|
+ long unsigned int hp_hpet_phys;
|
|
+ struct clocksource *hp_clocksource;
|
|
+ long long unsigned int hp_tick_freq;
|
|
+ long unsigned int hp_delta;
|
|
+ unsigned int hp_ntimer;
|
|
+ unsigned int hp_which;
|
|
+ struct hpet_dev___3 hp_dev[1];
|
|
+};
|
|
+
|
|
+struct compat_hpet_info {
|
|
+ compat_ulong_t hi_ireqfreq;
|
|
+ compat_ulong_t hi_flags;
|
|
+ short unsigned int hi_hpet;
|
|
+ short unsigned int hi_timer;
|
|
+};
|
|
+
|
|
+struct hwrng {
|
|
+ const char *name;
|
|
+ int (*init)(struct hwrng *);
|
|
+ void (*cleanup)(struct hwrng *);
|
|
+ int (*data_present)(struct hwrng *, int);
|
|
+ int (*data_read)(struct hwrng *, u32 *);
|
|
+ int (*read)(struct hwrng *, void *, size_t, bool);
|
|
+ long unsigned int priv;
|
|
+ short unsigned int quality;
|
|
+ struct list_head list;
|
|
+ struct kref ref;
|
|
+ struct completion cleanup_done;
|
|
+};
|
|
+
|
|
+struct virtrng_info {
|
|
+ struct hwrng hwrng;
|
|
+ struct virtqueue *vq;
|
|
+ struct completion have_data;
|
|
+ char name[25];
|
|
+ unsigned int data_avail;
|
|
+ int index;
|
|
+ bool busy;
|
|
+ bool hwrng_register_done;
|
|
+ bool hwrng_removed;
|
|
+};
|
|
+
|
|
+struct tpm2_digest {
|
|
+ u16 alg_id;
|
|
+ u8 digest[64];
|
|
+};
|
|
+
|
|
+struct tpm_chip___2;
|
|
+
|
|
+struct tpm_class_ops {
|
|
+ unsigned int flags;
|
|
+ const u8 req_complete_mask;
|
|
+ const u8 req_complete_val;
|
|
+ bool (*req_canceled)(struct tpm_chip___2 *, u8);
|
|
+ int (*recv)(struct tpm_chip___2 *, u8 *, size_t);
|
|
+ int (*send)(struct tpm_chip___2 *, u8 *, size_t);
|
|
+ void (*cancel)(struct tpm_chip___2 *);
|
|
+ u8 (*status)(struct tpm_chip___2 *);
|
|
+ bool (*update_timeouts)(struct tpm_chip___2 *, long unsigned int *);
|
|
+ int (*go_idle)(struct tpm_chip___2 *);
|
|
+ int (*cmd_ready)(struct tpm_chip___2 *);
|
|
+ int (*request_locality)(struct tpm_chip___2 *, int);
|
|
+ int (*relinquish_locality)(struct tpm_chip___2 *, int);
|
|
+ void (*clk_enable)(struct tpm_chip___2 *, bool);
|
|
+};
|
|
+
|
|
+struct tpm_bios_log {
|
|
+ void *bios_event_log;
|
|
+ void *bios_event_log_end;
|
|
+};
|
|
+
|
|
+struct tpm_chip_seqops {
|
|
+ struct tpm_chip___2 *chip;
|
|
+ const struct seq_operations *seqops;
|
|
+};
|
|
+
|
|
+struct tpm_space {
|
|
+ u32 context_tbl[3];
|
|
+ u8 *context_buf;
|
|
+ u32 session_tbl[3];
|
|
+ u8 *session_buf;
|
|
+ u32 buf_size;
|
|
+};
|
|
+
|
|
+struct tpm_chip___2 {
|
|
+ struct device dev;
|
|
+ struct device devs;
|
|
+ struct cdev cdev;
|
|
+ struct cdev cdevs;
|
|
+ struct rw_semaphore ops_sem;
|
|
+ const struct tpm_class_ops *ops;
|
|
+ struct tpm_bios_log log;
|
|
+ struct tpm_chip_seqops bin_log_seqops;
|
|
+ struct tpm_chip_seqops ascii_log_seqops;
|
|
+ unsigned int flags;
|
|
+ int dev_num;
|
|
+ long unsigned int is_open;
|
|
+ char hwrng_name[64];
|
|
+ struct hwrng hwrng;
|
|
+ struct mutex tpm_mutex;
|
|
+ long unsigned int timeout_a;
|
|
+ long unsigned int timeout_b;
|
|
+ long unsigned int timeout_c;
|
|
+ long unsigned int timeout_d;
|
|
+ bool timeout_adjusted;
|
|
+ long unsigned int duration[4];
|
|
+ bool duration_adjusted;
|
|
+ struct dentry *bios_dir[3];
|
|
+ const struct attribute_group *groups[3];
|
|
+ unsigned int groups_cnt;
|
|
+ u16 active_banks[7];
|
|
+ acpi_handle acpi_dev_handle;
|
|
+ char ppi_version[4];
|
|
+ struct tpm_space work_space;
|
|
+ u32 nr_commands;
|
|
+ u32 *cc_attrs_tbl;
|
|
+ int locality;
|
|
+};
|
|
+
|
|
+enum tpm_const {
|
|
+ TPM_MINOR = 224,
|
|
+ TPM_BUFSIZE = 4096,
|
|
+ TPM_NUM_DEVICES = 65536,
|
|
+ TPM_RETRY = 50,
|
|
+ TPM_NUM_EVENT_LOG_FILES = 3,
|
|
+};
|
|
+
|
|
+enum tpm_timeout {
|
|
+ TPM_TIMEOUT = 5,
|
|
+ TPM_TIMEOUT_RETRY = 100,
|
|
+ TPM_TIMEOUT_RANGE_US = 300,
|
|
+ TPM_TIMEOUT_POLL = 1,
|
|
+ TPM_TIMEOUT_USECS_MIN = 100,
|
|
+ TPM_TIMEOUT_USECS_MAX = 500,
|
|
+};
|
|
+
|
|
+enum tpm_duration {
|
|
+ TPM_SHORT = 0,
|
|
+ TPM_MEDIUM = 1,
|
|
+ TPM_LONG = 2,
|
|
+ TPM_LONG_LONG = 3,
|
|
+ TPM_UNDEFINED = 4,
|
|
+ TPM_NUM_DURATIONS = 4,
|
|
+};
|
|
+
|
|
+enum tpm2_timeouts {
|
|
+ TPM2_TIMEOUT_A = 750,
|
|
+ TPM2_TIMEOUT_B = 2000,
|
|
+ TPM2_TIMEOUT_C = 200,
|
|
+ TPM2_TIMEOUT_D = 30,
|
|
+ TPM2_DURATION_SHORT = 20,
|
|
+ TPM2_DURATION_MEDIUM = 750,
|
|
+ TPM2_DURATION_LONG = 2000,
|
|
+ TPM2_DURATION_LONG_LONG = 300000,
|
|
+ TPM2_DURATION_DEFAULT = 120000,
|
|
+};
|
|
+
|
|
+enum tpm2_structures {
|
|
+ TPM2_ST_NO_SESSIONS = 32769,
|
|
+ TPM2_ST_SESSIONS = 32770,
|
|
+};
|
|
+
|
|
+enum tpm2_return_codes {
|
|
+ TPM2_RC_SUCCESS = 0,
|
|
+ TPM2_RC_HASH = 131,
|
|
+ TPM2_RC_HANDLE = 139,
|
|
+ TPM2_RC_INITIALIZE = 256,
|
|
+ TPM2_RC_FAILURE = 257,
|
|
+ TPM2_RC_DISABLED = 288,
|
|
+ TPM2_RC_COMMAND_CODE = 323,
|
|
+ TPM2_RC_TESTING = 2314,
|
|
+ TPM2_RC_REFERENCE_H0 = 2320,
|
|
+ TPM2_RC_RETRY = 2338,
|
|
+};
|
|
+
|
|
+enum tpm2_algorithms {
|
|
+ TPM2_ALG_ERROR = 0,
|
|
+ TPM2_ALG_SHA1 = 4,
|
|
+ TPM2_ALG_KEYEDHASH = 8,
|
|
+ TPM2_ALG_SHA256 = 11,
|
|
+ TPM2_ALG_SHA384 = 12,
|
|
+ TPM2_ALG_SHA512 = 13,
|
|
+ TPM2_ALG_NULL = 16,
|
|
+ TPM2_ALG_SM3_256 = 18,
|
|
+};
|
|
+
|
|
+enum tpm2_command_codes {
|
|
+ TPM2_CC_FIRST = 287,
|
|
+ TPM2_CC_CREATE_PRIMARY = 305,
|
|
+ TPM2_CC_SELF_TEST = 323,
|
|
+ TPM2_CC_STARTUP = 324,
|
|
+ TPM2_CC_SHUTDOWN = 325,
|
|
+ TPM2_CC_CREATE = 339,
|
|
+ TPM2_CC_LOAD = 343,
|
|
+ TPM2_CC_UNSEAL = 350,
|
|
+ TPM2_CC_CONTEXT_LOAD = 353,
|
|
+ TPM2_CC_CONTEXT_SAVE = 354,
|
|
+ TPM2_CC_FLUSH_CONTEXT = 357,
|
|
+ TPM2_CC_GET_CAPABILITY = 378,
|
|
+ TPM2_CC_GET_RANDOM = 379,
|
|
+ TPM2_CC_PCR_READ = 382,
|
|
+ TPM2_CC_PCR_EXTEND = 386,
|
|
+ TPM2_CC_LAST = 399,
|
|
+};
|
|
+
|
|
+enum tpm2_startup_types {
|
|
+ TPM2_SU_CLEAR = 0,
|
|
+ TPM2_SU_STATE = 1,
|
|
+};
|
|
+
|
|
+enum tpm2_cc_attrs {
|
|
+ TPM2_CC_ATTR_CHANDLES = 25,
|
|
+ TPM2_CC_ATTR_RHANDLE = 28,
|
|
+};
|
|
+
|
|
+enum tpm_chip_flags {
|
|
+ TPM_CHIP_FLAG_TPM2 = 2,
|
|
+ TPM_CHIP_FLAG_IRQ = 4,
|
|
+ TPM_CHIP_FLAG_VIRTUAL = 8,
|
|
+ TPM_CHIP_FLAG_HAVE_TIMEOUTS = 16,
|
|
+ TPM_CHIP_FLAG_ALWAYS_POWERED = 32,
|
|
+};
|
|
+
|
|
+struct tpm_input_header {
|
|
+ __be16 tag;
|
|
+ __be32 length;
|
|
+ __be32 ordinal;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct tpm_output_header {
|
|
+ __be16 tag;
|
|
+ __be32 length;
|
|
+ __be32 return_code;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct stclear_flags_t {
|
|
+ __be16 tag;
|
|
+ u8 deactivated;
|
|
+ u8 disableForceClear;
|
|
+ u8 physicalPresence;
|
|
+ u8 physicalPresenceLock;
|
|
+ u8 bGlobalLock;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct tpm_version_t {
|
|
+ u8 Major;
|
|
+ u8 Minor;
|
|
+ u8 revMajor;
|
|
+ u8 revMinor;
|
|
+};
|
|
+
|
|
+struct tpm_version_1_2_t {
|
|
+ __be16 tag;
|
|
+ u8 Major;
|
|
+ u8 Minor;
|
|
+ u8 revMajor;
|
|
+ u8 revMinor;
|
|
+};
|
|
+
|
|
+struct timeout_t {
|
|
+ __be32 a;
|
|
+ __be32 b;
|
|
+ __be32 c;
|
|
+ __be32 d;
|
|
+};
|
|
+
|
|
+struct duration_t {
|
|
+ __be32 tpm_short;
|
|
+ __be32 tpm_medium;
|
|
+ __be32 tpm_long;
|
|
+};
|
|
+
|
|
+struct permanent_flags_t {
|
|
+ __be16 tag;
|
|
+ u8 disable;
|
|
+ u8 ownership;
|
|
+ u8 deactivated;
|
|
+ u8 readPubek;
|
|
+ u8 disableOwnerClear;
|
|
+ u8 allowMaintenance;
|
|
+ u8 physicalPresenceLifetimeLock;
|
|
+ u8 physicalPresenceHWEnable;
|
|
+ u8 physicalPresenceCMDEnable;
|
|
+ u8 CEKPUsed;
|
|
+ u8 TPMpost;
|
|
+ u8 TPMpostLock;
|
|
+ u8 FIPS;
|
|
+ u8 operator;
|
|
+ u8 enableRevokeEK;
|
|
+ u8 nvLocked;
|
|
+ u8 readSRKPub;
|
|
+ u8 tpmEstablished;
|
|
+ u8 maintenanceDone;
|
|
+ u8 disableFullDALogicInfo;
|
|
+};
|
|
+
|
|
+typedef union {
|
|
+ struct permanent_flags_t perm_flags;
|
|
+ struct stclear_flags_t stclear_flags;
|
|
+ __u8 owned;
|
|
+ __be32 num_pcrs;
|
|
+ struct tpm_version_t tpm_version;
|
|
+ struct tpm_version_1_2_t tpm_version_1_2;
|
|
+ __be32 manufacturer_id;
|
|
+ struct timeout_t timeout;
|
|
+ struct duration_t duration;
|
|
+} cap_t;
|
|
+
|
|
+enum tpm_capabilities {
|
|
+ TPM_CAP_FLAG = 4,
|
|
+ TPM_CAP_PROP = 5,
|
|
+ TPM_CAP_VERSION_1_1 = 6,
|
|
+ TPM_CAP_VERSION_1_2 = 26,
|
|
+};
|
|
+
|
|
+enum tpm_sub_capabilities {
|
|
+ TPM_CAP_PROP_PCR = 257,
|
|
+ TPM_CAP_PROP_MANUFACTURER = 259,
|
|
+ TPM_CAP_FLAG_PERM = 264,
|
|
+ TPM_CAP_FLAG_VOL = 265,
|
|
+ TPM_CAP_PROP_OWNER = 273,
|
|
+ TPM_CAP_PROP_TIS_TIMEOUT = 277,
|
|
+ TPM_CAP_PROP_TIS_DURATION = 288,
|
|
+};
|
|
+
|
|
+typedef union {
|
|
+ struct tpm_input_header in;
|
|
+ struct tpm_output_header out;
|
|
+} tpm_cmd_header;
|
|
+
|
|
+struct tpm_pcrread_out {
|
|
+ u8 pcr_result[20];
|
|
+};
|
|
+
|
|
+struct tpm_pcrread_in {
|
|
+ __be32 pcr_idx;
|
|
+};
|
|
+
|
|
+struct tpm_getrandom_out {
|
|
+ __be32 rng_data_len;
|
|
+ u8 rng_data[128];
|
|
+};
|
|
+
|
|
+struct tpm_getrandom_in {
|
|
+ __be32 num_bytes;
|
|
+};
|
|
+
|
|
+typedef union {
|
|
+ struct tpm_pcrread_in pcrread_in;
|
|
+ struct tpm_pcrread_out pcrread_out;
|
|
+ struct tpm_getrandom_in getrandom_in;
|
|
+ struct tpm_getrandom_out getrandom_out;
|
|
+} tpm_cmd_params;
|
|
+
|
|
+struct tpm_cmd_t {
|
|
+ tpm_cmd_header header;
|
|
+ tpm_cmd_params params;
|
|
+} __attribute__((packed));
|
|
+
|
|
+enum tpm_buf_flags {
|
|
+ TPM_BUF_OVERFLOW = 1,
|
|
+};
|
|
+
|
|
+struct tpm_buf___2 {
|
|
+ struct page *data_page;
|
|
+ unsigned int flags;
|
|
+ u8 *data;
|
|
+};
|
|
+
|
|
+enum tpm_transmit_flags {
|
|
+ TPM_TRANSMIT_UNLOCKED = 1,
|
|
+ TPM_TRANSMIT_NESTED = 2,
|
|
+};
|
|
+
|
|
+struct trusted_key_payload___2;
|
|
+
|
|
+struct trusted_key_options___2;
|
|
+
|
|
+struct file_priv {
|
|
+ struct tpm_chip___2 *chip;
|
|
+ size_t data_pending;
|
|
+ struct mutex buffer_mutex;
|
|
+ struct timer_list user_read_timer;
|
|
+ struct work_struct work;
|
|
+ u8 data_buffer[4096];
|
|
+};
|
|
+
|
|
+struct tpm_readpubek_out {
|
|
+ u8 algorithm[4];
|
|
+ u8 encscheme[2];
|
|
+ u8 sigscheme[2];
|
|
+ __be32 paramsize;
|
|
+ u8 parameters[12];
|
|
+ __be32 keysize;
|
|
+ u8 modulus[256];
|
|
+ u8 checksum[20];
|
|
+};
|
|
+
|
|
+enum TPM_OPS_FLAGS {
|
|
+ TPM_OPS_AUTO_STARTUP = 1,
|
|
+};
|
|
+
|
|
+enum tpm2_const {
|
|
+ TPM2_PLATFORM_PCR = 24,
|
|
+ TPM2_PCR_SELECT_MIN = 3,
|
|
+};
|
|
+
|
|
+enum tpm2_permanent_handles {
|
|
+ TPM2_RS_PW = 1073741833,
|
|
+};
|
|
+
|
|
+enum tpm2_capabilities {
|
|
+ TPM2_CAP_HANDLES = 1,
|
|
+ TPM2_CAP_COMMANDS = 2,
|
|
+ TPM2_CAP_PCRS = 5,
|
|
+ TPM2_CAP_TPM_PROPERTIES = 6,
|
|
+};
|
|
+
|
|
+enum tpm2_properties {
|
|
+ TPM_PT_TOTAL_COMMANDS = 297,
|
|
+};
|
|
+
|
|
+enum tpm2_object_attributes {
|
|
+ TPM2_OA_USER_WITH_AUTH = 64,
|
|
+};
|
|
+
|
|
+enum tpm2_session_attributes {
|
|
+ TPM2_SA_CONTINUE_SESSION = 1,
|
|
+};
|
|
+
|
|
+struct tpm2_hash {
|
|
+ unsigned int crypto_id;
|
|
+ unsigned int tpm_id;
|
|
+};
|
|
+
|
|
+struct tpm2_pcr_read_out {
|
|
+ __be32 update_cnt;
|
|
+ __be32 pcr_selects_cnt;
|
|
+ __be16 hash_alg;
|
|
+ u8 pcr_select_size;
|
|
+ u8 pcr_select[3];
|
|
+ __be32 digests_cnt;
|
|
+ __be16 digest_size;
|
|
+ u8 digest[0];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct tpm2_null_auth_area {
|
|
+ __be32 handle;
|
|
+ __be16 nonce_size;
|
|
+ u8 attributes;
|
|
+ __be16 auth_size;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct tpm2_get_random_out {
|
|
+ __be16 size;
|
|
+ u8 buffer[128];
|
|
+};
|
|
+
|
|
+struct tpm2_get_cap_out {
|
|
+ u8 more_data;
|
|
+ __be32 subcap_id;
|
|
+ __be32 property_cnt;
|
|
+ __be32 property_id;
|
|
+ __be32 value;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct tpm2_pcr_selection {
|
|
+ __be16 hash_alg;
|
|
+ u8 size_of_select;
|
|
+ u8 pcr_select[3];
|
|
+};
|
|
+
|
|
+struct tpmrm_priv {
|
|
+ struct file_priv priv;
|
|
+ struct tpm_space space;
|
|
+};
|
|
+
|
|
+struct tcpa_event {
|
|
+ u32 pcr_index;
|
|
+ u32 event_type;
|
|
+ u8 pcr_value[20];
|
|
+ u32 event_size;
|
|
+ u8 event_data[0];
|
|
+};
|
|
+
|
|
+enum tcpa_event_types {
|
|
+ PREBOOT = 0,
|
|
+ POST_CODE = 1,
|
|
+ UNUSED = 2,
|
|
+ NO_ACTION = 3,
|
|
+ SEPARATOR = 4,
|
|
+ ACTION = 5,
|
|
+ EVENT_TAG = 6,
|
|
+ SCRTM_CONTENTS = 7,
|
|
+ SCRTM_VERSION = 8,
|
|
+ CPU_MICROCODE = 9,
|
|
+ PLATFORM_CONFIG_FLAGS = 10,
|
|
+ TABLE_OF_DEVICES = 11,
|
|
+ COMPACT_HASH = 12,
|
|
+ IPL = 13,
|
|
+ IPL_PARTITION_DATA = 14,
|
|
+ NONHOST_CODE = 15,
|
|
+ NONHOST_CONFIG = 16,
|
|
+ NONHOST_INFO = 17,
|
|
+};
|
|
+
|
|
+struct tcpa_pc_event {
|
|
+ u32 event_id;
|
|
+ u32 event_size;
|
|
+ u8 event_data[0];
|
|
+};
|
|
+
|
|
+enum tcpa_pc_event_ids {
|
|
+ SMBIOS = 1,
|
|
+ BIS_CERT = 2,
|
|
+ POST_BIOS_ROM = 3,
|
|
+ ESCD = 4,
|
|
+ CMOS = 5,
|
|
+ NVRAM = 6,
|
|
+ OPTION_ROM_EXEC = 7,
|
|
+ OPTION_ROM_CONFIG = 8,
|
|
+ OPTION_ROM_MICROCODE = 10,
|
|
+ S_CRTM_VERSION = 11,
|
|
+ S_CRTM_CONTENTS = 12,
|
|
+ POST_CONTENTS = 13,
|
|
+ HOST_TABLE_OF_DEVICES = 14,
|
|
+};
|
|
+
|
|
+struct tcg_efi_specid_event_algs {
|
|
+ u16 alg_id;
|
|
+ u16 digest_size;
|
|
+};
|
|
+
|
|
+struct tcg_efi_specid_event {
|
|
+ u8 signature[16];
|
|
+ u32 platform_class;
|
|
+ u8 spec_version_minor;
|
|
+ u8 spec_version_major;
|
|
+ u8 spec_errata;
|
|
+ u8 uintnsize;
|
|
+ u32 num_algs;
|
|
+ struct tcg_efi_specid_event_algs digest_sizes[3];
|
|
+ u8 vendor_info_size;
|
|
+ u8 vendor_info[0];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct tcg_pcr_event {
|
|
+ u32 pcr_idx;
|
|
+ u32 event_type;
|
|
+ u8 digest[20];
|
|
+ u32 event_size;
|
|
+ u8 event[0];
|
|
+};
|
|
+
|
|
+struct tcg_event_field {
|
|
+ u32 event_size;
|
|
+ u8 event[0];
|
|
+};
|
|
+
|
|
+struct tcg_pcr_event2 {
|
|
+ u32 pcr_idx;
|
|
+ u32 event_type;
|
|
+ u32 count;
|
|
+ struct tpm2_digest digests[3];
|
|
+ struct tcg_event_field event;
|
|
+} __attribute__((packed));
|
|
+
|
|
+enum tpm2_handle_types {
|
|
+ TPM2_HT_HMAC_SESSION = 33554432,
|
|
+ TPM2_HT_POLICY_SESSION = 50331648,
|
|
+ TPM2_HT_TRANSIENT = -2147483648,
|
|
+};
|
|
+
|
|
+struct tpm2_context {
|
|
+ __be64 sequence;
|
|
+ __be32 saved_handle;
|
|
+ __be32 hierarchy;
|
|
+ __be16 blob_size;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct tpm2_cap_handles {
|
|
+ u8 more_data;
|
|
+ __be32 capability;
|
|
+ __be32 count;
|
|
+ __be32 handles[0];
|
|
+} __attribute__((packed));
|
|
+
|
|
+enum bios_platform_class {
|
|
+ BIOS_CLIENT = 0,
|
|
+ BIOS_SERVER = 1,
|
|
+};
|
|
+
|
|
+struct client_hdr {
|
|
+ u32 log_max_len;
|
|
+ u64 log_start_addr;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct server_hdr {
|
|
+ u16 reserved;
|
|
+ u64 log_max_len;
|
|
+ u64 log_start_addr;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_tcpa {
|
|
+ struct acpi_table_header hdr;
|
|
+ u16 platform_class;
|
|
+ union {
|
|
+ struct client_hdr client;
|
|
+ struct server_hdr server;
|
|
+ };
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct linux_efi_tpm_eventlog {
|
|
+ u32 size;
|
|
+ u8 version;
|
|
+ u8 log[0];
|
|
+};
|
|
+
|
|
+enum tis_access {
|
|
+ TPM_ACCESS_VALID = 128,
|
|
+ TPM_ACCESS_ACTIVE_LOCALITY = 32,
|
|
+ TPM_ACCESS_REQUEST_PENDING = 4,
|
|
+ TPM_ACCESS_REQUEST_USE = 2,
|
|
+};
|
|
+
|
|
+enum tis_status {
|
|
+ TPM_STS_VALID = 128,
|
|
+ TPM_STS_COMMAND_READY = 64,
|
|
+ TPM_STS_GO = 32,
|
|
+ TPM_STS_DATA_AVAIL = 16,
|
|
+ TPM_STS_DATA_EXPECT = 8,
|
|
+};
|
|
+
|
|
+enum tis_int_flags {
|
|
+ TPM_GLOBAL_INT_ENABLE = -2147483648,
|
|
+ TPM_INTF_BURST_COUNT_STATIC = 256,
|
|
+ TPM_INTF_CMD_READY_INT = 128,
|
|
+ TPM_INTF_INT_EDGE_FALLING = 64,
|
|
+ TPM_INTF_INT_EDGE_RISING = 32,
|
|
+ TPM_INTF_INT_LEVEL_LOW = 16,
|
|
+ TPM_INTF_INT_LEVEL_HIGH = 8,
|
|
+ TPM_INTF_LOCALITY_CHANGE_INT = 4,
|
|
+ TPM_INTF_STS_VALID_INT = 2,
|
|
+ TPM_INTF_DATA_AVAIL_INT = 1,
|
|
+};
|
|
+
|
|
+enum tis_defaults {
|
|
+ TIS_MEM_LEN = 20480,
|
|
+ TIS_SHORT_TIMEOUT = 750,
|
|
+ TIS_LONG_TIMEOUT = 2000,
|
|
+};
|
|
+
|
|
+enum tpm_tis_flags {
|
|
+ TPM_TIS_ITPM_WORKAROUND = 1,
|
|
+};
|
|
+
|
|
+struct tpm_tis_phy_ops;
|
|
+
|
|
+struct tpm_tis_data {
|
|
+ u16 manufacturer_id;
|
|
+ int locality;
|
|
+ int irq;
|
|
+ bool irq_tested;
|
|
+ unsigned int flags;
|
|
+ void *ilb_base_addr;
|
|
+ u16 clkrun_enabled;
|
|
+ wait_queue_head_t int_queue;
|
|
+ wait_queue_head_t read_queue;
|
|
+ const struct tpm_tis_phy_ops *phy_ops;
|
|
+ short unsigned int rng_quality;
|
|
+};
|
|
+
|
|
+struct tpm_tis_phy_ops {
|
|
+ int (*read_bytes)(struct tpm_tis_data *, u32, u16, u8 *);
|
|
+ int (*write_bytes)(struct tpm_tis_data *, u32, u16, const u8 *);
|
|
+ int (*read16)(struct tpm_tis_data *, u32, u16 *);
|
|
+ int (*read32)(struct tpm_tis_data *, u32, u32 *);
|
|
+ int (*write32)(struct tpm_tis_data *, u32, u32);
|
|
+};
|
|
+
|
|
+struct tis_vendor_timeout_override {
|
|
+ u32 did_vid;
|
|
+ long unsigned int timeout_us[4];
|
|
+};
|
|
+
|
|
+struct acpi_table_tpm2 {
|
|
+ struct acpi_table_header header;
|
|
+ u16 platform_class;
|
|
+ u16 reserved;
|
|
+ u64 control_address;
|
|
+ u32 start_method;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct tpm_info {
|
|
+ struct resource res;
|
|
+ int irq;
|
|
+};
|
|
+
|
|
+struct tpm_tis_tcg_phy {
|
|
+ struct tpm_tis_data priv;
|
|
+ void *iobase;
|
|
+};
|
|
+
|
|
+enum crb_defaults {
|
|
+ CRB_ACPI_START_REVISION_ID = 1,
|
|
+ CRB_ACPI_START_INDEX = 1,
|
|
+};
|
|
+
|
|
+enum crb_loc_ctrl {
|
|
+ CRB_LOC_CTRL_REQUEST_ACCESS = 1,
|
|
+ CRB_LOC_CTRL_RELINQUISH = 2,
|
|
+};
|
|
+
|
|
+enum crb_loc_state {
|
|
+ CRB_LOC_STATE_LOC_ASSIGNED = 2,
|
|
+ CRB_LOC_STATE_TPM_REG_VALID_STS = 128,
|
|
+};
|
|
+
|
|
+enum crb_ctrl_req {
|
|
+ CRB_CTRL_REQ_CMD_READY = 1,
|
|
+ CRB_CTRL_REQ_GO_IDLE = 2,
|
|
+};
|
|
+
|
|
+enum crb_ctrl_sts {
|
|
+ CRB_CTRL_STS_ERROR = 1,
|
|
+ CRB_CTRL_STS_TPM_IDLE = 2,
|
|
+};
|
|
+
|
|
+enum crb_start {
|
|
+ CRB_START_INVOKE = 1,
|
|
+};
|
|
+
|
|
+enum crb_cancel {
|
|
+ CRB_CANCEL_INVOKE = 1,
|
|
+};
|
|
+
|
|
+struct crb_regs_head {
|
|
+ u32 loc_state;
|
|
+ u32 reserved1;
|
|
+ u32 loc_ctrl;
|
|
+ u32 loc_sts;
|
|
+ u8 reserved2[32];
|
|
+ u64 intf_id;
|
|
+ u64 ctrl_ext;
|
|
+};
|
|
+
|
|
+struct crb_regs_tail {
|
|
+ u32 ctrl_req;
|
|
+ u32 ctrl_sts;
|
|
+ u32 ctrl_cancel;
|
|
+ u32 ctrl_start;
|
|
+ u32 ctrl_int_enable;
|
|
+ u32 ctrl_int_sts;
|
|
+ u32 ctrl_cmd_size;
|
|
+ u32 ctrl_cmd_pa_low;
|
|
+ u32 ctrl_cmd_pa_high;
|
|
+ u32 ctrl_rsp_size;
|
|
+ u64 ctrl_rsp_pa;
|
|
+};
|
|
+
|
|
+enum crb_status {
|
|
+ CRB_DRV_STS_COMPLETE = 1,
|
|
+};
|
|
+
|
|
+struct crb_priv {
|
|
+ u32 sm;
|
|
+ const char *hid;
|
|
+ void *iobase;
|
|
+ struct crb_regs_head *regs_h;
|
|
+ struct crb_regs_tail *regs_t;
|
|
+ u8 *cmd;
|
|
+ u8 *rsp;
|
|
+ u32 cmd_size;
|
|
+ u32 smc_func_id;
|
|
+};
|
|
+
|
|
+struct tpm2_crb_smc {
|
|
+ u32 interrupt;
|
|
+ u8 interrupt_flags;
|
|
+ u8 op_flags;
|
|
+ u16 reserved2;
|
|
+ u32 smc_func_id;
|
|
+};
|
|
+
|
|
+struct iommu_group {
|
|
+ struct kobject kobj;
|
|
+ struct kobject *devices_kobj;
|
|
+ struct list_head devices;
|
|
+ struct mutex mutex;
|
|
+ struct blocking_notifier_head notifier;
|
|
+ void *iommu_data;
|
|
+ void (*iommu_data_release)(void *);
|
|
+ char *name;
|
|
+ int id;
|
|
+ struct iommu_domain *default_domain;
|
|
+ struct iommu_domain *domain;
|
|
+ atomic_t domain_shared_ref;
|
|
+};
|
|
+
|
|
+struct iommu_fwspec {
|
|
+ const struct iommu_ops *ops;
|
|
+ struct fwnode_handle *iommu_fwnode;
|
|
+ void *iommu_priv;
|
|
+ u32 flags;
|
|
+ unsigned int num_ids;
|
|
+ unsigned int num_pasid_bits;
|
|
+ bool can_stall;
|
|
+ u32 ids[1];
|
|
+};
|
|
+
|
|
+struct iopf_device_param;
|
|
+
|
|
+struct iommu_fault_param;
|
|
+
|
|
+struct iommu_param {
|
|
+ struct mutex lock;
|
|
+ struct iommu_fault_param *fault_param;
|
|
+ struct iommu_sva_param *sva_param;
|
|
+ struct iopf_device_param *iopf_param;
|
|
+};
|
|
+
|
|
+struct pasid_table_config {
|
|
+ __u32 version;
|
|
+ __u32 bytes;
|
|
+ __u64 base_ptr;
|
|
+ __u8 pasid_bits;
|
|
+};
|
|
+
|
|
+enum iommu_inv_granularity {
|
|
+ IOMMU_INV_GRANU_DOMAIN = 1,
|
|
+ IOMMU_INV_GRANU_DEVICE = 2,
|
|
+ IOMMU_INV_GRANU_DOMAIN_PAGE = 3,
|
|
+ IOMMU_INV_GRANU_ALL_PASID = 4,
|
|
+ IOMMU_INV_GRANU_PASID_SEL = 5,
|
|
+ IOMMU_INV_GRANU_NG_ALL_PASID = 6,
|
|
+ IOMMU_INV_GRANU_NG_PASID = 7,
|
|
+ IOMMU_INV_GRANU_PAGE_PASID = 8,
|
|
+ IOMMU_INV_NR_GRANU = 9,
|
|
+};
|
|
+
|
|
+enum iommu_inv_type {
|
|
+ IOMMU_INV_TYPE_DTLB = 0,
|
|
+ IOMMU_INV_TYPE_TLB = 1,
|
|
+ IOMMU_INV_TYPE_PASID = 2,
|
|
+ IOMMU_INV_TYPE_CONTEXT = 3,
|
|
+ IOMMU_INV_NR_TYPE = 4,
|
|
+};
|
|
+
|
|
+struct tlb_invalidate_hdr {
|
|
+ __u32 version;
|
|
+ enum iommu_inv_type type;
|
|
+};
|
|
+
|
|
+struct tlb_invalidate_info {
|
|
+ struct tlb_invalidate_hdr hdr;
|
|
+ enum iommu_inv_granularity granularity;
|
|
+ __u32 flags;
|
|
+ __u8 size;
|
|
+ __u32 pasid;
|
|
+ __u64 addr;
|
|
+};
|
|
+
|
|
+typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, long unsigned int, int, void *);
|
|
+
|
|
+struct iommu_domain_geometry {
|
|
+ dma_addr_t aperture_start;
|
|
+ dma_addr_t aperture_end;
|
|
+ bool force_aperture;
|
|
+};
|
|
+
|
|
+struct iommu_domain {
|
|
+ unsigned int type;
|
|
+ const struct iommu_ops *ops;
|
|
+ long unsigned int pgsize_bitmap;
|
|
+ iommu_fault_handler_t handler;
|
|
+ void *handler_token;
|
|
+ struct iommu_domain_geometry geometry;
|
|
+ void *iova_cookie;
|
|
+ struct list_head mm_list;
|
|
+};
|
|
+
|
|
+struct iommu_fault_event;
|
|
+
|
|
+typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault_event *, void *);
|
|
+
|
|
+enum iommu_fault_type {
|
|
+ IOMMU_FAULT_DMA_UNRECOV = 1,
|
|
+ IOMMU_FAULT_PAGE_REQ = 2,
|
|
+};
|
|
+
|
|
+enum iommu_fault_reason {
|
|
+ IOMMU_FAULT_REASON_UNKNOWN = 0,
|
|
+ IOMMU_FAULT_REASON_INTERNAL = 1,
|
|
+ IOMMU_FAULT_REASON_PASID_FETCH = 2,
|
|
+ IOMMU_FAULT_REASON_PASID_INVALID = 3,
|
|
+ IOMMU_FAULT_REASON_PGD_FETCH = 4,
|
|
+ IOMMU_FAULT_REASON_PTE_FETCH = 5,
|
|
+ IOMMU_FAULT_REASON_PERMISSION = 6,
|
|
+};
|
|
+
|
|
+struct iommu_fault_event {
|
|
+ struct list_head list;
|
|
+ enum iommu_fault_type type;
|
|
+ enum iommu_fault_reason reason;
|
|
+ u64 addr;
|
|
+ u32 pasid;
|
|
+ u32 page_req_group_id;
|
|
+ u32 last_req: 1;
|
|
+ u32 pasid_valid: 1;
|
|
+ u32 prot;
|
|
+ u64 device_private;
|
|
+ u64 iommu_private;
|
|
+ u64 expire;
|
|
+};
|
|
+
|
|
+typedef int (*iommu_mm_exit_handler_t)(struct device *, int, void *);
|
|
+
|
|
+struct io_mm {
|
|
+ int pasid;
|
|
+ long unsigned int flags;
|
|
+ struct list_head devices;
|
|
+ struct kref kref;
|
|
+ struct mmu_notifier notifier;
|
|
+ struct mm_struct *mm;
|
|
+ void (*release)(struct io_mm *);
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+enum iommu_resv_type {
|
|
+ IOMMU_RESV_DIRECT = 0,
|
|
+ IOMMU_RESV_RESERVED = 1,
|
|
+ IOMMU_RESV_MSI = 2,
|
|
+ IOMMU_RESV_SW_MSI = 3,
|
|
+};
|
|
+
|
|
+struct iommu_resv_region {
|
|
+ struct list_head list;
|
|
+ phys_addr_t start;
|
|
+ size_t length;
|
|
+ int prot;
|
|
+ enum iommu_resv_type type;
|
|
+};
|
|
+
|
|
+enum page_response_code {
|
|
+ IOMMU_PAGE_RESP_SUCCESS = 0,
|
|
+ IOMMU_PAGE_RESP_INVALID = 1,
|
|
+ IOMMU_PAGE_RESP_FAILURE = 2,
|
|
+};
|
|
+
|
|
+struct page_response_msg {
|
|
+ u64 addr;
|
|
+ u32 pasid;
|
|
+ enum page_response_code resp_code;
|
|
+ u32 pasid_present: 1;
|
|
+ u32 page_req_group_id;
|
|
+ u64 private_data;
|
|
+};
|
|
+
|
|
+struct iommu_sva_param {
|
|
+ long unsigned int features;
|
|
+ unsigned int min_pasid;
|
|
+ unsigned int max_pasid;
|
|
+ struct list_head mm_list;
|
|
+ iommu_mm_exit_handler_t mm_exit;
|
|
+};
|
|
+
|
|
+struct iommu_device {
|
|
+ struct list_head list;
|
|
+ const struct iommu_ops *ops;
|
|
+ struct fwnode_handle *fwnode;
|
|
+ struct device *dev;
|
|
+};
|
|
+
|
|
+struct iommu_fault_param {
|
|
+ iommu_dev_fault_handler_t handler;
|
|
+ struct list_head faults;
|
|
+ struct timer_list timer;
|
|
+ struct mutex lock;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+struct iommu_callback_data {
|
|
+ const struct iommu_ops *ops;
|
|
+};
|
|
+
|
|
+struct group_device {
|
|
+ struct list_head list;
|
|
+ struct device *dev;
|
|
+ char *name;
|
|
+};
|
|
+
|
|
+struct iommu_group_attribute {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct iommu_group *, char *);
|
|
+ ssize_t (*store)(struct iommu_group *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct group_for_pci_data {
|
|
+ struct pci_dev *pdev;
|
|
+ struct iommu_group *group;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_iommu_group_event {
|
|
+ struct trace_entry ent;
|
|
+ int gid;
|
|
+ u32 __data_loc_device;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_iommu_device_event {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_device;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_map {
|
|
+ struct trace_entry ent;
|
|
+ u64 iova;
|
|
+ u64 paddr;
|
|
+ size_t size;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_unmap {
|
|
+ struct trace_entry ent;
|
|
+ u64 iova;
|
|
+ size_t size;
|
|
+ size_t unmapped_size;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_iommu_error {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_device;
|
|
+ u32 __data_loc_driver;
|
|
+ u64 iova;
|
|
+ int flags;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_dev_fault {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_device;
|
|
+ int type;
|
|
+ int reason;
|
|
+ u64 addr;
|
|
+ u32 pasid;
|
|
+ u32 pgid;
|
|
+ u32 last_req;
|
|
+ u32 prot;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_dev_page_response {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_device;
|
|
+ int code;
|
|
+ u64 addr;
|
|
+ u32 pasid;
|
|
+ u32 pgid;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sva_invalidate {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_device;
|
|
+ int type;
|
|
+ u32 granu;
|
|
+ u32 flags;
|
|
+ u8 size;
|
|
+ u32 pasid;
|
|
+ u64 addr;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_iommu_group_event {
|
|
+ u32 device;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_iommu_device_event {
|
|
+ u32 device;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_map {};
|
|
+
|
|
+struct trace_event_data_offsets_unmap {};
|
|
+
|
|
+struct trace_event_data_offsets_iommu_error {
|
|
+ u32 device;
|
|
+ u32 driver;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_dev_fault {
|
|
+ u32 device;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_dev_page_response {
|
|
+ u32 device;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_sva_invalidate {
|
|
+ u32 device;
|
|
+};
|
|
+
|
|
+struct iova {
|
|
+ struct rb_node node;
|
|
+ long unsigned int pfn_hi;
|
|
+ long unsigned int pfn_lo;
|
|
+};
|
|
+
|
|
+struct iova_magazine;
|
|
+
|
|
+struct iova_cpu_rcache;
|
|
+
|
|
+struct iova_rcache {
|
|
+ spinlock_t lock;
|
|
+ long unsigned int depot_size;
|
|
+ struct iova_magazine *depot[32];
|
|
+ struct iova_cpu_rcache *cpu_rcaches;
|
|
+};
|
|
+
|
|
+struct iova_magazine {
|
|
+ long unsigned int size;
|
|
+ long unsigned int pfns[128];
|
|
+};
|
|
+
|
|
+struct iova_cpu_rcache {
|
|
+ spinlock_t lock;
|
|
+ struct iova_magazine *loaded;
|
|
+ struct iova_magazine *prev;
|
|
+};
|
|
+
|
|
+struct iova_domain;
|
|
+
|
|
+typedef void (*iova_flush_cb)(struct iova_domain *);
|
|
+
|
|
+typedef void (*iova_entry_dtor)(long unsigned int);
|
|
+
|
|
+struct iova_fq;
|
|
+
|
|
+struct iova_domain {
|
|
+ spinlock_t iova_rbtree_lock;
|
|
+ struct rb_root rbroot;
|
|
+ struct rb_node *cached_node;
|
|
+ struct rb_node *cached32_node;
|
|
+ long unsigned int granule;
|
|
+ long unsigned int start_pfn;
|
|
+ long unsigned int dma_32bit_pfn;
|
|
+ long unsigned int max32_alloc_size;
|
|
+ struct iova_fq *fq;
|
|
+ atomic64_t fq_flush_start_cnt;
|
|
+ atomic64_t fq_flush_finish_cnt;
|
|
+ struct iova anchor;
|
|
+ struct iova_rcache rcaches[6];
|
|
+ iova_flush_cb flush_cb;
|
|
+ iova_entry_dtor entry_dtor;
|
|
+ struct timer_list fq_timer;
|
|
+ atomic_t fq_timer_on;
|
|
+ struct work_struct free_iova_work;
|
|
+};
|
|
+
|
|
+struct iova_fq_entry {
|
|
+ long unsigned int iova_pfn;
|
|
+ long unsigned int pages;
|
|
+ long unsigned int data;
|
|
+ u64 counter;
|
|
+};
|
|
+
|
|
+struct iova_fq {
|
|
+ struct iova_fq_entry entries[256];
|
|
+ unsigned int head;
|
|
+ unsigned int tail;
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct vcpu_data;
|
|
+
|
|
+struct amd_iommu_pi_data {
|
|
+ u32 ga_tag;
|
|
+ u32 prev_ga_tag;
|
|
+ u64 base;
|
|
+ bool is_guest_mode;
|
|
+ struct vcpu_data *vcpu_data;
|
|
+ void *ir_data;
|
|
+};
|
|
+
|
|
+struct vcpu_data {
|
|
+ u64 pi_desc_addr;
|
|
+ u32 vector;
|
|
+};
|
|
+
|
|
+struct amd_iommu_device_info {
|
|
+ int max_pasids;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+struct irq_remap_table {
|
|
+ raw_spinlock_t lock;
|
|
+ unsigned int min_index;
|
|
+ u32 *table;
|
|
+};
|
|
+
|
|
+struct amd_iommu_fault {
|
|
+ u64 address;
|
|
+ u32 pasid;
|
|
+ u16 device_id;
|
|
+ u16 tag;
|
|
+ u16 flags;
|
|
+};
|
|
+
|
|
+struct protection_domain {
|
|
+ struct list_head list;
|
|
+ struct list_head dev_list;
|
|
+ struct iommu_domain domain;
|
|
+ spinlock_t lock;
|
|
+ struct mutex api_lock;
|
|
+ u16 id;
|
|
+ int mode;
|
|
+ u64 *pt_root;
|
|
+ int glx;
|
|
+ u64 *gcr3_tbl;
|
|
+ long unsigned int flags;
|
|
+ bool updated;
|
|
+ unsigned int dev_cnt;
|
|
+ unsigned int dev_iommu[32];
|
|
+};
|
|
+
|
|
+struct amd_irte_ops;
|
|
+
|
|
+struct amd_iommu___2 {
|
|
+ struct list_head list;
|
|
+ int index;
|
|
+ raw_spinlock_t lock;
|
|
+ struct pci_dev *dev;
|
|
+ struct pci_dev *root_pdev;
|
|
+ u64 mmio_phys;
|
|
+ u64 mmio_phys_end;
|
|
+ u8 *mmio_base;
|
|
+ u32 cap;
|
|
+ u8 acpi_flags;
|
|
+ u64 features;
|
|
+ bool is_iommu_v2;
|
|
+ u16 devid;
|
|
+ u16 cap_ptr;
|
|
+ u16 pci_seg;
|
|
+ u64 exclusion_start;
|
|
+ u64 exclusion_length;
|
|
+ u8 *cmd_buf;
|
|
+ u32 cmd_buf_head;
|
|
+ u32 cmd_buf_tail;
|
|
+ u8 *evt_buf;
|
|
+ u8 *ppr_log;
|
|
+ u8 *ga_log;
|
|
+ u8 *ga_log_tail;
|
|
+ bool int_enabled;
|
|
+ bool need_sync;
|
|
+ struct iommu_device iommu;
|
|
+ u32 stored_addr_lo;
|
|
+ u32 stored_addr_hi;
|
|
+ u32 stored_l1[108];
|
|
+ u32 stored_l2[131];
|
|
+ u8 max_banks;
|
|
+ u8 max_counters;
|
|
+ struct irq_domain *ir_domain;
|
|
+ struct irq_domain *msi_domain;
|
|
+ struct amd_irte_ops *irte_ops;
|
|
+ u32 flags;
|
|
+ volatile u64 cmd_sem;
|
|
+};
|
|
+
|
|
+struct amd_irte_ops {
|
|
+ void (*prepare)(void *, u32, u32, u8, u32, int);
|
|
+ void (*activate)(void *, u16, u16);
|
|
+ void (*deactivate)(void *, u16, u16);
|
|
+ void (*set_affinity)(void *, u16, u16, u8, u32);
|
|
+ void * (*get)(struct irq_remap_table *, int);
|
|
+ void (*set_allocated)(struct irq_remap_table *, int);
|
|
+ bool (*is_allocated)(struct irq_remap_table *, int);
|
|
+ void (*clear_allocated)(struct irq_remap_table *, int);
|
|
+};
|
|
+
|
|
+struct acpihid_map_entry {
|
|
+ struct list_head list;
|
|
+ u8 uid[256];
|
|
+ u8 hid[9];
|
|
+ u16 devid;
|
|
+ u16 root_devid;
|
|
+ bool cmd_line;
|
|
+ struct iommu_group *group;
|
|
+};
|
|
+
|
|
+struct devid_map {
|
|
+ struct list_head list;
|
|
+ u8 id;
|
|
+ u16 devid;
|
|
+ bool cmd_line;
|
|
+};
|
|
+
|
|
+struct iommu_dev_data {
|
|
+ struct list_head list;
|
|
+ struct llist_node dev_data_list;
|
|
+ struct protection_domain *domain;
|
|
+ u16 devid;
|
|
+ u16 alias;
|
|
+ bool iommu_v2;
|
|
+ bool passthrough;
|
|
+ struct {
|
|
+ bool enabled;
|
|
+ int qdep;
|
|
+ } ats;
|
|
+ bool pri_tlp;
|
|
+ u32 errata;
|
|
+ bool use_vapic;
|
|
+ bool defer_attach;
|
|
+ struct ratelimit_state rs;
|
|
+};
|
|
+
|
|
+struct dev_table_entry {
|
|
+ u64 data[4];
|
|
+};
|
|
+
|
|
+struct unity_map_entry {
|
|
+ struct list_head list;
|
|
+ u16 devid_start;
|
|
+ u16 devid_end;
|
|
+ u64 address_start;
|
|
+ u64 address_end;
|
|
+ int prot;
|
|
+};
|
|
+
|
|
+enum amd_iommu_intr_mode_type {
|
|
+ AMD_IOMMU_GUEST_IR_LEGACY = 0,
|
|
+ AMD_IOMMU_GUEST_IR_LEGACY_GA = 1,
|
|
+ AMD_IOMMU_GUEST_IR_VAPIC = 2,
|
|
+};
|
|
+
|
|
+union irte {
|
|
+ u32 val;
|
|
+ struct {
|
|
+ u32 valid: 1;
|
|
+ u32 no_fault: 1;
|
|
+ u32 int_type: 3;
|
|
+ u32 rq_eoi: 1;
|
|
+ u32 dm: 1;
|
|
+ u32 rsvd_1: 1;
|
|
+ u32 destination: 8;
|
|
+ u32 vector: 8;
|
|
+ u32 rsvd_2: 8;
|
|
+ } fields;
|
|
+};
|
|
+
|
|
+union irte_ga_lo {
|
|
+ u64 val;
|
|
+ struct {
|
|
+ u64 valid: 1;
|
|
+ u64 no_fault: 1;
|
|
+ u64 int_type: 3;
|
|
+ u64 rq_eoi: 1;
|
|
+ u64 dm: 1;
|
|
+ u64 guest_mode: 1;
|
|
+ u64 destination: 24;
|
|
+ u64 ga_tag: 32;
|
|
+ } fields_remap;
|
|
+ struct {
|
|
+ u64 valid: 1;
|
|
+ u64 no_fault: 1;
|
|
+ u64 ga_log_intr: 1;
|
|
+ u64 rsvd1: 3;
|
|
+ u64 is_run: 1;
|
|
+ u64 guest_mode: 1;
|
|
+ u64 destination: 24;
|
|
+ u64 ga_tag: 32;
|
|
+ } fields_vapic;
|
|
+};
|
|
+
|
|
+union irte_ga_hi {
|
|
+ u64 val;
|
|
+ struct {
|
|
+ u64 vector: 8;
|
|
+ u64 rsvd_1: 4;
|
|
+ u64 ga_root_ptr: 40;
|
|
+ u64 rsvd_2: 4;
|
|
+ u64 destination: 8;
|
|
+ } fields;
|
|
+};
|
|
+
|
|
+struct irte_ga {
|
|
+ union irte_ga_lo lo;
|
|
+ union irte_ga_hi hi;
|
|
+};
|
|
+
|
|
+struct irq_2_irte {
|
|
+ u16 devid;
|
|
+ u16 index;
|
|
+};
|
|
+
|
|
+struct amd_ir_data {
|
|
+ u32 cached_ga_tag;
|
|
+ struct irq_2_irte irq_2_irte;
|
|
+ struct msi_msg msi_entry;
|
|
+ void *entry;
|
|
+ void *ref;
|
|
+};
|
|
+
|
|
+struct irq_remap_ops {
|
|
+ int capability;
|
|
+ int (*prepare)();
|
|
+ int (*enable)();
|
|
+ void (*disable)();
|
|
+ int (*reenable)(int);
|
|
+ int (*enable_faulting)();
|
|
+ struct irq_domain * (*get_ir_irq_domain)(struct irq_alloc_info *);
|
|
+ struct irq_domain * (*get_irq_domain)(struct irq_alloc_info *);
|
|
+};
|
|
+
|
|
+struct iommu_cmd {
|
|
+ u32 data[4];
|
|
+};
|
|
+
|
|
+struct dma_ops_domain {
|
|
+ struct protection_domain domain;
|
|
+ struct iova_domain iovad;
|
|
+};
|
|
+
|
|
+enum irq_remap_cap {
|
|
+ IRQ_POSTING_CAP = 0,
|
|
+};
|
|
+
|
|
+struct ivhd_header {
|
|
+ u8 type;
|
|
+ u8 flags;
|
|
+ u16 length;
|
|
+ u16 devid;
|
|
+ u16 cap_ptr;
|
|
+ u64 mmio_phys;
|
|
+ u16 pci_seg;
|
|
+ u16 info;
|
|
+ u32 efr_attr;
|
|
+ u64 efr_reg;
|
|
+ u64 res;
|
|
+};
|
|
+
|
|
+struct ivhd_entry {
|
|
+ u8 type;
|
|
+ u16 devid;
|
|
+ u8 flags;
|
|
+ u32 ext;
|
|
+ u32 hidh;
|
|
+ u64 cid;
|
|
+ u8 uidf;
|
|
+ u8 uidl;
|
|
+ u8 uid;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct ivmd_header {
|
|
+ u8 type;
|
|
+ u8 flags;
|
|
+ u16 length;
|
|
+ u16 devid;
|
|
+ u16 aux;
|
|
+ u64 resv;
|
|
+ u64 range_start;
|
|
+ u64 range_length;
|
|
+};
|
|
+
|
|
+enum iommu_init_state {
|
|
+ IOMMU_START_STATE = 0,
|
|
+ IOMMU_IVRS_DETECTED = 1,
|
|
+ IOMMU_ACPI_FINISHED = 2,
|
|
+ IOMMU_ENABLED = 3,
|
|
+ IOMMU_PCI_INIT = 4,
|
|
+ IOMMU_INTERRUPTS_EN = 5,
|
|
+ IOMMU_DMA_OPS = 6,
|
|
+ IOMMU_INITIALIZED = 7,
|
|
+ IOMMU_NOT_FOUND = 8,
|
|
+ IOMMU_INIT_ERROR = 9,
|
|
+ IOMMU_CMDLINE_DISABLED = 10,
|
|
+};
|
|
+
|
|
+struct ivrs_quirk_entry {
|
|
+ u8 id;
|
|
+ u16 devid;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ DELL_INSPIRON_7375 = 0,
|
|
+ DELL_LATITUDE_5495 = 1,
|
|
+ LENOVO_IDEAPAD_330S_15ARR = 2,
|
|
+};
|
|
+
|
|
+struct acpi_table_dmar {
|
|
+ struct acpi_table_header header;
|
|
+ u8 width;
|
|
+ u8 flags;
|
|
+ u8 reserved[10];
|
|
+};
|
|
+
|
|
+struct acpi_dmar_header {
|
|
+ u16 type;
|
|
+ u16 length;
|
|
+};
|
|
+
|
|
+enum acpi_dmar_type {
|
|
+ ACPI_DMAR_TYPE_HARDWARE_UNIT = 0,
|
|
+ ACPI_DMAR_TYPE_RESERVED_MEMORY = 1,
|
|
+ ACPI_DMAR_TYPE_ROOT_ATS = 2,
|
|
+ ACPI_DMAR_TYPE_HARDWARE_AFFINITY = 3,
|
|
+ ACPI_DMAR_TYPE_NAMESPACE = 4,
|
|
+ ACPI_DMAR_TYPE_RESERVED = 5,
|
|
+};
|
|
+
|
|
+struct acpi_dmar_device_scope {
|
|
+ u8 entry_type;
|
|
+ u8 length;
|
|
+ u16 reserved;
|
|
+ u8 enumeration_id;
|
|
+ u8 bus;
|
|
+};
|
|
+
|
|
+enum acpi_dmar_scope_type {
|
|
+ ACPI_DMAR_SCOPE_TYPE_NOT_USED = 0,
|
|
+ ACPI_DMAR_SCOPE_TYPE_ENDPOINT = 1,
|
|
+ ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2,
|
|
+ ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3,
|
|
+ ACPI_DMAR_SCOPE_TYPE_HPET = 4,
|
|
+ ACPI_DMAR_SCOPE_TYPE_NAMESPACE = 5,
|
|
+ ACPI_DMAR_SCOPE_TYPE_RESERVED = 6,
|
|
+};
|
|
+
|
|
+struct acpi_dmar_pci_path {
|
|
+ u8 device;
|
|
+ u8 function;
|
|
+};
|
|
+
|
|
+struct acpi_dmar_hardware_unit {
|
|
+ struct acpi_dmar_header header;
|
|
+ u8 flags;
|
|
+ u8 reserved;
|
|
+ u16 segment;
|
|
+ u64 address;
|
|
+};
|
|
+
|
|
+struct acpi_dmar_reserved_memory {
|
|
+ struct acpi_dmar_header header;
|
|
+ u16 reserved;
|
|
+ u16 segment;
|
|
+ u64 base_address;
|
|
+ u64 end_address;
|
|
+};
|
|
+
|
|
+struct acpi_dmar_atsr {
|
|
+ struct acpi_dmar_header header;
|
|
+ u8 flags;
|
|
+ u8 reserved;
|
|
+ u16 segment;
|
|
+};
|
|
+
|
|
+struct acpi_dmar_rhsa {
|
|
+ struct acpi_dmar_header header;
|
|
+ u32 reserved;
|
|
+ u64 base_address;
|
|
+ u32 proximity_domain;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_dmar_andd {
|
|
+ struct acpi_dmar_header header;
|
|
+ u8 reserved[3];
|
|
+ u8 device_number;
|
|
+ char device_name[1];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct dmar_dev_scope {
|
|
+ struct device *dev;
|
|
+ u8 bus;
|
|
+ u8 devfn;
|
|
+};
|
|
+
|
|
+struct intel_iommu;
|
|
+
|
|
+struct dmar_drhd_unit {
|
|
+ struct list_head list;
|
|
+ struct acpi_dmar_header *hdr;
|
|
+ u64 reg_base_addr;
|
|
+ struct dmar_dev_scope *devices;
|
|
+ int devices_cnt;
|
|
+ u16 segment;
|
|
+ u8 ignored: 1;
|
|
+ u8 include_all: 1;
|
|
+ struct intel_iommu *iommu;
|
|
+};
|
|
+
|
|
+struct iommu_flush {
|
|
+ void (*flush_context)(struct intel_iommu *, u16, u16, u8, u64);
|
|
+ void (*flush_iotlb)(struct intel_iommu *, u16, u64, unsigned int, u64);
|
|
+};
|
|
+
|
|
+struct dmar_domain;
|
|
+
|
|
+struct root_entry;
|
|
+
|
|
+struct q_inval;
|
|
+
|
|
+struct ir_table;
|
|
+
|
|
+struct intel_iommu {
|
|
+ void *reg;
|
|
+ u64 reg_phys;
|
|
+ u64 reg_size;
|
|
+ u64 cap;
|
|
+ u64 ecap;
|
|
+ u32 gcmd;
|
|
+ raw_spinlock_t register_lock;
|
|
+ int seq_id;
|
|
+ int agaw;
|
|
+ int msagaw;
|
|
+ unsigned int irq;
|
|
+ unsigned int pr_irq;
|
|
+ u16 segment;
|
|
+ unsigned char name[13];
|
|
+ long unsigned int *domain_ids;
|
|
+ struct dmar_domain ***domains;
|
|
+ spinlock_t lock;
|
|
+ struct root_entry *root_entry;
|
|
+ struct iommu_flush flush;
|
|
+ struct q_inval *qi;
|
|
+ u32 *iommu_state;
|
|
+ struct ir_table *ir_table;
|
|
+ struct irq_domain *ir_domain;
|
|
+ struct irq_domain *ir_msi_domain;
|
|
+ struct iommu_device iommu;
|
|
+ int node;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+struct dmar_pci_path {
|
|
+ u8 bus;
|
|
+ u8 device;
|
|
+ u8 function;
|
|
+};
|
|
+
|
|
+struct dmar_pci_notify_info {
|
|
+ struct pci_dev *dev;
|
|
+ long unsigned int event;
|
|
+ int bus;
|
|
+ u16 seg;
|
|
+ u16 level;
|
|
+ struct dmar_pci_path path[0];
|
|
+};
|
|
+
|
|
+struct irte___2 {
|
|
+ union {
|
|
+ struct {
|
|
+ __u64 present: 1;
|
|
+ __u64 fpd: 1;
|
|
+ __u64 __res0: 6;
|
|
+ __u64 avail: 4;
|
|
+ __u64 __res1: 3;
|
|
+ __u64 pst: 1;
|
|
+ __u64 vector: 8;
|
|
+ __u64 __res2: 40;
|
|
+ };
|
|
+ struct {
|
|
+ __u64 r_present: 1;
|
|
+ __u64 r_fpd: 1;
|
|
+ __u64 dst_mode: 1;
|
|
+ __u64 redir_hint: 1;
|
|
+ __u64 trigger_mode: 1;
|
|
+ __u64 dlvry_mode: 3;
|
|
+ __u64 r_avail: 4;
|
|
+ __u64 r_res0: 4;
|
|
+ __u64 r_vector: 8;
|
|
+ __u64 r_res1: 8;
|
|
+ __u64 dest_id: 32;
|
|
+ };
|
|
+ struct {
|
|
+ __u64 p_present: 1;
|
|
+ __u64 p_fpd: 1;
|
|
+ __u64 p_res0: 6;
|
|
+ __u64 p_avail: 4;
|
|
+ __u64 p_res1: 2;
|
|
+ __u64 p_urgent: 1;
|
|
+ __u64 p_pst: 1;
|
|
+ __u64 p_vector: 8;
|
|
+ __u64 p_res2: 14;
|
|
+ __u64 pda_l: 26;
|
|
+ };
|
|
+ __u64 low;
|
|
+ };
|
|
+ union {
|
|
+ struct {
|
|
+ __u64 sid: 16;
|
|
+ __u64 sq: 2;
|
|
+ __u64 svt: 2;
|
|
+ __u64 __res3: 44;
|
|
+ };
|
|
+ struct {
|
|
+ __u64 p_sid: 16;
|
|
+ __u64 p_sq: 2;
|
|
+ __u64 p_svt: 2;
|
|
+ __u64 p_res3: 12;
|
|
+ __u64 pda_h: 32;
|
|
+ };
|
|
+ __u64 high;
|
|
+ };
|
|
+};
|
|
+
|
|
+enum {
|
|
+ QI_FREE = 0,
|
|
+ QI_IN_USE = 1,
|
|
+ QI_DONE = 2,
|
|
+ QI_ABORT = 3,
|
|
+};
|
|
+
|
|
+struct qi_desc {
|
|
+ u64 low;
|
|
+ u64 high;
|
|
+};
|
|
+
|
|
+struct q_inval {
|
|
+ raw_spinlock_t q_lock;
|
|
+ struct qi_desc *desc;
|
|
+ int *desc_status;
|
|
+ int free_head;
|
|
+ int free_tail;
|
|
+ int free_cnt;
|
|
+};
|
|
+
|
|
+struct ir_table {
|
|
+ struct irte___2 *base;
|
|
+ long unsigned int *bitmap;
|
|
+};
|
|
+
|
|
+struct dma_pte;
|
|
+
|
|
+struct dmar_domain {
|
|
+ int nid;
|
|
+ unsigned int iommu_refcnt[128];
|
|
+ u16 iommu_did[128];
|
|
+ bool has_iotlb_device;
|
|
+ struct list_head devices;
|
|
+ struct iova_domain iovad;
|
|
+ struct dma_pte *pgd;
|
|
+ int gaw;
|
|
+ int agaw;
|
|
+ int flags;
|
|
+ int iommu_coherency;
|
|
+ int iommu_snooping;
|
|
+ int iommu_count;
|
|
+ int iommu_superpage;
|
|
+ u64 max_addr;
|
|
+ struct iommu_domain domain;
|
|
+};
|
|
+
|
|
+typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
|
|
+
|
|
+struct dmar_res_callback {
|
|
+ dmar_res_handler_t cb[5];
|
|
+ void *arg[5];
|
|
+ bool ignore_unhandled;
|
|
+ bool print_entry;
|
|
+};
|
|
+
|
|
+enum faulttype {
|
|
+ DMA_REMAP = 0,
|
|
+ INTR_REMAP = 1,
|
|
+ UNKNOWN = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SR_DMAR_FECTL_REG = 0,
|
|
+ SR_DMAR_FEDATA_REG = 1,
|
|
+ SR_DMAR_FEADDR_REG = 2,
|
|
+ SR_DMAR_FEUADDR_REG = 3,
|
|
+ MAX_SR_DMAR_REGS = 4,
|
|
+};
|
|
+
|
|
+struct dma_pte {
|
|
+ u64 val;
|
|
+};
|
|
+
|
|
+struct root_entry {
|
|
+ u64 lo;
|
|
+ u64 hi;
|
|
+};
|
|
+
|
|
+struct pasid_table;
|
|
+
|
|
+struct device_domain_info {
|
|
+ struct list_head link;
|
|
+ struct list_head global;
|
|
+ struct list_head table;
|
|
+ u8 bus;
|
|
+ u8 devfn;
|
|
+ u16 pfsid;
|
|
+ u8 pasid_supported: 3;
|
|
+ u8 pasid_enabled: 1;
|
|
+ u8 pri_supported: 1;
|
|
+ u8 pri_enabled: 1;
|
|
+ u8 ats_supported: 1;
|
|
+ u8 ats_enabled: 1;
|
|
+ u8 ats_qdep;
|
|
+ struct device *dev;
|
|
+ struct intel_iommu *iommu;
|
|
+ struct dmar_domain *domain;
|
|
+ struct pasid_table *pasid_table;
|
|
+};
|
|
+
|
|
+struct pasid_table {
|
|
+ void *table;
|
|
+ int order;
|
|
+ int max_pasid;
|
|
+ struct list_head dev;
|
|
+};
|
|
+
|
|
+struct context_entry {
|
|
+ u64 lo;
|
|
+ u64 hi;
|
|
+};
|
|
+
|
|
+struct dmar_rmrr_unit {
|
|
+ struct list_head list;
|
|
+ struct acpi_dmar_header *hdr;
|
|
+ u64 base_address;
|
|
+ u64 end_address;
|
|
+ struct dmar_dev_scope *devices;
|
|
+ int devices_cnt;
|
|
+};
|
|
+
|
|
+struct dmar_atsr_unit {
|
|
+ struct list_head list;
|
|
+ struct acpi_dmar_header *hdr;
|
|
+ struct dmar_dev_scope *devices;
|
|
+ int devices_cnt;
|
|
+ u8 include_all: 1;
|
|
+};
|
|
+
|
|
+struct domain_context_mapping_data {
|
|
+ struct dmar_domain *domain;
|
|
+ struct intel_iommu *iommu;
|
|
+};
|
|
+
|
|
+struct pasid_entry {
|
|
+ u64 val;
|
|
+};
|
|
+
|
|
+struct pasid_table_opaque {
|
|
+ struct pasid_table **pasid_table;
|
|
+ int segment;
|
|
+ int bus;
|
|
+ int devfn;
|
|
+};
|
|
+
|
|
+enum irq_mode {
|
|
+ IRQ_REMAPPING = 0,
|
|
+ IRQ_POSTING = 1,
|
|
+};
|
|
+
|
|
+struct ioapic_scope {
|
|
+ struct intel_iommu *iommu;
|
|
+ unsigned int id;
|
|
+ unsigned int bus;
|
|
+ unsigned int devfn;
|
|
+};
|
|
+
|
|
+struct hpet_scope {
|
|
+ struct intel_iommu *iommu;
|
|
+ u8 id;
|
|
+ unsigned int bus;
|
|
+ unsigned int devfn;
|
|
+};
|
|
+
|
|
+struct irq_2_iommu {
|
|
+ struct intel_iommu *iommu;
|
|
+ u16 irte_index;
|
|
+ u16 sub_handle;
|
|
+ u8 irte_mask;
|
|
+ enum irq_mode mode;
|
|
+};
|
|
+
|
|
+struct intel_ir_data {
|
|
+ struct irq_2_iommu irq_2_iommu;
|
|
+ struct irte___2 irte_entry;
|
|
+ union {
|
|
+ struct msi_msg msi_entry;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct set_msi_sid_data {
|
|
+ struct pci_dev *pdev;
|
|
+ u16 alias;
|
|
+};
|
|
+
|
|
+struct mipi_dsi_msg {
|
|
+ u8 channel;
|
|
+ u8 type;
|
|
+ u16 flags;
|
|
+ size_t tx_len;
|
|
+ const void *tx_buf;
|
|
+ size_t rx_len;
|
|
+ void *rx_buf;
|
|
+};
|
|
+
|
|
+struct mipi_dsi_packet {
|
|
+ size_t size;
|
|
+ u8 header[4];
|
|
+ size_t payload_length;
|
|
+ const u8 *payload;
|
|
+};
|
|
+
|
|
+struct mipi_dsi_host;
|
|
+
|
|
+struct mipi_dsi_device;
|
|
+
|
|
+struct mipi_dsi_host_ops {
|
|
+ int (*attach)(struct mipi_dsi_host *, struct mipi_dsi_device *);
|
|
+ int (*detach)(struct mipi_dsi_host *, struct mipi_dsi_device *);
|
|
+ ssize_t (*transfer)(struct mipi_dsi_host *, const struct mipi_dsi_msg *);
|
|
+};
|
|
+
|
|
+struct mipi_dsi_host {
|
|
+ struct device *dev;
|
|
+ const struct mipi_dsi_host_ops *ops;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+enum mipi_dsi_pixel_format {
|
|
+ MIPI_DSI_FMT_RGB888 = 0,
|
|
+ MIPI_DSI_FMT_RGB666 = 1,
|
|
+ MIPI_DSI_FMT_RGB666_PACKED = 2,
|
|
+ MIPI_DSI_FMT_RGB565 = 3,
|
|
+};
|
|
+
|
|
+struct mipi_dsi_device {
|
|
+ struct mipi_dsi_host *host;
|
|
+ struct device dev;
|
|
+ char name[20];
|
|
+ unsigned int channel;
|
|
+ unsigned int lanes;
|
|
+ enum mipi_dsi_pixel_format format;
|
|
+ long unsigned int mode_flags;
|
|
+};
|
|
+
|
|
+struct mipi_dsi_device_info {
|
|
+ char type[20];
|
|
+ u32 channel;
|
|
+ struct device_node *node;
|
|
+};
|
|
+
|
|
+enum mipi_dsi_dcs_tear_mode {
|
|
+ MIPI_DSI_DCS_TEAR_MODE_VBLANK = 0,
|
|
+ MIPI_DSI_DCS_TEAR_MODE_VHBLANK = 1,
|
|
+};
|
|
+
|
|
+struct mipi_dsi_driver {
|
|
+ struct device_driver driver;
|
|
+ int (*probe)(struct mipi_dsi_device *);
|
|
+ int (*remove)(struct mipi_dsi_device *);
|
|
+ void (*shutdown)(struct mipi_dsi_device *);
|
|
+};
|
|
+
|
|
+enum {
|
|
+ MIPI_DSI_V_SYNC_START = 1,
|
|
+ MIPI_DSI_V_SYNC_END = 17,
|
|
+ MIPI_DSI_H_SYNC_START = 33,
|
|
+ MIPI_DSI_H_SYNC_END = 49,
|
|
+ MIPI_DSI_COLOR_MODE_OFF = 2,
|
|
+ MIPI_DSI_COLOR_MODE_ON = 18,
|
|
+ MIPI_DSI_SHUTDOWN_PERIPHERAL = 34,
|
|
+ MIPI_DSI_TURN_ON_PERIPHERAL = 50,
|
|
+ MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM = 3,
|
|
+ MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM = 19,
|
|
+ MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM = 35,
|
|
+ MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM = 4,
|
|
+ MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM = 20,
|
|
+ MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM = 36,
|
|
+ MIPI_DSI_DCS_SHORT_WRITE = 5,
|
|
+ MIPI_DSI_DCS_SHORT_WRITE_PARAM = 21,
|
|
+ MIPI_DSI_DCS_READ = 6,
|
|
+ MIPI_DSI_DCS_COMPRESSION_MODE = 7,
|
|
+ MIPI_DSI_PPS_LONG_WRITE = 10,
|
|
+ MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE = 55,
|
|
+ MIPI_DSI_END_OF_TRANSMISSION = 8,
|
|
+ MIPI_DSI_NULL_PACKET = 9,
|
|
+ MIPI_DSI_BLANKING_PACKET = 25,
|
|
+ MIPI_DSI_GENERIC_LONG_WRITE = 41,
|
|
+ MIPI_DSI_DCS_LONG_WRITE = 57,
|
|
+ MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20 = 12,
|
|
+ MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24 = 28,
|
|
+ MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16 = 44,
|
|
+ MIPI_DSI_PACKED_PIXEL_STREAM_30 = 13,
|
|
+ MIPI_DSI_PACKED_PIXEL_STREAM_36 = 29,
|
|
+ MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12 = 61,
|
|
+ MIPI_DSI_PACKED_PIXEL_STREAM_16 = 14,
|
|
+ MIPI_DSI_PACKED_PIXEL_STREAM_18 = 30,
|
|
+ MIPI_DSI_PIXEL_STREAM_3BYTE_18 = 46,
|
|
+ MIPI_DSI_PACKED_PIXEL_STREAM_24 = 62,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ MIPI_DCS_NOP = 0,
|
|
+ MIPI_DCS_SOFT_RESET = 1,
|
|
+ MIPI_DCS_GET_DISPLAY_ID = 4,
|
|
+ MIPI_DCS_GET_RED_CHANNEL = 6,
|
|
+ MIPI_DCS_GET_GREEN_CHANNEL = 7,
|
|
+ MIPI_DCS_GET_BLUE_CHANNEL = 8,
|
|
+ MIPI_DCS_GET_DISPLAY_STATUS = 9,
|
|
+ MIPI_DCS_GET_POWER_MODE = 10,
|
|
+ MIPI_DCS_GET_ADDRESS_MODE = 11,
|
|
+ MIPI_DCS_GET_PIXEL_FORMAT = 12,
|
|
+ MIPI_DCS_GET_DISPLAY_MODE = 13,
|
|
+ MIPI_DCS_GET_SIGNAL_MODE = 14,
|
|
+ MIPI_DCS_GET_DIAGNOSTIC_RESULT = 15,
|
|
+ MIPI_DCS_ENTER_SLEEP_MODE = 16,
|
|
+ MIPI_DCS_EXIT_SLEEP_MODE = 17,
|
|
+ MIPI_DCS_ENTER_PARTIAL_MODE = 18,
|
|
+ MIPI_DCS_ENTER_NORMAL_MODE = 19,
|
|
+ MIPI_DCS_EXIT_INVERT_MODE = 32,
|
|
+ MIPI_DCS_ENTER_INVERT_MODE = 33,
|
|
+ MIPI_DCS_SET_GAMMA_CURVE = 38,
|
|
+ MIPI_DCS_SET_DISPLAY_OFF = 40,
|
|
+ MIPI_DCS_SET_DISPLAY_ON = 41,
|
|
+ MIPI_DCS_SET_COLUMN_ADDRESS = 42,
|
|
+ MIPI_DCS_SET_PAGE_ADDRESS = 43,
|
|
+ MIPI_DCS_WRITE_MEMORY_START = 44,
|
|
+ MIPI_DCS_WRITE_LUT = 45,
|
|
+ MIPI_DCS_READ_MEMORY_START = 46,
|
|
+ MIPI_DCS_SET_PARTIAL_AREA = 48,
|
|
+ MIPI_DCS_SET_SCROLL_AREA = 51,
|
|
+ MIPI_DCS_SET_TEAR_OFF = 52,
|
|
+ MIPI_DCS_SET_TEAR_ON = 53,
|
|
+ MIPI_DCS_SET_ADDRESS_MODE = 54,
|
|
+ MIPI_DCS_SET_SCROLL_START = 55,
|
|
+ MIPI_DCS_EXIT_IDLE_MODE = 56,
|
|
+ MIPI_DCS_ENTER_IDLE_MODE = 57,
|
|
+ MIPI_DCS_SET_PIXEL_FORMAT = 58,
|
|
+ MIPI_DCS_WRITE_MEMORY_CONTINUE = 60,
|
|
+ MIPI_DCS_READ_MEMORY_CONTINUE = 62,
|
|
+ MIPI_DCS_SET_TEAR_SCANLINE = 68,
|
|
+ MIPI_DCS_GET_SCANLINE = 69,
|
|
+ MIPI_DCS_SET_DISPLAY_BRIGHTNESS = 81,
|
|
+ MIPI_DCS_GET_DISPLAY_BRIGHTNESS = 82,
|
|
+ MIPI_DCS_WRITE_CONTROL_DISPLAY = 83,
|
|
+ MIPI_DCS_GET_CONTROL_DISPLAY = 84,
|
|
+ MIPI_DCS_WRITE_POWER_SAVE = 85,
|
|
+ MIPI_DCS_GET_POWER_SAVE = 86,
|
|
+ MIPI_DCS_SET_CABC_MIN_BRIGHTNESS = 94,
|
|
+ MIPI_DCS_GET_CABC_MIN_BRIGHTNESS = 95,
|
|
+ MIPI_DCS_READ_DDB_START = 161,
|
|
+ MIPI_DCS_READ_DDB_CONTINUE = 168,
|
|
+};
|
|
+
|
|
+struct drm_dmi_panel_orientation_data {
|
|
+ int width;
|
|
+ int height;
|
|
+ const char * const *bios_dates;
|
|
+ int orientation;
|
|
+};
|
|
+
|
|
+struct vga_device {
|
|
+ struct list_head list;
|
|
+ struct pci_dev *pdev;
|
|
+ unsigned int decodes;
|
|
+ unsigned int owns;
|
|
+ unsigned int locks;
|
|
+ unsigned int io_lock_cnt;
|
|
+ unsigned int mem_lock_cnt;
|
|
+ unsigned int io_norm_cnt;
|
|
+ unsigned int mem_norm_cnt;
|
|
+ bool bridge_has_one_vga;
|
|
+ void *cookie;
|
|
+ void (*irq_set_state)(void *, bool);
|
|
+ unsigned int (*set_vga_decode)(void *, bool);
|
|
+};
|
|
+
|
|
+struct vga_arb_user_card {
|
|
+ struct pci_dev *pdev;
|
|
+ unsigned int mem_cnt;
|
|
+ unsigned int io_cnt;
|
|
+};
|
|
+
|
|
+struct vga_arb_private {
|
|
+ struct list_head list;
|
|
+ struct pci_dev *target;
|
|
+ struct vga_arb_user_card cards[64];
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+enum vga_switcheroo_handler_flags_t {
|
|
+ VGA_SWITCHEROO_CAN_SWITCH_DDC = 1,
|
|
+ VGA_SWITCHEROO_NEEDS_EDP_CONFIG = 2,
|
|
+};
|
|
+
|
|
+enum vga_switcheroo_state {
|
|
+ VGA_SWITCHEROO_OFF = 0,
|
|
+ VGA_SWITCHEROO_ON = 1,
|
|
+ VGA_SWITCHEROO_NOT_FOUND = 2,
|
|
+};
|
|
+
|
|
+enum vga_switcheroo_client_id {
|
|
+ VGA_SWITCHEROO_UNKNOWN_ID = 4096,
|
|
+ VGA_SWITCHEROO_IGD = 0,
|
|
+ VGA_SWITCHEROO_DIS = 1,
|
|
+ VGA_SWITCHEROO_MAX_CLIENTS = 2,
|
|
+};
|
|
+
|
|
+struct vga_switcheroo_handler {
|
|
+ int (*init)();
|
|
+ int (*switchto)(enum vga_switcheroo_client_id);
|
|
+ int (*switch_ddc)(enum vga_switcheroo_client_id);
|
|
+ int (*power_state)(enum vga_switcheroo_client_id, enum vga_switcheroo_state);
|
|
+ enum vga_switcheroo_client_id (*get_client_id)(struct pci_dev *);
|
|
+};
|
|
+
|
|
+struct vga_switcheroo_client_ops {
|
|
+ void (*set_gpu_state)(struct pci_dev *, enum vga_switcheroo_state);
|
|
+ void (*reprobe)(struct pci_dev *);
|
|
+ bool (*can_switch)(struct pci_dev *);
|
|
+ void (*gpu_bound)(struct pci_dev *, enum vga_switcheroo_client_id);
|
|
+};
|
|
+
|
|
+struct vga_switcheroo_client {
|
|
+ struct pci_dev *pdev;
|
|
+ struct fb_info *fb_info;
|
|
+ enum vga_switcheroo_state pwr_state;
|
|
+ const struct vga_switcheroo_client_ops *ops;
|
|
+ enum vga_switcheroo_client_id id;
|
|
+ bool active;
|
|
+ bool driver_power_control;
|
|
+ struct list_head list;
|
|
+ struct pci_dev *vga_dev;
|
|
+};
|
|
+
|
|
+struct vgasr_priv {
|
|
+ bool active;
|
|
+ bool delayed_switch_active;
|
|
+ enum vga_switcheroo_client_id delayed_client_id;
|
|
+ struct dentry *debugfs_root;
|
|
+ struct dentry *switch_file;
|
|
+ int registered_clients;
|
|
+ struct list_head clients;
|
|
+ const struct vga_switcheroo_handler *handler;
|
|
+ enum vga_switcheroo_handler_flags_t handler_flags;
|
|
+ struct mutex mux_hw_lock;
|
|
+ int old_ddc_owner;
|
|
+};
|
|
+
|
|
+struct cb_id {
|
|
+ __u32 idx;
|
|
+ __u32 val;
|
|
+};
|
|
+
|
|
+struct cn_msg {
|
|
+ struct cb_id id;
|
|
+ __u32 seq;
|
|
+ __u32 ack;
|
|
+ __u16 len;
|
|
+ __u16 flags;
|
|
+ __u8 data[0];
|
|
+};
|
|
+
|
|
+struct cn_queue_dev {
|
|
+ atomic_t refcnt;
|
|
+ unsigned char name[32];
|
|
+ struct list_head queue_list;
|
|
+ spinlock_t queue_lock;
|
|
+ struct sock *nls;
|
|
+};
|
|
+
|
|
+struct cn_callback_id {
|
|
+ unsigned char name[32];
|
|
+ struct cb_id id;
|
|
+};
|
|
+
|
|
+struct cn_callback_entry {
|
|
+ struct list_head callback_entry;
|
|
+ refcount_t refcnt;
|
|
+ struct cn_queue_dev *pdev;
|
|
+ struct cn_callback_id id;
|
|
+ void (*callback)(struct cn_msg *, struct netlink_skb_parms *);
|
|
+ u32 seq;
|
|
+ u32 group;
|
|
+};
|
|
+
|
|
+struct cn_dev {
|
|
+ struct cb_id id;
|
|
+ u32 seq;
|
|
+ u32 groups;
|
|
+ struct sock *nls;
|
|
+ void (*input)(struct sk_buff *);
|
|
+ struct cn_queue_dev *cbdev;
|
|
+};
|
|
+
|
|
+enum proc_cn_mcast_op {
|
|
+ PROC_CN_MCAST_LISTEN = 1,
|
|
+ PROC_CN_MCAST_IGNORE = 2,
|
|
+};
|
|
+
|
|
+struct fork_proc_event {
|
|
+ __kernel_pid_t parent_pid;
|
|
+ __kernel_pid_t parent_tgid;
|
|
+ __kernel_pid_t child_pid;
|
|
+ __kernel_pid_t child_tgid;
|
|
+};
|
|
+
|
|
+struct exec_proc_event {
|
|
+ __kernel_pid_t process_pid;
|
|
+ __kernel_pid_t process_tgid;
|
|
+};
|
|
+
|
|
+struct id_proc_event {
|
|
+ __kernel_pid_t process_pid;
|
|
+ __kernel_pid_t process_tgid;
|
|
+ union {
|
|
+ __u32 ruid;
|
|
+ __u32 rgid;
|
|
+ } r;
|
|
+ union {
|
|
+ __u32 euid;
|
|
+ __u32 egid;
|
|
+ } e;
|
|
+};
|
|
+
|
|
+struct sid_proc_event {
|
|
+ __kernel_pid_t process_pid;
|
|
+ __kernel_pid_t process_tgid;
|
|
+};
|
|
+
|
|
+struct ptrace_proc_event {
|
|
+ __kernel_pid_t process_pid;
|
|
+ __kernel_pid_t process_tgid;
|
|
+ __kernel_pid_t tracer_pid;
|
|
+ __kernel_pid_t tracer_tgid;
|
|
+};
|
|
+
|
|
+struct comm_proc_event {
|
|
+ __kernel_pid_t process_pid;
|
|
+ __kernel_pid_t process_tgid;
|
|
+ char comm[16];
|
|
+};
|
|
+
|
|
+struct coredump_proc_event {
|
|
+ __kernel_pid_t process_pid;
|
|
+ __kernel_pid_t process_tgid;
|
|
+ __kernel_pid_t parent_pid;
|
|
+ __kernel_pid_t parent_tgid;
|
|
+};
|
|
+
|
|
+struct exit_proc_event {
|
|
+ __kernel_pid_t process_pid;
|
|
+ __kernel_pid_t process_tgid;
|
|
+ __u32 exit_code;
|
|
+ __u32 exit_signal;
|
|
+ __kernel_pid_t parent_pid;
|
|
+ __kernel_pid_t parent_tgid;
|
|
+};
|
|
+
|
|
+struct proc_event {
|
|
+ enum what what;
|
|
+ __u32 cpu;
|
|
+ __u64 timestamp_ns;
|
|
+ union {
|
|
+ struct {
|
|
+ __u32 err;
|
|
+ } ack;
|
|
+ struct fork_proc_event fork;
|
|
+ struct exec_proc_event exec;
|
|
+ struct id_proc_event id;
|
|
+ struct sid_proc_event sid;
|
|
+ struct ptrace_proc_event ptrace;
|
|
+ struct comm_proc_event comm;
|
|
+ struct coredump_proc_event coredump;
|
|
+ struct exit_proc_event exit;
|
|
+ } event_data;
|
|
+};
|
|
+
|
|
+struct component_ops {
|
|
+ int (*bind)(struct device *, struct device *, void *);
|
|
+ void (*unbind)(struct device *, struct device *, void *);
|
|
+};
|
|
+
|
|
+struct component_master_ops {
|
|
+ int (*bind)(struct device *);
|
|
+ void (*unbind)(struct device *);
|
|
+};
|
|
+
|
|
+struct component;
|
|
+
|
|
+struct component_match_array {
|
|
+ void *data;
|
|
+ int (*compare)(struct device *, void *);
|
|
+ void (*release)(struct device *, void *);
|
|
+ struct component *component;
|
|
+ bool duplicate;
|
|
+};
|
|
+
|
|
+struct master;
|
|
+
|
|
+struct component {
|
|
+ struct list_head node;
|
|
+ struct master *master;
|
|
+ bool bound;
|
|
+ const struct component_ops *ops;
|
|
+ struct device *dev;
|
|
+};
|
|
+
|
|
+struct component_match {
|
|
+ size_t alloc;
|
|
+ size_t num;
|
|
+ struct component_match_array *compare;
|
|
+};
|
|
+
|
|
+struct master {
|
|
+ struct list_head node;
|
|
+ bool bound;
|
|
+ const struct component_master_ops *ops;
|
|
+ struct device *dev;
|
|
+ struct component_match *match;
|
|
+ struct dentry *dentry;
|
|
+};
|
|
+
|
|
+struct wake_irq {
|
|
+ struct device *dev;
|
|
+ unsigned int status;
|
|
+ int irq;
|
|
+ const char *name;
|
|
+};
|
|
+
|
|
+enum dpm_order {
|
|
+ DPM_ORDER_NONE = 0,
|
|
+ DPM_ORDER_DEV_AFTER_PARENT = 1,
|
|
+ DPM_ORDER_PARENT_BEFORE_DEV = 2,
|
|
+ DPM_ORDER_DEV_LAST = 3,
|
|
+};
|
|
+
|
|
+struct subsys_private {
|
|
+ struct kset subsys;
|
|
+ struct kset *devices_kset;
|
|
+ struct list_head interfaces;
|
|
+ struct mutex mutex;
|
|
+ struct kset *drivers_kset;
|
|
+ struct klist klist_devices;
|
|
+ struct klist klist_drivers;
|
|
+ struct blocking_notifier_head bus_notifier;
|
|
+ unsigned int drivers_autoprobe: 1;
|
|
+ struct bus_type *bus;
|
|
+ struct kset glue_dirs;
|
|
+ struct class *class;
|
|
+};
|
|
+
|
|
+struct driver_private {
|
|
+ struct kobject kobj;
|
|
+ struct klist klist_devices;
|
|
+ struct klist_node knode_bus;
|
|
+ struct module_kobject *mkobj;
|
|
+ struct device_driver *driver;
|
|
+};
|
|
+
|
|
+enum device_link_state {
|
|
+ DL_STATE_NONE = -1,
|
|
+ DL_STATE_DORMANT = 0,
|
|
+ DL_STATE_AVAILABLE = 1,
|
|
+ DL_STATE_CONSUMER_PROBE = 2,
|
|
+ DL_STATE_ACTIVE = 3,
|
|
+ DL_STATE_SUPPLIER_UNBIND = 4,
|
|
+};
|
|
+
|
|
+struct device_link {
|
|
+ struct device *supplier;
|
|
+ struct list_head s_node;
|
|
+ struct device *consumer;
|
|
+ struct list_head c_node;
|
|
+ enum device_link_state status;
|
|
+ u32 flags;
|
|
+ refcount_t rpm_active;
|
|
+ struct kref kref;
|
|
+ struct callback_head callback_head;
|
|
+ bool supplier_preactivated;
|
|
+};
|
|
+
|
|
+struct device_private {
|
|
+ struct klist klist_children;
|
|
+ struct klist_node knode_parent;
|
|
+ struct klist_node knode_driver;
|
|
+ struct klist_node knode_bus;
|
|
+ struct list_head deferred_probe;
|
|
+ struct device *device;
|
|
+ u8 dead: 1;
|
|
+};
|
|
+
|
|
+union device_attr_group_devres {
|
|
+ const struct attribute_group *group;
|
|
+ const struct attribute_group **groups;
|
|
+};
|
|
+
|
|
+struct class_dir {
|
|
+ struct kobject kobj;
|
|
+ struct class *class;
|
|
+};
|
|
+
|
|
+struct root_device {
|
|
+ struct device dev;
|
|
+ struct module *owner;
|
|
+};
|
|
+
|
|
+struct subsys_dev_iter {
|
|
+ struct klist_iter ki;
|
|
+ const struct device_type *type;
|
|
+};
|
|
+
|
|
+struct device_attach_data {
|
|
+ struct device *dev;
|
|
+ bool check_async;
|
|
+ bool want_async;
|
|
+ bool have_async;
|
|
+};
|
|
+
|
|
+struct class_attribute_string {
|
|
+ struct class_attribute attr;
|
|
+ char *str;
|
|
+};
|
|
+
|
|
+struct class_compat {
|
|
+ struct kobject *kobj;
|
|
+};
|
|
+
|
|
+struct early_platform_driver {
|
|
+ const char *class_str;
|
|
+ struct platform_driver *pdrv;
|
|
+ struct list_head list;
|
|
+ int requested_id;
|
|
+ char *buffer;
|
|
+ int bufsize;
|
|
+};
|
|
+
|
|
+struct platform_object {
|
|
+ struct platform_device pdev;
|
|
+ char name[0];
|
|
+};
|
|
+
|
|
+struct cpu_attr {
|
|
+ struct device_attribute attr;
|
|
+ const struct cpumask * const map;
|
|
+};
|
|
+
|
|
+typedef struct kobject *kobj_probe_t(dev_t, int *, void *);
|
|
+
|
|
+struct probe {
|
|
+ struct probe *next;
|
|
+ dev_t dev;
|
|
+ long unsigned int range;
|
|
+ struct module *owner;
|
|
+ kobj_probe_t *get;
|
|
+ int (*lock)(dev_t, void *);
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+struct kobj_map___2 {
|
|
+ struct probe *probes[255];
|
|
+ struct mutex *lock;
|
|
+};
|
|
+
|
|
+typedef int (*dr_match_t)(struct device *, void *, void *);
|
|
+
|
|
+struct devres_node {
|
|
+ struct list_head entry;
|
|
+ dr_release_t release;
|
|
+};
|
|
+
|
|
+struct devres {
|
|
+ struct devres_node node;
|
|
+ u8 data[0];
|
|
+};
|
|
+
|
|
+struct devres_group {
|
|
+ struct devres_node node[2];
|
|
+ void *id;
|
|
+ int color;
|
|
+};
|
|
+
|
|
+struct action_devres {
|
|
+ void *data;
|
|
+ void (*action)(void *);
|
|
+};
|
|
+
|
|
+struct pages_devres {
|
|
+ long unsigned int addr;
|
|
+ unsigned int order;
|
|
+};
|
|
+
|
|
+struct attribute_container {
|
|
+ struct list_head node;
|
|
+ struct klist containers;
|
|
+ struct class *class;
|
|
+ const struct attribute_group *grp;
|
|
+ struct device_attribute **attrs;
|
|
+ int (*match)(struct attribute_container *, struct device *);
|
|
+ long unsigned int flags;
|
|
+};
|
|
+
|
|
+struct internal_container {
|
|
+ struct klist_node node;
|
|
+ struct attribute_container *cont;
|
|
+ struct device classdev;
|
|
+};
|
|
+
|
|
+struct transport_container;
|
|
+
|
|
+struct transport_class {
|
|
+ struct class class;
|
|
+ int (*setup)(struct transport_container *, struct device *, struct device *);
|
|
+ int (*configure)(struct transport_container *, struct device *, struct device *);
|
|
+ int (*remove)(struct transport_container *, struct device *, struct device *);
|
|
+};
|
|
+
|
|
+struct transport_container {
|
|
+ struct attribute_container ac;
|
|
+ const struct attribute_group *statistics;
|
|
+};
|
|
+
|
|
+struct anon_transport_class {
|
|
+ struct transport_class tclass;
|
|
+ struct attribute_container container;
|
|
+};
|
|
+
|
|
+struct mii_bus;
|
|
+
|
|
+struct mdio_device {
|
|
+ struct device dev;
|
|
+ struct mii_bus *bus;
|
|
+ char modalias[32];
|
|
+ int (*bus_match)(struct device *, struct device_driver *);
|
|
+ void (*device_free)(struct mdio_device *);
|
|
+ void (*device_remove)(struct mdio_device *);
|
|
+ int addr;
|
|
+ int flags;
|
|
+ struct gpio_desc___2 *reset;
|
|
+ unsigned int reset_assert_delay;
|
|
+ unsigned int reset_deassert_delay;
|
|
+};
|
|
+
|
|
+struct phy_c45_device_ids {
|
|
+ u32 devices_in_package;
|
|
+ u32 device_ids[8];
|
|
+};
|
|
+
|
|
+enum phy_state {
|
|
+ PHY_DOWN = 0,
|
|
+ PHY_STARTING = 1,
|
|
+ PHY_READY = 2,
|
|
+ PHY_PENDING = 3,
|
|
+ PHY_UP = 4,
|
|
+ PHY_AN = 5,
|
|
+ PHY_RUNNING = 6,
|
|
+ PHY_NOLINK = 7,
|
|
+ PHY_FORCING = 8,
|
|
+ PHY_CHANGELINK = 9,
|
|
+ PHY_HALTED = 10,
|
|
+ PHY_RESUMING = 11,
|
|
+};
|
|
+
|
|
+typedef enum {
|
|
+ PHY_INTERFACE_MODE_NA = 0,
|
|
+ PHY_INTERFACE_MODE_INTERNAL = 1,
|
|
+ PHY_INTERFACE_MODE_MII = 2,
|
|
+ PHY_INTERFACE_MODE_GMII = 3,
|
|
+ PHY_INTERFACE_MODE_SGMII = 4,
|
|
+ PHY_INTERFACE_MODE_TBI = 5,
|
|
+ PHY_INTERFACE_MODE_REVMII = 6,
|
|
+ PHY_INTERFACE_MODE_RMII = 7,
|
|
+ PHY_INTERFACE_MODE_RGMII = 8,
|
|
+ PHY_INTERFACE_MODE_RGMII_ID = 9,
|
|
+ PHY_INTERFACE_MODE_RGMII_RXID = 10,
|
|
+ PHY_INTERFACE_MODE_RGMII_TXID = 11,
|
|
+ PHY_INTERFACE_MODE_RTBI = 12,
|
|
+ PHY_INTERFACE_MODE_SMII = 13,
|
|
+ PHY_INTERFACE_MODE_XGMII = 14,
|
|
+ PHY_INTERFACE_MODE_MOCA = 15,
|
|
+ PHY_INTERFACE_MODE_QSGMII = 16,
|
|
+ PHY_INTERFACE_MODE_TRGMII = 17,
|
|
+ PHY_INTERFACE_MODE_1000BASEX = 18,
|
|
+ PHY_INTERFACE_MODE_2500BASEX = 19,
|
|
+ PHY_INTERFACE_MODE_RXAUI = 20,
|
|
+ PHY_INTERFACE_MODE_XAUI = 21,
|
|
+ PHY_INTERFACE_MODE_10GKR = 22,
|
|
+ PHY_INTERFACE_MODE_MAX = 23,
|
|
+} phy_interface_t;
|
|
+
|
|
+struct phylink;
|
|
+
|
|
+struct phy_driver;
|
|
+
|
|
+struct phy_led_trigger;
|
|
+
|
|
+struct phy_device {
|
|
+ struct mdio_device mdio;
|
|
+ struct phy_driver *drv;
|
|
+ u32 phy_id;
|
|
+ struct phy_c45_device_ids c45_ids;
|
|
+ unsigned int is_c45: 1;
|
|
+ unsigned int is_internal: 1;
|
|
+ unsigned int is_pseudo_fixed_link: 1;
|
|
+ unsigned int has_fixups: 1;
|
|
+ unsigned int suspended: 1;
|
|
+ unsigned int sysfs_links: 1;
|
|
+ unsigned int loopback_enabled: 1;
|
|
+ unsigned int autoneg: 1;
|
|
+ unsigned int link: 1;
|
|
+ enum phy_state state;
|
|
+ u32 dev_flags;
|
|
+ phy_interface_t interface;
|
|
+ int speed;
|
|
+ int duplex;
|
|
+ int pause;
|
|
+ int asym_pause;
|
|
+ u32 interrupts;
|
|
+ u32 supported;
|
|
+ u32 advertising;
|
|
+ u32 lp_advertising;
|
|
+ u32 eee_broken_modes;
|
|
+ int link_timeout;
|
|
+ struct phy_led_trigger *phy_led_triggers;
|
|
+ unsigned int phy_num_led_triggers;
|
|
+ struct phy_led_trigger *last_triggered;
|
|
+ struct phy_led_trigger *led_link_trigger;
|
|
+ int irq;
|
|
+ void *priv;
|
|
+ struct work_struct phy_queue;
|
|
+ struct delayed_work state_queue;
|
|
+ struct mutex lock;
|
|
+ struct phylink *phylink;
|
|
+ struct net_device *attached_dev;
|
|
+ u8 mdix;
|
|
+ u8 mdix_ctrl;
|
|
+ void (*phy_link_change)(struct phy_device *, bool, bool);
|
|
+ void (*adjust_link)(struct net_device *);
|
|
+};
|
|
+
|
|
+struct mii_bus {
|
|
+ struct module *owner;
|
|
+ const char *name;
|
|
+ char id[61];
|
|
+ void *priv;
|
|
+ int (*read)(struct mii_bus *, int, int);
|
|
+ int (*write)(struct mii_bus *, int, int, u16);
|
|
+ int (*reset)(struct mii_bus *);
|
|
+ struct mutex mdio_lock;
|
|
+ struct device *parent;
|
|
+ enum {
|
|
+ MDIOBUS_ALLOCATED = 1,
|
|
+ MDIOBUS_REGISTERED = 2,
|
|
+ MDIOBUS_UNREGISTERED = 3,
|
|
+ MDIOBUS_RELEASED = 4,
|
|
+ } state;
|
|
+ struct device dev;
|
|
+ struct mdio_device *mdio_map[32];
|
|
+ u32 phy_mask;
|
|
+ u32 phy_ignore_ta_mask;
|
|
+ int irq[32];
|
|
+ int reset_delay_us;
|
|
+ struct gpio_desc___2 *reset_gpiod;
|
|
+};
|
|
+
|
|
+struct mdio_driver_common {
|
|
+ struct device_driver driver;
|
|
+ int flags;
|
|
+};
|
|
+
|
|
+struct phy_driver {
|
|
+ struct mdio_driver_common mdiodrv;
|
|
+ u32 phy_id;
|
|
+ char *name;
|
|
+ u32 phy_id_mask;
|
|
+ u32 features;
|
|
+ u32 flags;
|
|
+ const void *driver_data;
|
|
+ int (*soft_reset)(struct phy_device *);
|
|
+ int (*config_init)(struct phy_device *);
|
|
+ int (*probe)(struct phy_device *);
|
|
+ int (*suspend)(struct phy_device *);
|
|
+ int (*resume)(struct phy_device *);
|
|
+ int (*config_aneg)(struct phy_device *);
|
|
+ int (*aneg_done)(struct phy_device *);
|
|
+ int (*read_status)(struct phy_device *);
|
|
+ int (*ack_interrupt)(struct phy_device *);
|
|
+ int (*config_intr)(struct phy_device *);
|
|
+ int (*did_interrupt)(struct phy_device *);
|
|
+ void (*remove)(struct phy_device *);
|
|
+ int (*match_phy_device)(struct phy_device *);
|
|
+ int (*ts_info)(struct phy_device *, struct ethtool_ts_info *);
|
|
+ int (*hwtstamp)(struct phy_device *, struct ifreq *);
|
|
+ bool (*rxtstamp)(struct phy_device *, struct sk_buff *, int);
|
|
+ void (*txtstamp)(struct phy_device *, struct sk_buff *, int);
|
|
+ int (*set_wol)(struct phy_device *, struct ethtool_wolinfo *);
|
|
+ void (*get_wol)(struct phy_device *, struct ethtool_wolinfo *);
|
|
+ void (*link_change_notify)(struct phy_device *);
|
|
+ int (*read_mmd)(struct phy_device *, int, u16);
|
|
+ int (*write_mmd)(struct phy_device *, int, u16, u16);
|
|
+ int (*read_page)(struct phy_device *);
|
|
+ int (*write_page)(struct phy_device *, int);
|
|
+ int (*module_info)(struct phy_device *, struct ethtool_modinfo *);
|
|
+ int (*module_eeprom)(struct phy_device *, struct ethtool_eeprom *, u8 *);
|
|
+ int (*get_sset_count)(struct phy_device *);
|
|
+ void (*get_strings)(struct phy_device *, u8 *);
|
|
+ void (*get_stats)(struct phy_device *, struct ethtool_stats *, u64 *);
|
|
+ int (*get_tunable)(struct phy_device *, struct ethtool_tunable *, void *);
|
|
+ int (*set_tunable)(struct phy_device *, struct ethtool_tunable *, const void *);
|
|
+ int (*set_loopback)(struct phy_device *, bool);
|
|
+};
|
|
+
|
|
+struct property_set {
|
|
+ struct device *dev;
|
|
+ struct fwnode_handle fwnode;
|
|
+ const struct property_entry *properties;
|
|
+};
|
|
+
|
|
+struct device_connection {
|
|
+ const char *endpoint[2];
|
|
+ const char *id;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct req {
|
|
+ struct req *next;
|
|
+ struct completion done;
|
|
+ int err;
|
|
+ const char *name;
|
|
+ umode_t mode;
|
|
+ kuid_t uid;
|
|
+ kgid_t gid;
|
|
+ struct device *dev;
|
|
+};
|
|
+
|
|
+typedef int (*pm_callback_t)(struct device *);
|
|
+
|
|
+enum gpd_status {
|
|
+ GPD_STATE_ACTIVE = 0,
|
|
+ GPD_STATE_POWER_OFF = 1,
|
|
+};
|
|
+
|
|
+struct gpd_dev_ops {
|
|
+ int (*start)(struct device *);
|
|
+ int (*stop)(struct device *);
|
|
+};
|
|
+
|
|
+struct genpd_power_state {
|
|
+ s64 power_off_latency_ns;
|
|
+ s64 power_on_latency_ns;
|
|
+ s64 residency_ns;
|
|
+ struct fwnode_handle *fwnode;
|
|
+ ktime_t idle_time;
|
|
+};
|
|
+
|
|
+struct dev_pm_opp;
|
|
+
|
|
+struct genpd_lock_ops;
|
|
+
|
|
+struct generic_pm_domain {
|
|
+ struct device dev;
|
|
+ struct dev_pm_domain domain;
|
|
+ struct list_head gpd_list_node;
|
|
+ struct list_head master_links;
|
|
+ struct list_head slave_links;
|
|
+ struct list_head dev_list;
|
|
+ struct dev_power_governor *gov;
|
|
+ struct work_struct power_off_work;
|
|
+ struct fwnode_handle *provider;
|
|
+ bool has_provider;
|
|
+ const char *name;
|
|
+ atomic_t sd_count;
|
|
+ enum gpd_status status;
|
|
+ unsigned int device_count;
|
|
+ unsigned int suspended_count;
|
|
+ unsigned int prepared_count;
|
|
+ unsigned int performance_state;
|
|
+ int (*power_off)(struct generic_pm_domain *);
|
|
+ int (*power_on)(struct generic_pm_domain *);
|
|
+ unsigned int (*opp_to_performance_state)(struct generic_pm_domain *, struct dev_pm_opp *);
|
|
+ int (*set_performance_state)(struct generic_pm_domain *, unsigned int);
|
|
+ struct gpd_dev_ops dev_ops;
|
|
+ s64 max_off_time_ns;
|
|
+ bool max_off_time_changed;
|
|
+ bool cached_power_down_ok;
|
|
+ int (*attach_dev)(struct generic_pm_domain *, struct device *);
|
|
+ void (*detach_dev)(struct generic_pm_domain *, struct device *);
|
|
+ unsigned int flags;
|
|
+ struct genpd_power_state *states;
|
|
+ unsigned int state_count;
|
|
+ unsigned int state_idx;
|
|
+ void *free;
|
|
+ ktime_t on_time;
|
|
+ ktime_t accounting_time;
|
|
+ const struct genpd_lock_ops *lock_ops;
|
|
+ union {
|
|
+ struct mutex mlock;
|
|
+ struct {
|
|
+ spinlock_t slock;
|
|
+ long unsigned int lock_flags;
|
|
+ };
|
|
+ };
|
|
+};
|
|
+
|
|
+struct genpd_lock_ops {
|
|
+ void (*lock)(struct generic_pm_domain *);
|
|
+ void (*lock_nested)(struct generic_pm_domain *, int);
|
|
+ int (*lock_interruptible)(struct generic_pm_domain *);
|
|
+ void (*unlock)(struct generic_pm_domain *);
|
|
+};
|
|
+
|
|
+struct gpd_link {
|
|
+ struct generic_pm_domain *master;
|
|
+ struct list_head master_node;
|
|
+ struct generic_pm_domain *slave;
|
|
+ struct list_head slave_node;
|
|
+};
|
|
+
|
|
+struct gpd_timing_data {
|
|
+ s64 suspend_latency_ns;
|
|
+ s64 resume_latency_ns;
|
|
+ s64 effective_constraint_ns;
|
|
+ bool constraint_changed;
|
|
+ bool cached_suspend_ok;
|
|
+};
|
|
+
|
|
+struct generic_pm_domain_data {
|
|
+ struct pm_domain_data base;
|
|
+ struct gpd_timing_data td;
|
|
+ struct notifier_block nb;
|
|
+ unsigned int performance_state;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+struct pm_clk_notifier_block {
|
|
+ struct notifier_block nb;
|
|
+ struct dev_pm_domain *pm_domain;
|
|
+ char *con_ids[0];
|
|
+};
|
|
+
|
|
+enum pce_status {
|
|
+ PCE_STATUS_NONE = 0,
|
|
+ PCE_STATUS_ACQUIRED = 1,
|
|
+ PCE_STATUS_ENABLED = 2,
|
|
+ PCE_STATUS_ERROR = 3,
|
|
+};
|
|
+
|
|
+struct pm_clock_entry {
|
|
+ struct list_head node;
|
|
+ char *con_id;
|
|
+ struct clk *clk;
|
|
+ enum pce_status status;
|
|
+};
|
|
+
|
|
+struct firmware_fallback_config {
|
|
+ unsigned int force_sysfs_fallback;
|
|
+ unsigned int ignore_sysfs_fallback;
|
|
+ int old_timeout;
|
|
+ int loading_timeout;
|
|
+};
|
|
+
|
|
+enum fw_opt {
|
|
+ FW_OPT_UEVENT = 1,
|
|
+ FW_OPT_NOWAIT = 2,
|
|
+ FW_OPT_USERHELPER = 4,
|
|
+ FW_OPT_NO_WARN = 8,
|
|
+ FW_OPT_NOCACHE = 16,
|
|
+ FW_OPT_NOFALLBACK = 32,
|
|
+};
|
|
+
|
|
+enum fw_status {
|
|
+ FW_STATUS_UNKNOWN = 0,
|
|
+ FW_STATUS_LOADING = 1,
|
|
+ FW_STATUS_DONE = 2,
|
|
+ FW_STATUS_ABORTED = 3,
|
|
+};
|
|
+
|
|
+struct fw_state {
|
|
+ struct completion completion;
|
|
+ enum fw_status status;
|
|
+};
|
|
+
|
|
+struct firmware_cache;
|
|
+
|
|
+struct fw_priv {
|
|
+ struct kref ref;
|
|
+ struct list_head list;
|
|
+ struct firmware_cache *fwc;
|
|
+ struct fw_state fw_st;
|
|
+ void *data;
|
|
+ size_t size;
|
|
+ size_t allocated_size;
|
|
+ bool is_paged_buf;
|
|
+ bool need_uevent;
|
|
+ struct page **pages;
|
|
+ int nr_pages;
|
|
+ int page_array_size;
|
|
+ struct list_head pending_list;
|
|
+ const char *fw_name;
|
|
+};
|
|
+
|
|
+struct firmware_cache {
|
|
+ spinlock_t lock;
|
|
+ struct list_head head;
|
|
+ int state;
|
|
+ spinlock_t name_lock;
|
|
+ struct list_head fw_names;
|
|
+ struct delayed_work work;
|
|
+ struct notifier_block pm_notify;
|
|
+};
|
|
+
|
|
+struct fw_cache_entry {
|
|
+ struct list_head list;
|
|
+ const char *name;
|
|
+};
|
|
+
|
|
+struct fw_name_devm {
|
|
+ long unsigned int magic;
|
|
+ const char *name;
|
|
+};
|
|
+
|
|
+struct firmware_work {
|
|
+ struct work_struct work;
|
|
+ struct module *module;
|
|
+ const char *name;
|
|
+ struct device *device;
|
|
+ void *context;
|
|
+ void (*cont)(const struct firmware *, void *);
|
|
+ enum fw_opt opt_flags;
|
|
+};
|
|
+
|
|
+struct fw_sysfs {
|
|
+ bool nowait;
|
|
+ struct device dev;
|
|
+ struct fw_priv *fw_priv;
|
|
+ struct firmware *fw;
|
|
+};
|
|
+
|
|
+typedef void (*node_registration_func_t)(struct node *);
|
|
+
|
|
+typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
|
|
+
|
|
+struct node_access_nodes {
|
|
+ struct device dev;
|
|
+ struct list_head list_node;
|
|
+ unsigned int access;
|
|
+ struct node_hmem_attrs hmem_attrs;
|
|
+};
|
|
+
|
|
+struct node_cache_info {
|
|
+ struct device dev;
|
|
+ struct list_head node;
|
|
+ struct node_cache_attrs cache_attrs;
|
|
+};
|
|
+
|
|
+struct node_attr {
|
|
+ struct device_attribute attr;
|
|
+ enum node_states state;
|
|
+};
|
|
+
|
|
+struct for_each_memory_block_cb_data {
|
|
+ walk_memory_blocks_func_t func;
|
|
+ void *arg;
|
|
+};
|
|
+
|
|
+enum regcache_type {
|
|
+ REGCACHE_NONE = 0,
|
|
+ REGCACHE_RBTREE = 1,
|
|
+ REGCACHE_COMPRESSED = 2,
|
|
+ REGCACHE_FLAT = 3,
|
|
+};
|
|
+
|
|
+struct reg_default {
|
|
+ unsigned int reg;
|
|
+ unsigned int def;
|
|
+};
|
|
+
|
|
+struct reg_sequence {
|
|
+ unsigned int reg;
|
|
+ unsigned int def;
|
|
+ unsigned int delay_us;
|
|
+};
|
|
+
|
|
+enum regmap_endian {
|
|
+ REGMAP_ENDIAN_DEFAULT = 0,
|
|
+ REGMAP_ENDIAN_BIG = 1,
|
|
+ REGMAP_ENDIAN_LITTLE = 2,
|
|
+ REGMAP_ENDIAN_NATIVE = 3,
|
|
+};
|
|
+
|
|
+struct regmap_range {
|
|
+ unsigned int range_min;
|
|
+ unsigned int range_max;
|
|
+};
|
|
+
|
|
+struct regmap_access_table {
|
|
+ const struct regmap_range *yes_ranges;
|
|
+ unsigned int n_yes_ranges;
|
|
+ const struct regmap_range *no_ranges;
|
|
+ unsigned int n_no_ranges;
|
|
+};
|
|
+
|
|
+typedef void (*regmap_lock)(void *);
|
|
+
|
|
+typedef void (*regmap_unlock)(void *);
|
|
+
|
|
+struct regmap_range_cfg;
|
|
+
|
|
+struct regmap_config {
|
|
+ const char *name;
|
|
+ int reg_bits;
|
|
+ int reg_stride;
|
|
+ int pad_bits;
|
|
+ int val_bits;
|
|
+ bool (*writeable_reg)(struct device *, unsigned int);
|
|
+ bool (*readable_reg)(struct device *, unsigned int);
|
|
+ bool (*volatile_reg)(struct device *, unsigned int);
|
|
+ bool (*precious_reg)(struct device *, unsigned int);
|
|
+ bool (*readable_noinc_reg)(struct device *, unsigned int);
|
|
+ bool disable_locking;
|
|
+ regmap_lock lock;
|
|
+ regmap_unlock unlock;
|
|
+ void *lock_arg;
|
|
+ int (*reg_read)(void *, unsigned int, unsigned int *);
|
|
+ int (*reg_write)(void *, unsigned int, unsigned int);
|
|
+ bool fast_io;
|
|
+ unsigned int max_register;
|
|
+ const struct regmap_access_table *wr_table;
|
|
+ const struct regmap_access_table *rd_table;
|
|
+ const struct regmap_access_table *volatile_table;
|
|
+ const struct regmap_access_table *precious_table;
|
|
+ const struct regmap_access_table *rd_noinc_table;
|
|
+ const struct reg_default *reg_defaults;
|
|
+ unsigned int num_reg_defaults;
|
|
+ enum regcache_type cache_type;
|
|
+ const void *reg_defaults_raw;
|
|
+ unsigned int num_reg_defaults_raw;
|
|
+ long unsigned int read_flag_mask;
|
|
+ long unsigned int write_flag_mask;
|
|
+ bool zero_flag_mask;
|
|
+ bool use_single_rw;
|
|
+ bool can_multi_write;
|
|
+ enum regmap_endian reg_format_endian;
|
|
+ enum regmap_endian val_format_endian;
|
|
+ const struct regmap_range_cfg *ranges;
|
|
+ unsigned int num_ranges;
|
|
+ bool use_hwlock;
|
|
+ unsigned int hwlock_id;
|
|
+ unsigned int hwlock_mode;
|
|
+};
|
|
+
|
|
+struct regmap_range_cfg {
|
|
+ const char *name;
|
|
+ unsigned int range_min;
|
|
+ unsigned int range_max;
|
|
+ unsigned int selector_reg;
|
|
+ unsigned int selector_mask;
|
|
+ int selector_shift;
|
|
+ unsigned int window_start;
|
|
+ unsigned int window_len;
|
|
+};
|
|
+
|
|
+typedef int (*regmap_hw_write)(void *, const void *, size_t);
|
|
+
|
|
+typedef int (*regmap_hw_gather_write)(void *, const void *, size_t, const void *, size_t);
|
|
+
|
|
+struct regmap_async;
|
|
+
|
|
+typedef int (*regmap_hw_async_write)(void *, const void *, size_t, const void *, size_t, struct regmap_async *);
|
|
+
|
|
+struct regmap___2;
|
|
+
|
|
+struct regmap_async {
|
|
+ struct list_head list;
|
|
+ struct regmap___2 *map;
|
|
+ void *work_buf;
|
|
+};
|
|
+
|
|
+typedef int (*regmap_hw_read)(void *, const void *, size_t, void *, size_t);
|
|
+
|
|
+typedef int (*regmap_hw_reg_read)(void *, unsigned int, unsigned int *);
|
|
+
|
|
+typedef int (*regmap_hw_reg_write)(void *, unsigned int, unsigned int);
|
|
+
|
|
+typedef int (*regmap_hw_reg_update_bits)(void *, unsigned int, unsigned int, unsigned int);
|
|
+
|
|
+typedef struct regmap_async * (*regmap_hw_async_alloc)();
|
|
+
|
|
+typedef void (*regmap_hw_free_context)(void *);
|
|
+
|
|
+struct regmap_bus {
|
|
+ bool fast_io;
|
|
+ regmap_hw_write write;
|
|
+ regmap_hw_gather_write gather_write;
|
|
+ regmap_hw_async_write async_write;
|
|
+ regmap_hw_reg_write reg_write;
|
|
+ regmap_hw_reg_update_bits reg_update_bits;
|
|
+ regmap_hw_read read;
|
|
+ regmap_hw_reg_read reg_read;
|
|
+ regmap_hw_free_context free_context;
|
|
+ regmap_hw_async_alloc async_alloc;
|
|
+ u8 read_flag_mask;
|
|
+ enum regmap_endian reg_format_endian_default;
|
|
+ enum regmap_endian val_format_endian_default;
|
|
+ size_t max_raw_read;
|
|
+ size_t max_raw_write;
|
|
+};
|
|
+
|
|
+struct reg_field {
|
|
+ unsigned int reg;
|
|
+ unsigned int lsb;
|
|
+ unsigned int msb;
|
|
+ unsigned int id_size;
|
|
+ unsigned int id_offset;
|
|
+};
|
|
+
|
|
+struct regmap_format {
|
|
+ size_t buf_size;
|
|
+ size_t reg_bytes;
|
|
+ size_t pad_bytes;
|
|
+ size_t val_bytes;
|
|
+ void (*format_write)(struct regmap___2 *, unsigned int, unsigned int);
|
|
+ void (*format_reg)(void *, unsigned int, unsigned int);
|
|
+ void (*format_val)(void *, unsigned int, unsigned int);
|
|
+ unsigned int (*parse_val)(const void *);
|
|
+ void (*parse_inplace)(void *);
|
|
+};
|
|
+
|
|
+struct hwspinlock;
|
|
+
|
|
+struct regcache_ops;
|
|
+
|
|
+struct regmap___2 {
|
|
+ union {
|
|
+ struct mutex mutex;
|
|
+ struct {
|
|
+ spinlock_t spinlock;
|
|
+ long unsigned int spinlock_flags;
|
|
+ };
|
|
+ };
|
|
+ regmap_lock lock;
|
|
+ regmap_unlock unlock;
|
|
+ void *lock_arg;
|
|
+ gfp_t alloc_flags;
|
|
+ struct device *dev;
|
|
+ void *work_buf;
|
|
+ struct regmap_format format;
|
|
+ const struct regmap_bus *bus;
|
|
+ void *bus_context;
|
|
+ const char *name;
|
|
+ bool async;
|
|
+ spinlock_t async_lock;
|
|
+ wait_queue_head_t async_waitq;
|
|
+ struct list_head async_list;
|
|
+ struct list_head async_free;
|
|
+ int async_ret;
|
|
+ bool debugfs_disable;
|
|
+ struct dentry *debugfs;
|
|
+ const char *debugfs_name;
|
|
+ unsigned int debugfs_reg_len;
|
|
+ unsigned int debugfs_val_len;
|
|
+ unsigned int debugfs_tot_len;
|
|
+ struct list_head debugfs_off_cache;
|
|
+ struct mutex cache_lock;
|
|
+ unsigned int max_register;
|
|
+ bool (*writeable_reg)(struct device *, unsigned int);
|
|
+ bool (*readable_reg)(struct device *, unsigned int);
|
|
+ bool (*volatile_reg)(struct device *, unsigned int);
|
|
+ bool (*precious_reg)(struct device *, unsigned int);
|
|
+ bool (*readable_noinc_reg)(struct device *, unsigned int);
|
|
+ const struct regmap_access_table *wr_table;
|
|
+ const struct regmap_access_table *rd_table;
|
|
+ const struct regmap_access_table *volatile_table;
|
|
+ const struct regmap_access_table *precious_table;
|
|
+ const struct regmap_access_table *rd_noinc_table;
|
|
+ int (*reg_read)(void *, unsigned int, unsigned int *);
|
|
+ int (*reg_write)(void *, unsigned int, unsigned int);
|
|
+ int (*reg_update_bits)(void *, unsigned int, unsigned int, unsigned int);
|
|
+ bool defer_caching;
|
|
+ long unsigned int read_flag_mask;
|
|
+ long unsigned int write_flag_mask;
|
|
+ int reg_shift;
|
|
+ int reg_stride;
|
|
+ int reg_stride_order;
|
|
+ const struct regcache_ops *cache_ops;
|
|
+ enum regcache_type cache_type;
|
|
+ unsigned int cache_size_raw;
|
|
+ unsigned int cache_word_size;
|
|
+ unsigned int num_reg_defaults;
|
|
+ unsigned int num_reg_defaults_raw;
|
|
+ bool cache_only;
|
|
+ bool cache_bypass;
|
|
+ bool cache_free;
|
|
+ struct reg_default *reg_defaults;
|
|
+ const void *reg_defaults_raw;
|
|
+ void *cache;
|
|
+ bool cache_dirty;
|
|
+ bool no_sync_defaults;
|
|
+ struct reg_sequence *patch;
|
|
+ int patch_regs;
|
|
+ bool use_single_read;
|
|
+ bool use_single_write;
|
|
+ bool can_multi_write;
|
|
+ size_t max_raw_read;
|
|
+ size_t max_raw_write;
|
|
+ struct rb_root range_tree;
|
|
+ void *selector_work_buf;
|
|
+ struct hwspinlock *hwlock;
|
|
+};
|
|
+
|
|
+struct regcache_ops {
|
|
+ const char *name;
|
|
+ enum regcache_type type;
|
|
+ int (*init)(struct regmap___2 *);
|
|
+ int (*exit)(struct regmap___2 *);
|
|
+ void (*debugfs_init)(struct regmap___2 *);
|
|
+ int (*read)(struct regmap___2 *, unsigned int, unsigned int *);
|
|
+ int (*write)(struct regmap___2 *, unsigned int, unsigned int);
|
|
+ int (*sync)(struct regmap___2 *, unsigned int, unsigned int);
|
|
+ int (*drop)(struct regmap___2 *, unsigned int, unsigned int);
|
|
+};
|
|
+
|
|
+struct regmap_range_node {
|
|
+ struct rb_node node;
|
|
+ const char *name;
|
|
+ struct regmap___2 *map;
|
|
+ unsigned int range_min;
|
|
+ unsigned int range_max;
|
|
+ unsigned int selector_reg;
|
|
+ unsigned int selector_mask;
|
|
+ int selector_shift;
|
|
+ unsigned int window_start;
|
|
+ unsigned int window_len;
|
|
+};
|
|
+
|
|
+struct regmap_field {
|
|
+ struct regmap___2 *regmap;
|
|
+ unsigned int mask;
|
|
+ unsigned int shift;
|
|
+ unsigned int reg;
|
|
+ unsigned int id_size;
|
|
+ unsigned int id_offset;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_regmap_reg {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ unsigned int reg;
|
|
+ unsigned int val;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_regmap_block {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ unsigned int reg;
|
|
+ int count;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_regcache_sync {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ u32 __data_loc_status;
|
|
+ u32 __data_loc_type;
|
|
+ int type;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_regmap_bool {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ int flag;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_regmap_async {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_regcache_drop_region {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ unsigned int from;
|
|
+ unsigned int to;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_regmap_reg {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_regmap_block {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_regcache_sync {
|
|
+ u32 name;
|
|
+ u32 status;
|
|
+ u32 type;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_regmap_bool {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_regmap_async {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_regcache_drop_region {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct regcache_rbtree_node {
|
|
+ void *block;
|
|
+ long int *cache_present;
|
|
+ unsigned int base_reg;
|
|
+ unsigned int blklen;
|
|
+ struct rb_node node;
|
|
+};
|
|
+
|
|
+struct regcache_rbtree_ctx {
|
|
+ struct rb_root root;
|
|
+ struct regcache_rbtree_node *cached_rbnode;
|
|
+};
|
|
+
|
|
+struct regmap_debugfs_off_cache {
|
|
+ struct list_head list;
|
|
+ off_t min;
|
|
+ off_t max;
|
|
+ unsigned int base_reg;
|
|
+ unsigned int max_reg;
|
|
+};
|
|
+
|
|
+struct regmap_debugfs_node {
|
|
+ struct regmap___2 *map;
|
|
+ const char *name;
|
|
+ struct list_head link;
|
|
+};
|
|
+
|
|
+struct i2c_msg {
|
|
+ __u16 addr;
|
|
+ __u16 flags;
|
|
+ __u16 len;
|
|
+ __u8 *buf;
|
|
+};
|
|
+
|
|
+union i2c_smbus_data {
|
|
+ __u8 byte;
|
|
+ __u16 word;
|
|
+ __u8 block[34];
|
|
+};
|
|
+
|
|
+struct i2c_adapter;
|
|
+
|
|
+struct i2c_client {
|
|
+ short unsigned int flags;
|
|
+ short unsigned int addr;
|
|
+ char name[20];
|
|
+ struct i2c_adapter *adapter;
|
|
+ struct device dev;
|
|
+ int init_irq;
|
|
+ int irq;
|
|
+ struct list_head detected;
|
|
+};
|
|
+
|
|
+struct i2c_algorithm;
|
|
+
|
|
+struct i2c_lock_operations;
|
|
+
|
|
+struct i2c_bus_recovery_info;
|
|
+
|
|
+struct i2c_adapter_quirks;
|
|
+
|
|
+struct i2c_adapter {
|
|
+ struct module *owner;
|
|
+ unsigned int class;
|
|
+ const struct i2c_algorithm *algo;
|
|
+ void *algo_data;
|
|
+ const struct i2c_lock_operations *lock_ops;
|
|
+ struct rt_mutex bus_lock;
|
|
+ struct rt_mutex mux_lock;
|
|
+ int timeout;
|
|
+ int retries;
|
|
+ struct device dev;
|
|
+ int nr;
|
|
+ char name[48];
|
|
+ struct completion dev_released;
|
|
+ struct mutex userspace_clients_lock;
|
|
+ struct list_head userspace_clients;
|
|
+ struct i2c_bus_recovery_info *bus_recovery_info;
|
|
+ const struct i2c_adapter_quirks *quirks;
|
|
+ struct irq_domain *host_notify_domain;
|
|
+};
|
|
+
|
|
+struct i2c_algorithm {
|
|
+ int (*master_xfer)(struct i2c_adapter *, struct i2c_msg *, int);
|
|
+ int (*smbus_xfer)(struct i2c_adapter *, u16, short unsigned int, char, u8, int, union i2c_smbus_data *);
|
|
+ u32 (*functionality)(struct i2c_adapter *);
|
|
+};
|
|
+
|
|
+struct i2c_lock_operations {
|
|
+ void (*lock_bus)(struct i2c_adapter *, unsigned int);
|
|
+ int (*trylock_bus)(struct i2c_adapter *, unsigned int);
|
|
+ void (*unlock_bus)(struct i2c_adapter *, unsigned int);
|
|
+};
|
|
+
|
|
+struct i2c_bus_recovery_info {
|
|
+ int (*recover_bus)(struct i2c_adapter *);
|
|
+ int (*get_scl)(struct i2c_adapter *);
|
|
+ void (*set_scl)(struct i2c_adapter *, int);
|
|
+ int (*get_sda)(struct i2c_adapter *);
|
|
+ void (*set_sda)(struct i2c_adapter *, int);
|
|
+ int (*get_bus_free)(struct i2c_adapter *);
|
|
+ void (*prepare_recovery)(struct i2c_adapter *);
|
|
+ void (*unprepare_recovery)(struct i2c_adapter *);
|
|
+ struct gpio_desc___2 *scl_gpiod;
|
|
+ struct gpio_desc___2 *sda_gpiod;
|
|
+};
|
|
+
|
|
+struct i2c_adapter_quirks {
|
|
+ u64 flags;
|
|
+ int max_num_msgs;
|
|
+ u16 max_write_len;
|
|
+ u16 max_read_len;
|
|
+ u16 max_comb_1st_msg_len;
|
|
+ u16 max_comb_2nd_msg_len;
|
|
+};
|
|
+
|
|
+struct spi_statistics {
|
|
+ spinlock_t lock;
|
|
+ long unsigned int messages;
|
|
+ long unsigned int transfers;
|
|
+ long unsigned int errors;
|
|
+ long unsigned int timedout;
|
|
+ long unsigned int spi_sync;
|
|
+ long unsigned int spi_sync_immediate;
|
|
+ long unsigned int spi_async;
|
|
+ long long unsigned int bytes;
|
|
+ long long unsigned int bytes_rx;
|
|
+ long long unsigned int bytes_tx;
|
|
+ long unsigned int transfer_bytes_histo[17];
|
|
+ long unsigned int transfers_split_maxsize;
|
|
+};
|
|
+
|
|
+struct spi_controller;
|
|
+
|
|
+struct spi_device {
|
|
+ struct device dev;
|
|
+ struct spi_controller *controller;
|
|
+ struct spi_controller *master;
|
|
+ u32 max_speed_hz;
|
|
+ u8 chip_select;
|
|
+ u8 bits_per_word;
|
|
+ u16 mode;
|
|
+ int irq;
|
|
+ void *controller_state;
|
|
+ void *controller_data;
|
|
+ char modalias[32];
|
|
+ int cs_gpio;
|
|
+ struct spi_statistics statistics;
|
|
+};
|
|
+
|
|
+struct spi_message;
|
|
+
|
|
+struct spi_transfer;
|
|
+
|
|
+struct spi_controller_mem_ops;
|
|
+
|
|
+struct spi_controller {
|
|
+ struct device dev;
|
|
+ struct list_head list;
|
|
+ s16 bus_num;
|
|
+ u16 num_chipselect;
|
|
+ u16 dma_alignment;
|
|
+ u16 mode_bits;
|
|
+ u32 bits_per_word_mask;
|
|
+ u32 min_speed_hz;
|
|
+ u32 max_speed_hz;
|
|
+ u16 flags;
|
|
+ bool slave;
|
|
+ size_t (*max_transfer_size)(struct spi_device *);
|
|
+ size_t (*max_message_size)(struct spi_device *);
|
|
+ struct mutex io_mutex;
|
|
+ spinlock_t bus_lock_spinlock;
|
|
+ struct mutex bus_lock_mutex;
|
|
+ bool bus_lock_flag;
|
|
+ int (*setup)(struct spi_device *);
|
|
+ int (*transfer)(struct spi_device *, struct spi_message *);
|
|
+ void (*cleanup)(struct spi_device *);
|
|
+ bool (*can_dma)(struct spi_controller *, struct spi_device *, struct spi_transfer *);
|
|
+ bool queued;
|
|
+ struct kthread_worker kworker;
|
|
+ struct task_struct *kworker_task;
|
|
+ struct kthread_work pump_messages;
|
|
+ spinlock_t queue_lock;
|
|
+ struct list_head queue;
|
|
+ struct spi_message *cur_msg;
|
|
+ bool idling;
|
|
+ bool busy;
|
|
+ bool running;
|
|
+ bool rt;
|
|
+ bool auto_runtime_pm;
|
|
+ bool cur_msg_prepared;
|
|
+ bool cur_msg_mapped;
|
|
+ struct completion xfer_completion;
|
|
+ size_t max_dma_len;
|
|
+ int (*prepare_transfer_hardware)(struct spi_controller *);
|
|
+ int (*transfer_one_message)(struct spi_controller *, struct spi_message *);
|
|
+ int (*unprepare_transfer_hardware)(struct spi_controller *);
|
|
+ int (*prepare_message)(struct spi_controller *, struct spi_message *);
|
|
+ int (*unprepare_message)(struct spi_controller *, struct spi_message *);
|
|
+ int (*slave_abort)(struct spi_controller *);
|
|
+ void (*set_cs)(struct spi_device *, bool);
|
|
+ int (*transfer_one)(struct spi_controller *, struct spi_device *, struct spi_transfer *);
|
|
+ void (*handle_err)(struct spi_controller *, struct spi_message *);
|
|
+ const struct spi_controller_mem_ops *mem_ops;
|
|
+ int *cs_gpios;
|
|
+ struct spi_statistics statistics;
|
|
+ struct dma_chan___2 *dma_tx;
|
|
+ struct dma_chan___2 *dma_rx;
|
|
+ void *dummy_rx;
|
|
+ void *dummy_tx;
|
|
+ int (*fw_translate_cs)(struct spi_controller *, unsigned int);
|
|
+};
|
|
+
|
|
+struct spi_message {
|
|
+ struct list_head transfers;
|
|
+ struct spi_device *spi;
|
|
+ unsigned int is_dma_mapped: 1;
|
|
+ void (*complete)(void *);
|
|
+ void *context;
|
|
+ unsigned int frame_length;
|
|
+ unsigned int actual_length;
|
|
+ int status;
|
|
+ struct list_head queue;
|
|
+ void *state;
|
|
+ struct list_head resources;
|
|
+};
|
|
+
|
|
+struct spi_transfer {
|
|
+ const void *tx_buf;
|
|
+ void *rx_buf;
|
|
+ unsigned int len;
|
|
+ dma_addr_t tx_dma;
|
|
+ dma_addr_t rx_dma;
|
|
+ struct sg_table tx_sg;
|
|
+ struct sg_table rx_sg;
|
|
+ unsigned int cs_change: 1;
|
|
+ unsigned int tx_nbits: 3;
|
|
+ unsigned int rx_nbits: 3;
|
|
+ u8 bits_per_word;
|
|
+ u16 delay_usecs;
|
|
+ u32 speed_hz;
|
|
+ struct list_head transfer_list;
|
|
+};
|
|
+
|
|
+struct spi_mem;
|
|
+
|
|
+struct spi_mem_op;
|
|
+
|
|
+struct spi_controller_mem_ops {
|
|
+ int (*adjust_op_size)(struct spi_mem *, struct spi_mem_op *);
|
|
+ bool (*supports_op)(struct spi_mem *, const struct spi_mem_op *);
|
|
+ int (*exec_op)(struct spi_mem *, const struct spi_mem_op *);
|
|
+ const char * (*get_name)(struct spi_mem *);
|
|
+};
|
|
+
|
|
+struct regmap_async_spi {
|
|
+ struct regmap_async core;
|
|
+ struct spi_message m;
|
|
+ struct spi_transfer t[2];
|
|
+};
|
|
+
|
|
+struct devcd_entry {
|
|
+ struct device devcd_dev;
|
|
+ void *data;
|
|
+ size_t datalen;
|
|
+ struct module *owner;
|
|
+ ssize_t (*read)(char *, loff_t, size_t, void *, size_t);
|
|
+ void (*free)(void *);
|
|
+ struct delayed_work del_wk;
|
|
+ struct device *failing_dev;
|
|
+};
|
|
+
|
|
+typedef void (*irq_write_msi_msg_t)(struct msi_desc *, struct msi_msg *);
|
|
+
|
|
+struct platform_msi_priv_data {
|
|
+ struct device *dev;
|
|
+ void *host_data;
|
|
+ msi_alloc_info_t arg;
|
|
+ irq_write_msi_msg_t write_msg;
|
|
+ int devid;
|
|
+};
|
|
+
|
|
+struct test_struct {
|
|
+ char *get;
|
|
+ char *put;
|
|
+ void (*get_handler)(char *);
|
|
+ int (*put_handler)(char *, char *);
|
|
+};
|
|
+
|
|
+struct test_state {
|
|
+ char *name;
|
|
+ struct test_struct *tst;
|
|
+ int idx;
|
|
+ int (*run_test)(int, int);
|
|
+ int (*validate_put)(char *);
|
|
+};
|
|
+
|
|
+struct mfd_cell_acpi_match;
|
|
+
|
|
+struct mfd_cell {
|
|
+ const char *name;
|
|
+ int id;
|
|
+ atomic_t *usage_count;
|
|
+ int (*enable)(struct platform_device *);
|
|
+ int (*disable)(struct platform_device *);
|
|
+ int (*suspend)(struct platform_device *);
|
|
+ int (*resume)(struct platform_device *);
|
|
+ void *platform_data;
|
|
+ size_t pdata_size;
|
|
+ struct property_entry *properties;
|
|
+ const char *of_compatible;
|
|
+ const struct mfd_cell_acpi_match *acpi_match;
|
|
+ int num_resources;
|
|
+ const struct resource *resources;
|
|
+ bool ignore_resource_conflicts;
|
|
+ bool pm_runtime_no_callbacks;
|
|
+ const char * const *parent_supplies;
|
|
+ int num_parent_supplies;
|
|
+};
|
|
+
|
|
+struct mfd_cell_acpi_match {
|
|
+ const char *pnpid;
|
|
+ const long long unsigned int adr;
|
|
+};
|
|
+
|
|
+struct intel_lpss_platform_info {
|
|
+ struct resource *mem;
|
|
+ int irq;
|
|
+ long unsigned int clk_rate;
|
|
+ const char *clk_con_id;
|
|
+ struct property_entry *properties;
|
|
+};
|
|
+
|
|
+enum intel_lpss_dev_type {
|
|
+ LPSS_DEV_I2C = 0,
|
|
+ LPSS_DEV_UART = 1,
|
|
+ LPSS_DEV_SPI = 2,
|
|
+};
|
|
+
|
|
+struct intel_lpss {
|
|
+ const struct intel_lpss_platform_info *info;
|
|
+ enum intel_lpss_dev_type type;
|
|
+ struct clk *clk;
|
|
+ struct clk_lookup *clock;
|
|
+ struct mfd_cell *cell;
|
|
+ struct device *dev;
|
|
+ void *priv;
|
|
+ u32 priv_ctx[64];
|
|
+ int devid;
|
|
+ u32 caps;
|
|
+ u32 active_ltr;
|
|
+ u32 idle_ltr;
|
|
+ struct dentry *debugfs;
|
|
+};
|
|
+
|
|
+struct dax_device___2;
|
|
+
|
|
+struct dax_operations {
|
|
+ long int (*direct_access)(struct dax_device___2 *, long unsigned int, long int, void **, pfn_t *);
|
|
+ size_t (*copy_from_iter)(struct dax_device___2 *, long unsigned int, void *, size_t, struct iov_iter *);
|
|
+ size_t (*copy_to_iter)(struct dax_device___2 *, long unsigned int, void *, size_t, struct iov_iter *);
|
|
+};
|
|
+
|
|
+struct dax_device___2 {
|
|
+ struct hlist_node list;
|
|
+ struct inode inode;
|
|
+ struct cdev cdev;
|
|
+ const char *host;
|
|
+ void *private;
|
|
+ long unsigned int flags;
|
|
+ const struct dax_operations *ops;
|
|
+};
|
|
+
|
|
+enum dax_device_flags {
|
|
+ DAXDEV_ALIVE = 0,
|
|
+ DAXDEV_WRITE_CACHE = 1,
|
|
+};
|
|
+
|
|
+struct dax_region {
|
|
+ int id;
|
|
+ int target_node;
|
|
+ struct kref kref;
|
|
+ struct device *dev;
|
|
+ unsigned int align;
|
|
+ struct resource res;
|
|
+ long unsigned int pfn_flags;
|
|
+};
|
|
+
|
|
+struct dev_dax {
|
|
+ struct dax_region *region;
|
|
+ struct dax_device *dax_dev;
|
|
+ int target_node;
|
|
+ struct device dev;
|
|
+ struct dev_pagemap pgmap;
|
|
+ struct percpu_ref ref;
|
|
+ struct completion cmp;
|
|
+};
|
|
+
|
|
+enum dev_dax_subsys {
|
|
+ DEV_DAX_BUS = 0,
|
|
+ DEV_DAX_CLASS = 1,
|
|
+};
|
|
+
|
|
+struct dax_device_driver {
|
|
+ struct device_driver drv;
|
|
+ struct list_head ids;
|
|
+ int match_always;
|
|
+};
|
|
+
|
|
+struct dax_id {
|
|
+ struct list_head list;
|
|
+ char dev_name[30];
|
|
+};
|
|
+
|
|
+enum id_action {
|
|
+ ID_REMOVE = 0,
|
|
+ ID_ADD = 1,
|
|
+};
|
|
+
|
|
+struct dma_fence_ops;
|
|
+
|
|
+struct dma_fence {
|
|
+ struct kref refcount;
|
|
+ const struct dma_fence_ops *ops;
|
|
+ struct callback_head rcu;
|
|
+ struct list_head cb_list;
|
|
+ spinlock_t *lock;
|
|
+ u64 context;
|
|
+ unsigned int seqno;
|
|
+ long unsigned int flags;
|
|
+ ktime_t timestamp;
|
|
+ int error;
|
|
+};
|
|
+
|
|
+struct dma_fence_ops {
|
|
+ const char * (*get_driver_name)(struct dma_fence *);
|
|
+ const char * (*get_timeline_name)(struct dma_fence *);
|
|
+ bool (*enable_signaling)(struct dma_fence *);
|
|
+ bool (*signaled)(struct dma_fence *);
|
|
+ long int (*wait)(struct dma_fence *, bool, long int);
|
|
+ void (*release)(struct dma_fence *);
|
|
+ void (*fence_value_str)(struct dma_fence *, char *, int);
|
|
+ void (*timeline_value_str)(struct dma_fence *, char *, int);
|
|
+};
|
|
+
|
|
+enum dma_fence_flag_bits {
|
|
+ DMA_FENCE_FLAG_SIGNALED_BIT = 0,
|
|
+ DMA_FENCE_FLAG_TIMESTAMP_BIT = 1,
|
|
+ DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT = 2,
|
|
+ DMA_FENCE_FLAG_USER_BITS = 3,
|
|
+};
|
|
+
|
|
+struct dma_fence_cb;
|
|
+
|
|
+typedef void (*dma_fence_func_t)(struct dma_fence *, struct dma_fence_cb *);
|
|
+
|
|
+struct dma_fence_cb {
|
|
+ struct list_head node;
|
|
+ dma_fence_func_t func;
|
|
+};
|
|
+
|
|
+struct dma_buf;
|
|
+
|
|
+struct dma_buf_attachment;
|
|
+
|
|
+struct dma_buf_ops {
|
|
+ int (*attach)(struct dma_buf *, struct dma_buf_attachment *);
|
|
+ void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
|
|
+ struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, enum dma_data_direction);
|
|
+ void (*unmap_dma_buf)(struct dma_buf_attachment *, struct sg_table *, enum dma_data_direction);
|
|
+ void (*release)(struct dma_buf *);
|
|
+ int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
|
|
+ int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
|
|
+ void * (*map)(struct dma_buf *, long unsigned int);
|
|
+ void (*unmap)(struct dma_buf *, long unsigned int, void *);
|
|
+ int (*mmap)(struct dma_buf *, struct vm_area_struct *);
|
|
+ void * (*vmap)(struct dma_buf *);
|
|
+ void (*vunmap)(struct dma_buf *, void *);
|
|
+};
|
|
+
|
|
+struct dma_buf_poll_cb_t {
|
|
+ struct dma_fence_cb cb;
|
|
+ wait_queue_head_t *poll;
|
|
+ __poll_t active;
|
|
+};
|
|
+
|
|
+struct reservation_object;
|
|
+
|
|
+struct dma_buf {
|
|
+ size_t size;
|
|
+ struct file *file;
|
|
+ struct list_head attachments;
|
|
+ const struct dma_buf_ops *ops;
|
|
+ struct mutex lock;
|
|
+ unsigned int vmapping_counter;
|
|
+ void *vmap_ptr;
|
|
+ const char *exp_name;
|
|
+ struct module *owner;
|
|
+ struct list_head list_node;
|
|
+ void *priv;
|
|
+ struct reservation_object *resv;
|
|
+ wait_queue_head_t poll;
|
|
+ struct dma_buf_poll_cb_t cb_excl;
|
|
+ struct dma_buf_poll_cb_t cb_shared;
|
|
+};
|
|
+
|
|
+struct dma_buf_attachment {
|
|
+ struct dma_buf *dmabuf;
|
|
+ struct device *dev;
|
|
+ struct list_head node;
|
|
+ void *priv;
|
|
+};
|
|
+
|
|
+struct reservation_object_list;
|
|
+
|
|
+struct reservation_object {
|
|
+ struct ww_mutex lock;
|
|
+ seqcount_t seq;
|
|
+ struct dma_fence *fence_excl;
|
|
+ struct reservation_object_list *fence;
|
|
+ struct reservation_object_list *staged;
|
|
+};
|
|
+
|
|
+struct dma_buf_export_info {
|
|
+ const char *exp_name;
|
|
+ struct module *owner;
|
|
+ const struct dma_buf_ops *ops;
|
|
+ size_t size;
|
|
+ int flags;
|
|
+ struct reservation_object *resv;
|
|
+ void *priv;
|
|
+};
|
|
+
|
|
+struct ww_class {
|
|
+ atomic_long_t stamp;
|
|
+ struct lock_class_key acquire_key;
|
|
+ struct lock_class_key mutex_key;
|
|
+ const char *acquire_name;
|
|
+ const char *mutex_name;
|
|
+ unsigned int is_wait_die;
|
|
+};
|
|
+
|
|
+struct reservation_object_list {
|
|
+ struct callback_head rcu;
|
|
+ u32 shared_count;
|
|
+ u32 shared_max;
|
|
+ struct dma_fence *shared[0];
|
|
+};
|
|
+
|
|
+struct dma_buf_sync {
|
|
+ __u64 flags;
|
|
+};
|
|
+
|
|
+struct dma_buf_list {
|
|
+ struct list_head head;
|
|
+ struct mutex lock;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_dma_fence {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_driver;
|
|
+ u32 __data_loc_timeline;
|
|
+ unsigned int context;
|
|
+ unsigned int seqno;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_dma_fence {
|
|
+ u32 driver;
|
|
+ u32 timeline;
|
|
+};
|
|
+
|
|
+struct default_wait_cb {
|
|
+ struct dma_fence_cb base;
|
|
+ struct task_struct *task;
|
|
+};
|
|
+
|
|
+struct dma_fence_array;
|
|
+
|
|
+struct dma_fence_array_cb {
|
|
+ struct dma_fence_cb cb;
|
|
+ struct dma_fence_array *array;
|
|
+};
|
|
+
|
|
+struct dma_fence_array {
|
|
+ struct dma_fence base;
|
|
+ spinlock_t lock;
|
|
+ unsigned int num_fences;
|
|
+ atomic_t num_pending;
|
|
+ struct dma_fence **fences;
|
|
+ struct irq_work work;
|
|
+};
|
|
+
|
|
+enum seqno_fence_condition {
|
|
+ SEQNO_FENCE_WAIT_GEQUAL = 0,
|
|
+ SEQNO_FENCE_WAIT_NONZERO = 1,
|
|
+};
|
|
+
|
|
+struct seqno_fence {
|
|
+ struct dma_fence base;
|
|
+ const struct dma_fence_ops *ops;
|
|
+ struct dma_buf *sync_buf;
|
|
+ uint32_t seqno_ofs;
|
|
+ enum seqno_fence_condition condition;
|
|
+};
|
|
+
|
|
+struct sync_file {
|
|
+ struct file *file;
|
|
+ char user_name[32];
|
|
+ struct list_head sync_file_list;
|
|
+ wait_queue_head_t wq;
|
|
+ long unsigned int flags;
|
|
+ struct dma_fence *fence;
|
|
+ struct dma_fence_cb cb;
|
|
+};
|
|
+
|
|
+struct sync_merge_data {
|
|
+ char name[32];
|
|
+ __s32 fd2;
|
|
+ __s32 fence;
|
|
+ __u32 flags;
|
|
+ __u32 pad;
|
|
+};
|
|
+
|
|
+struct sync_fence_info {
|
|
+ char obj_name[32];
|
|
+ char driver_name[32];
|
|
+ __s32 status;
|
|
+ __u32 flags;
|
|
+ __u64 timestamp_ns;
|
|
+};
|
|
+
|
|
+struct sync_file_info {
|
|
+ char name[32];
|
|
+ __s32 status;
|
|
+ __u32 flags;
|
|
+ __u32 num_fences;
|
|
+ __u32 pad;
|
|
+ __u64 sync_fence_info;
|
|
+};
|
|
+
|
|
+struct scsi_sense_hdr {
|
|
+ u8 response_code;
|
|
+ u8 sense_key;
|
|
+ u8 asc;
|
|
+ u8 ascq;
|
|
+ u8 byte4;
|
|
+ u8 byte5;
|
|
+ u8 byte6;
|
|
+ u8 additional_length;
|
|
+};
|
|
+
|
|
+typedef __u64 blist_flags_t;
|
|
+
|
|
+enum scsi_device_state {
|
|
+ SDEV_CREATED = 1,
|
|
+ SDEV_RUNNING = 2,
|
|
+ SDEV_CANCEL = 3,
|
|
+ SDEV_DEL = 4,
|
|
+ SDEV_QUIESCE = 5,
|
|
+ SDEV_OFFLINE = 6,
|
|
+ SDEV_TRANSPORT_OFFLINE = 7,
|
|
+ SDEV_BLOCK = 8,
|
|
+ SDEV_CREATED_BLOCK = 9,
|
|
+};
|
|
+
|
|
+struct scsi_vpd {
|
|
+ struct callback_head rcu;
|
|
+ int len;
|
|
+ unsigned char data[0];
|
|
+};
|
|
+
|
|
+struct Scsi_Host;
|
|
+
|
|
+struct scsi_target;
|
|
+
|
|
+struct scsi_device_handler;
|
|
+
|
|
+struct scsi_device {
|
|
+ struct Scsi_Host *host;
|
|
+ struct request_queue *request_queue;
|
|
+ struct list_head siblings;
|
|
+ struct list_head same_target_siblings;
|
|
+ atomic_t device_busy;
|
|
+ atomic_t device_blocked;
|
|
+ spinlock_t list_lock;
|
|
+ struct list_head cmd_list;
|
|
+ struct list_head starved_entry;
|
|
+ short unsigned int queue_depth;
|
|
+ short unsigned int max_queue_depth;
|
|
+ short unsigned int last_queue_full_depth;
|
|
+ short unsigned int last_queue_full_count;
|
|
+ long unsigned int last_queue_full_time;
|
|
+ long unsigned int queue_ramp_up_period;
|
|
+ long unsigned int last_queue_ramp_up;
|
|
+ unsigned int id;
|
|
+ unsigned int channel;
|
|
+ u64 lun;
|
|
+ unsigned int manufacturer;
|
|
+ unsigned int sector_size;
|
|
+ void *hostdata;
|
|
+ unsigned char type;
|
|
+ char scsi_level;
|
|
+ char inq_periph_qual;
|
|
+ struct mutex inquiry_mutex;
|
|
+ unsigned char inquiry_len;
|
|
+ unsigned char *inquiry;
|
|
+ const char *vendor;
|
|
+ const char *model;
|
|
+ const char *rev;
|
|
+ struct scsi_vpd *vpd_pg83;
|
|
+ struct scsi_vpd *vpd_pg80;
|
|
+ unsigned char current_tag;
|
|
+ struct scsi_target *sdev_target;
|
|
+ blist_flags_t sdev_bflags;
|
|
+ unsigned int eh_timeout;
|
|
+ unsigned int removable: 1;
|
|
+ unsigned int changed: 1;
|
|
+ unsigned int busy: 1;
|
|
+ unsigned int lockable: 1;
|
|
+ unsigned int locked: 1;
|
|
+ unsigned int borken: 1;
|
|
+ unsigned int disconnect: 1;
|
|
+ unsigned int soft_reset: 1;
|
|
+ unsigned int sdtr: 1;
|
|
+ unsigned int wdtr: 1;
|
|
+ unsigned int ppr: 1;
|
|
+ unsigned int tagged_supported: 1;
|
|
+ unsigned int simple_tags: 1;
|
|
+ unsigned int was_reset: 1;
|
|
+ unsigned int expecting_cc_ua: 1;
|
|
+ unsigned int use_10_for_rw: 1;
|
|
+ unsigned int use_10_for_ms: 1;
|
|
+ unsigned int no_report_opcodes: 1;
|
|
+ unsigned int no_write_same: 1;
|
|
+ unsigned int use_16_for_rw: 1;
|
|
+ unsigned int skip_ms_page_8: 1;
|
|
+ unsigned int skip_ms_page_3f: 1;
|
|
+ unsigned int skip_vpd_pages: 1;
|
|
+ unsigned int try_vpd_pages: 1;
|
|
+ unsigned int use_192_bytes_for_3f: 1;
|
|
+ unsigned int no_start_on_add: 1;
|
|
+ unsigned int allow_restart: 1;
|
|
+ unsigned int manage_start_stop: 1;
|
|
+ unsigned int start_stop_pwr_cond: 1;
|
|
+ unsigned int no_uld_attach: 1;
|
|
+ unsigned int select_no_atn: 1;
|
|
+ unsigned int fix_capacity: 1;
|
|
+ unsigned int guess_capacity: 1;
|
|
+ unsigned int retry_hwerror: 1;
|
|
+ unsigned int last_sector_bug: 1;
|
|
+ unsigned int no_read_disc_info: 1;
|
|
+ unsigned int no_read_capacity_16: 1;
|
|
+ unsigned int try_rc_10_first: 1;
|
|
+ unsigned int security_supported: 1;
|
|
+ unsigned int is_visible: 1;
|
|
+ unsigned int wce_default_on: 1;
|
|
+ unsigned int no_dif: 1;
|
|
+ unsigned int broken_fua: 1;
|
|
+ unsigned int lun_in_cdb: 1;
|
|
+ unsigned int unmap_limit_for_ws: 1;
|
|
+ atomic_t disk_events_disable_depth;
|
|
+ long unsigned int supported_events[1];
|
|
+ long unsigned int pending_events[1];
|
|
+ struct list_head event_list;
|
|
+ struct work_struct event_work;
|
|
+ unsigned int max_device_blocked;
|
|
+ atomic_t iorequest_cnt;
|
|
+ atomic_t iodone_cnt;
|
|
+ atomic_t ioerr_cnt;
|
|
+ struct device sdev_gendev;
|
|
+ struct device sdev_dev;
|
|
+ struct execute_work ew;
|
|
+ struct work_struct requeue_work;
|
|
+ struct scsi_device_handler *handler;
|
|
+ void *handler_data;
|
|
+ unsigned char access_state;
|
|
+ struct mutex state_mutex;
|
|
+ enum scsi_device_state sdev_state;
|
|
+ struct task_struct *quiesced_by;
|
|
+ long unsigned int offline_already;
|
|
+ atomic_t restarts;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int sdev_data[0];
|
|
+};
|
|
+
|
|
+enum scsi_host_state {
|
|
+ SHOST_CREATED = 1,
|
|
+ SHOST_RUNNING = 2,
|
|
+ SHOST_CANCEL = 3,
|
|
+ SHOST_DEL = 4,
|
|
+ SHOST_RECOVERY = 5,
|
|
+ SHOST_CANCEL_RECOVERY = 6,
|
|
+ SHOST_DEL_RECOVERY = 7,
|
|
+};
|
|
+
|
|
+struct scsi_host_template;
|
|
+
|
|
+struct scsi_transport_template;
|
|
+
|
|
+struct Scsi_Host {
|
|
+ struct list_head __devices;
|
|
+ struct list_head __targets;
|
|
+ struct list_head starved_list;
|
|
+ spinlock_t default_lock;
|
|
+ spinlock_t *host_lock;
|
|
+ struct mutex scan_mutex;
|
|
+ struct list_head eh_cmd_q;
|
|
+ struct task_struct *ehandler;
|
|
+ struct completion *eh_action;
|
|
+ wait_queue_head_t host_wait;
|
|
+ struct scsi_host_template *hostt;
|
|
+ struct scsi_transport_template *transportt;
|
|
+ union {
|
|
+ struct blk_queue_tag *bqt;
|
|
+ struct blk_mq_tag_set tag_set;
|
|
+ };
|
|
+ atomic_t host_busy;
|
|
+ atomic_t host_blocked;
|
|
+ unsigned int host_failed;
|
|
+ unsigned int host_eh_scheduled;
|
|
+ unsigned int host_no;
|
|
+ int eh_deadline;
|
|
+ long unsigned int last_reset;
|
|
+ unsigned int max_channel;
|
|
+ unsigned int max_id;
|
|
+ u64 max_lun;
|
|
+ unsigned int unique_id;
|
|
+ short unsigned int max_cmd_len;
|
|
+ int this_id;
|
|
+ int can_queue;
|
|
+ short int cmd_per_lun;
|
|
+ short unsigned int sg_tablesize;
|
|
+ short unsigned int sg_prot_tablesize;
|
|
+ unsigned int max_sectors;
|
|
+ long unsigned int dma_boundary;
|
|
+ unsigned int nr_hw_queues;
|
|
+ long unsigned int cmd_serial_number;
|
|
+ unsigned int active_mode: 2;
|
|
+ unsigned int unchecked_isa_dma: 1;
|
|
+ unsigned int use_clustering: 1;
|
|
+ unsigned int host_self_blocked: 1;
|
|
+ unsigned int reverse_ordering: 1;
|
|
+ unsigned int tmf_in_progress: 1;
|
|
+ unsigned int async_scan: 1;
|
|
+ unsigned int eh_noresume: 1;
|
|
+ unsigned int no_write_same: 1;
|
|
+ unsigned int use_blk_mq: 1;
|
|
+ unsigned int use_cmd_list: 1;
|
|
+ unsigned int short_inquiry: 1;
|
|
+ char work_q_name[20];
|
|
+ struct workqueue_struct *work_q;
|
|
+ struct workqueue_struct *tmf_work_q;
|
|
+ unsigned int no_scsi2_lun_in_cdb: 1;
|
|
+ unsigned int max_host_blocked;
|
|
+ unsigned int prot_capabilities;
|
|
+ unsigned char prot_guard_type;
|
|
+ long unsigned int base;
|
|
+ long unsigned int io_port;
|
|
+ unsigned char n_io_port;
|
|
+ unsigned char dma_channel;
|
|
+ unsigned int irq;
|
|
+ enum scsi_host_state shost_state;
|
|
+ struct device shost_gendev;
|
|
+ struct device shost_dev;
|
|
+ void *shost_data;
|
|
+ struct device *dma_dev;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int kabi_reserved5;
|
|
+ long unsigned int kabi_reserved6;
|
|
+ long unsigned int hostdata[0];
|
|
+};
|
|
+
|
|
+enum scsi_target_state {
|
|
+ STARGET_CREATED = 1,
|
|
+ STARGET_RUNNING = 2,
|
|
+ STARGET_REMOVE = 3,
|
|
+ STARGET_CREATED_REMOVE = 4,
|
|
+ STARGET_DEL = 5,
|
|
+};
|
|
+
|
|
+struct scsi_target {
|
|
+ struct scsi_device *starget_sdev_user;
|
|
+ struct list_head siblings;
|
|
+ struct list_head devices;
|
|
+ struct device dev;
|
|
+ struct kref reap_ref;
|
|
+ unsigned int channel;
|
|
+ unsigned int id;
|
|
+ unsigned int create: 1;
|
|
+ unsigned int single_lun: 1;
|
|
+ unsigned int pdt_1f_for_no_lun: 1;
|
|
+ unsigned int no_report_luns: 1;
|
|
+ unsigned int expecting_lun_change: 1;
|
|
+ atomic_t target_busy;
|
|
+ atomic_t target_blocked;
|
|
+ unsigned int can_queue;
|
|
+ unsigned int max_target_blocked;
|
|
+ char scsi_level;
|
|
+ enum scsi_target_state state;
|
|
+ void *hostdata;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ long unsigned int starget_data[0];
|
|
+};
|
|
+
|
|
+struct scsi_data_buffer {
|
|
+ struct sg_table table;
|
|
+ unsigned int length;
|
|
+ int resid;
|
|
+};
|
|
+
|
|
+struct scsi_pointer {
|
|
+ char *ptr;
|
|
+ int this_residual;
|
|
+ struct scatterlist *buffer;
|
|
+ int buffers_residual;
|
|
+ dma_addr_t dma_handle;
|
|
+ volatile int Status;
|
|
+ volatile int Message;
|
|
+ volatile int have_data_in;
|
|
+ volatile int sent_command;
|
|
+ volatile int phase;
|
|
+};
|
|
+
|
|
+struct scsi_cmnd {
|
|
+ struct scsi_request req;
|
|
+ struct scsi_device *device;
|
|
+ struct list_head list;
|
|
+ struct list_head eh_entry;
|
|
+ struct delayed_work abort_work;
|
|
+ struct callback_head rcu;
|
|
+ int eh_eflags;
|
|
+ long unsigned int serial_number;
|
|
+ long unsigned int jiffies_at_alloc;
|
|
+ int retries;
|
|
+ int allowed;
|
|
+ unsigned char prot_op;
|
|
+ unsigned char prot_type;
|
|
+ unsigned char prot_flags;
|
|
+ short unsigned int cmd_len;
|
|
+ enum dma_data_direction sc_data_direction;
|
|
+ unsigned char *cmnd;
|
|
+ struct scsi_data_buffer sdb;
|
|
+ struct scsi_data_buffer *prot_sdb;
|
|
+ unsigned int underflow;
|
|
+ unsigned int transfersize;
|
|
+ struct request *request;
|
|
+ unsigned char *sense_buffer;
|
|
+ void (*scsi_done)(struct scsi_cmnd *);
|
|
+ struct scsi_pointer SCp;
|
|
+ unsigned char *host_scribble;
|
|
+ int result;
|
|
+ int flags;
|
|
+ unsigned char tag;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+enum scsi_prot_operations {
|
|
+ SCSI_PROT_NORMAL = 0,
|
|
+ SCSI_PROT_READ_INSERT = 1,
|
|
+ SCSI_PROT_WRITE_STRIP = 2,
|
|
+ SCSI_PROT_READ_STRIP = 3,
|
|
+ SCSI_PROT_WRITE_INSERT = 4,
|
|
+ SCSI_PROT_READ_PASS = 5,
|
|
+ SCSI_PROT_WRITE_PASS = 6,
|
|
+};
|
|
+
|
|
+struct scsi_driver {
|
|
+ struct device_driver gendrv;
|
|
+ void (*rescan)(struct device *);
|
|
+ int (*init_command)(struct scsi_cmnd *);
|
|
+ void (*uninit_command)(struct scsi_cmnd *);
|
|
+ int (*done)(struct scsi_cmnd *);
|
|
+ int (*eh_action)(struct scsi_cmnd *, int);
|
|
+ void (*eh_reset)(struct scsi_cmnd *);
|
|
+};
|
|
+
|
|
+struct scsi_host_cmd_pool;
|
|
+
|
|
+struct scsi_host_template {
|
|
+ struct module *module;
|
|
+ const char *name;
|
|
+ const char * (*info)(struct Scsi_Host *);
|
|
+ int (*ioctl)(struct scsi_device *, unsigned int, void *);
|
|
+ int (*compat_ioctl)(struct scsi_device *, unsigned int, void *);
|
|
+ int (*queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
|
|
+ int (*eh_abort_handler)(struct scsi_cmnd *);
|
|
+ int (*eh_device_reset_handler)(struct scsi_cmnd *);
|
|
+ int (*eh_target_reset_handler)(struct scsi_cmnd *);
|
|
+ int (*eh_bus_reset_handler)(struct scsi_cmnd *);
|
|
+ int (*eh_host_reset_handler)(struct scsi_cmnd *);
|
|
+ int (*slave_alloc)(struct scsi_device *);
|
|
+ int (*slave_configure)(struct scsi_device *);
|
|
+ void (*slave_destroy)(struct scsi_device *);
|
|
+ int (*target_alloc)(struct scsi_target *);
|
|
+ void (*target_destroy)(struct scsi_target *);
|
|
+ int (*scan_finished)(struct Scsi_Host *, long unsigned int);
|
|
+ void (*scan_start)(struct Scsi_Host *);
|
|
+ int (*change_queue_depth)(struct scsi_device *, int);
|
|
+ int (*map_queues)(struct Scsi_Host *);
|
|
+ int (*bios_param)(struct scsi_device *, struct block_device *, sector_t, int *);
|
|
+ void (*unlock_native_capacity)(struct scsi_device *);
|
|
+ int (*show_info)(struct seq_file *, struct Scsi_Host *);
|
|
+ int (*write_info)(struct Scsi_Host *, char *, int);
|
|
+ enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
|
|
+ int (*host_reset)(struct Scsi_Host *, int);
|
|
+ const char *proc_name;
|
|
+ struct proc_dir_entry *proc_dir;
|
|
+ int can_queue;
|
|
+ int this_id;
|
|
+ short unsigned int sg_tablesize;
|
|
+ short unsigned int sg_prot_tablesize;
|
|
+ unsigned int max_sectors;
|
|
+ long unsigned int dma_boundary;
|
|
+ short int cmd_per_lun;
|
|
+ unsigned char present;
|
|
+ int tag_alloc_policy;
|
|
+ unsigned int track_queue_depth: 1;
|
|
+ unsigned int supported_mode: 2;
|
|
+ unsigned int unchecked_isa_dma: 1;
|
|
+ unsigned int use_clustering: 1;
|
|
+ unsigned int emulated: 1;
|
|
+ unsigned int skip_settle_delay: 1;
|
|
+ unsigned int no_write_same: 1;
|
|
+ unsigned int force_blk_mq: 1;
|
|
+ unsigned int max_host_blocked;
|
|
+ struct device_attribute **shost_attrs;
|
|
+ struct device_attribute **sdev_attrs;
|
|
+ const struct attribute_group **sdev_groups;
|
|
+ u64 vendor_id;
|
|
+ unsigned int cmd_size;
|
|
+ struct scsi_host_cmd_pool *cmd_pool;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_scsi_dispatch_cmd_start {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int host_no;
|
|
+ unsigned int channel;
|
|
+ unsigned int id;
|
|
+ unsigned int lun;
|
|
+ unsigned int opcode;
|
|
+ unsigned int cmd_len;
|
|
+ unsigned int data_sglen;
|
|
+ unsigned int prot_sglen;
|
|
+ unsigned char prot_op;
|
|
+ u32 __data_loc_cmnd;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_scsi_dispatch_cmd_error {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int host_no;
|
|
+ unsigned int channel;
|
|
+ unsigned int id;
|
|
+ unsigned int lun;
|
|
+ int rtn;
|
|
+ unsigned int opcode;
|
|
+ unsigned int cmd_len;
|
|
+ unsigned int data_sglen;
|
|
+ unsigned int prot_sglen;
|
|
+ unsigned char prot_op;
|
|
+ u32 __data_loc_cmnd;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_scsi_cmd_done_timeout_template {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int host_no;
|
|
+ unsigned int channel;
|
|
+ unsigned int id;
|
|
+ unsigned int lun;
|
|
+ int result;
|
|
+ unsigned int opcode;
|
|
+ unsigned int cmd_len;
|
|
+ unsigned int data_sglen;
|
|
+ unsigned int prot_sglen;
|
|
+ unsigned char prot_op;
|
|
+ u32 __data_loc_cmnd;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_scsi_eh_wakeup {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int host_no;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_scsi_dispatch_cmd_start {
|
|
+ u32 cmnd;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_scsi_dispatch_cmd_error {
|
|
+ u32 cmnd;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_scsi_cmd_done_timeout_template {
|
|
+ u32 cmnd;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_scsi_eh_wakeup {};
|
|
+
|
|
+struct scsi_transport_template {
|
|
+ struct transport_container host_attrs;
|
|
+ struct transport_container target_attrs;
|
|
+ struct transport_container device_attrs;
|
|
+ int (*user_scan)(struct Scsi_Host *, uint, uint, u64);
|
|
+ int device_size;
|
|
+ int device_private_offset;
|
|
+ int target_size;
|
|
+ int target_private_offset;
|
|
+ int host_size;
|
|
+ unsigned int create_work_queue: 1;
|
|
+ void (*eh_strategy_handler)(struct Scsi_Host *);
|
|
+};
|
|
+
|
|
+struct scsi_idlun {
|
|
+ __u32 dev_id;
|
|
+ __u32 host_unique_id;
|
|
+};
|
|
+
|
|
+typedef void (*activate_complete)(void *, int);
|
|
+
|
|
+struct scsi_device_handler {
|
|
+ struct list_head list;
|
|
+ struct module *module;
|
|
+ const char *name;
|
|
+ int (*check_sense)(struct scsi_device *, struct scsi_sense_hdr *);
|
|
+ int (*attach)(struct scsi_device *);
|
|
+ void (*detach)(struct scsi_device *);
|
|
+ int (*activate)(struct scsi_device *, activate_complete, void *);
|
|
+ int (*prep_fn)(struct scsi_device *, struct request *);
|
|
+ int (*set_params)(struct scsi_device *, const char *);
|
|
+ void (*rescan)(struct scsi_device *);
|
|
+};
|
|
+
|
|
+struct scsi_eh_save {
|
|
+ int result;
|
|
+ unsigned int resid_len;
|
|
+ int eh_eflags;
|
|
+ enum dma_data_direction data_direction;
|
|
+ unsigned int underflow;
|
|
+ unsigned char cmd_len;
|
|
+ unsigned char prot_op;
|
|
+ unsigned char *cmnd;
|
|
+ struct scsi_data_buffer sdb;
|
|
+ struct request *next_rq;
|
|
+ unsigned char eh_cmnd[16];
|
|
+ struct scatterlist sense_sgl;
|
|
+};
|
|
+
|
|
+struct scsi_varlen_cdb_hdr {
|
|
+ __u8 opcode;
|
|
+ __u8 control;
|
|
+ __u8 misc[5];
|
|
+ __u8 additional_cdb_length;
|
|
+ __be16 service_action;
|
|
+};
|
|
+
|
|
+struct scsi_mode_data {
|
|
+ __u32 length;
|
|
+ __u16 block_descriptor_length;
|
|
+ __u8 medium_type;
|
|
+ __u8 device_specific;
|
|
+ __u8 header_length;
|
|
+ __u8 longlba: 1;
|
|
+};
|
|
+
|
|
+struct scsi_event {
|
|
+ enum scsi_device_event evt_type;
|
|
+ struct list_head node;
|
|
+};
|
|
+
|
|
+enum scsi_host_prot_capabilities {
|
|
+ SHOST_DIF_TYPE1_PROTECTION = 1,
|
|
+ SHOST_DIF_TYPE2_PROTECTION = 2,
|
|
+ SHOST_DIF_TYPE3_PROTECTION = 4,
|
|
+ SHOST_DIX_TYPE0_PROTECTION = 8,
|
|
+ SHOST_DIX_TYPE1_PROTECTION = 16,
|
|
+ SHOST_DIX_TYPE2_PROTECTION = 32,
|
|
+ SHOST_DIX_TYPE3_PROTECTION = 64,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ ACTION_FAIL = 0,
|
|
+ ACTION_REPREP = 1,
|
|
+ ACTION_RETRY = 2,
|
|
+ ACTION_DELAYED_RETRY = 3,
|
|
+};
|
|
+
|
|
+struct value_name_pair;
|
|
+
|
|
+struct sa_name_list {
|
|
+ int opcode;
|
|
+ const struct value_name_pair *arr;
|
|
+ int arr_sz;
|
|
+};
|
|
+
|
|
+struct value_name_pair {
|
|
+ int value;
|
|
+ const char *name;
|
|
+};
|
|
+
|
|
+struct error_info {
|
|
+ short unsigned int code12;
|
|
+ short unsigned int size;
|
|
+};
|
|
+
|
|
+struct error_info2 {
|
|
+ unsigned char code1;
|
|
+ unsigned char code2_min;
|
|
+ unsigned char code2_max;
|
|
+ const char *str;
|
|
+ const char *fmt;
|
|
+};
|
|
+
|
|
+struct scsi_lun {
|
|
+ __u8 scsi_lun[8];
|
|
+};
|
|
+
|
|
+enum scsi_timeouts {
|
|
+ SCSI_DEFAULT_EH_TIMEOUT = 10000,
|
|
+};
|
|
+
|
|
+enum scsi_scan_mode {
|
|
+ SCSI_SCAN_INITIAL = 0,
|
|
+ SCSI_SCAN_RESCAN = 1,
|
|
+ SCSI_SCAN_MANUAL = 2,
|
|
+};
|
|
+
|
|
+struct async_scan_data {
|
|
+ struct list_head list;
|
|
+ struct Scsi_Host *shost;
|
|
+ struct completion prev_finished;
|
|
+};
|
|
+
|
|
+enum scsi_devinfo_key {
|
|
+ SCSI_DEVINFO_GLOBAL = 0,
|
|
+ SCSI_DEVINFO_SPI = 1,
|
|
+};
|
|
+
|
|
+struct scsi_dev_info_list {
|
|
+ struct list_head dev_info_list;
|
|
+ char vendor[8];
|
|
+ char model[16];
|
|
+ blist_flags_t flags;
|
|
+ unsigned int compatible;
|
|
+};
|
|
+
|
|
+struct scsi_dev_info_list_table {
|
|
+ struct list_head node;
|
|
+ struct list_head scsi_dev_info_list;
|
|
+ const char *name;
|
|
+ int key;
|
|
+};
|
|
+
|
|
+struct double_list {
|
|
+ struct list_head *top;
|
|
+ struct list_head *bottom;
|
|
+};
|
|
+
|
|
+struct scsi_nl_hdr {
|
|
+ uint8_t version;
|
|
+ uint8_t transport;
|
|
+ uint16_t magic;
|
|
+ uint16_t msgtype;
|
|
+ uint16_t msglen;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SCSI_DH_OK = 0,
|
|
+ SCSI_DH_DEV_FAILED = 1,
|
|
+ SCSI_DH_DEV_TEMP_BUSY = 2,
|
|
+ SCSI_DH_DEV_UNSUPP = 3,
|
|
+ SCSI_DH_DEVICE_MAX = 4,
|
|
+ SCSI_DH_NOTCONN = 5,
|
|
+ SCSI_DH_CONN_FAILURE = 6,
|
|
+ SCSI_DH_TRANSPORT_MAX = 7,
|
|
+ SCSI_DH_IO = 8,
|
|
+ SCSI_DH_INVALID_IO = 9,
|
|
+ SCSI_DH_RETRY = 10,
|
|
+ SCSI_DH_IMM_RETRY = 11,
|
|
+ SCSI_DH_TIMED_OUT = 12,
|
|
+ SCSI_DH_RES_TEMP_UNAVAIL = 13,
|
|
+ SCSI_DH_DEV_OFFLINED = 14,
|
|
+ SCSI_DH_NOMEM = 15,
|
|
+ SCSI_DH_NOSYS = 16,
|
|
+ SCSI_DH_DRIVER_MAX = 17,
|
|
+};
|
|
+
|
|
+struct scsi_dh_blist {
|
|
+ const char *vendor;
|
|
+ const char *model;
|
|
+ const char *driver;
|
|
+};
|
|
+
|
|
+struct rdac_mode_6_hdr {
|
|
+ u8 data_len;
|
|
+ u8 medium_type;
|
|
+ u8 device_params;
|
|
+ u8 block_desc_len;
|
|
+};
|
|
+
|
|
+struct rdac_mode_10_hdr {
|
|
+ u16 data_len;
|
|
+ u8 medium_type;
|
|
+ u8 device_params;
|
|
+ u16 reserved;
|
|
+ u16 block_desc_len;
|
|
+};
|
|
+
|
|
+struct rdac_mode_common {
|
|
+ u8 controller_serial[16];
|
|
+ u8 alt_controller_serial[16];
|
|
+ u8 rdac_mode[2];
|
|
+ u8 alt_rdac_mode[2];
|
|
+ u8 quiescence_timeout;
|
|
+ u8 rdac_options;
|
|
+};
|
|
+
|
|
+struct rdac_pg_legacy {
|
|
+ struct rdac_mode_6_hdr hdr;
|
|
+ u8 page_code;
|
|
+ u8 page_len;
|
|
+ struct rdac_mode_common common;
|
|
+ u8 lun_table[32];
|
|
+ u8 reserved2[32];
|
|
+ u8 reserved3;
|
|
+ u8 reserved4;
|
|
+};
|
|
+
|
|
+struct rdac_pg_expanded {
|
|
+ struct rdac_mode_10_hdr hdr;
|
|
+ u8 page_code;
|
|
+ u8 subpage_code;
|
|
+ u8 page_len[2];
|
|
+ struct rdac_mode_common common;
|
|
+ u8 lun_table[256];
|
|
+ u8 reserved3;
|
|
+ u8 reserved4;
|
|
+};
|
|
+
|
|
+struct c9_inquiry {
|
|
+ u8 peripheral_info;
|
|
+ u8 page_code;
|
|
+ u8 reserved1;
|
|
+ u8 page_len;
|
|
+ u8 page_id[4];
|
|
+ u8 avte_cvp;
|
|
+ u8 path_prio;
|
|
+ u8 reserved2[38];
|
|
+};
|
|
+
|
|
+struct c4_inquiry {
|
|
+ u8 peripheral_info;
|
|
+ u8 page_code;
|
|
+ u8 reserved1;
|
|
+ u8 page_len;
|
|
+ u8 page_id[4];
|
|
+ u8 subsys_id[16];
|
|
+ u8 revision[4];
|
|
+ u8 slot_id[2];
|
|
+ u8 reserved[2];
|
|
+};
|
|
+
|
|
+struct c8_inquiry {
|
|
+ u8 peripheral_info;
|
|
+ u8 page_code;
|
|
+ u8 reserved1;
|
|
+ u8 page_len;
|
|
+ u8 page_id[4];
|
|
+ u8 reserved2[3];
|
|
+ u8 vol_uniq_id_len;
|
|
+ u8 vol_uniq_id[16];
|
|
+ u8 vol_user_label_len;
|
|
+ u8 vol_user_label[60];
|
|
+ u8 array_uniq_id_len;
|
|
+ u8 array_unique_id[16];
|
|
+ u8 array_user_label_len;
|
|
+ u8 array_user_label[60];
|
|
+ u8 lun[8];
|
|
+};
|
|
+
|
|
+struct rdac_controller {
|
|
+ u8 array_id[16];
|
|
+ int use_ms10;
|
|
+ struct kref kref;
|
|
+ struct list_head node;
|
|
+ union {
|
|
+ struct rdac_pg_legacy legacy;
|
|
+ struct rdac_pg_expanded expanded;
|
|
+ } mode_select;
|
|
+ u8 index;
|
|
+ u8 array_name[31];
|
|
+ struct Scsi_Host *host;
|
|
+ spinlock_t ms_lock;
|
|
+ int ms_queued;
|
|
+ struct work_struct ms_work;
|
|
+ struct scsi_device *ms_sdev;
|
|
+ struct list_head ms_head;
|
|
+ struct list_head dh_list;
|
|
+};
|
|
+
|
|
+struct c2_inquiry {
|
|
+ u8 peripheral_info;
|
|
+ u8 page_code;
|
|
+ u8 reserved1;
|
|
+ u8 page_len;
|
|
+ u8 page_id[4];
|
|
+ u8 sw_version[3];
|
|
+ u8 sw_date[3];
|
|
+ u8 features_enabled;
|
|
+ u8 max_lun_supported;
|
|
+ u8 partitions[239];
|
|
+};
|
|
+
|
|
+struct rdac_dh_data {
|
|
+ struct list_head node;
|
|
+ struct rdac_controller *ctlr;
|
|
+ struct scsi_device *sdev;
|
|
+ unsigned int lun;
|
|
+ unsigned char mode;
|
|
+ unsigned char state;
|
|
+ char lun_state;
|
|
+ char preferred;
|
|
+ union {
|
|
+ struct c2_inquiry c2;
|
|
+ struct c4_inquiry c4;
|
|
+ struct c8_inquiry c8;
|
|
+ struct c9_inquiry c9;
|
|
+ } inq;
|
|
+};
|
|
+
|
|
+struct rdac_queue_data {
|
|
+ struct list_head entry;
|
|
+ struct rdac_dh_data *h;
|
|
+ activate_complete callback_fn;
|
|
+ void *callback_data;
|
|
+};
|
|
+
|
|
+struct hp_sw_dh_data {
|
|
+ int path_state;
|
|
+ int retries;
|
|
+ int retry_cnt;
|
|
+ struct scsi_device *sdev;
|
|
+};
|
|
+
|
|
+struct clariion_dh_data {
|
|
+ unsigned int flags;
|
|
+ unsigned char buffer[252];
|
|
+ int lun_state;
|
|
+ int port;
|
|
+ int default_sp;
|
|
+ int current_sp;
|
|
+};
|
|
+
|
|
+struct alua_port_group {
|
|
+ struct kref kref;
|
|
+ struct callback_head rcu;
|
|
+ struct list_head node;
|
|
+ struct list_head dh_list;
|
|
+ unsigned char device_id_str[256];
|
|
+ int device_id_len;
|
|
+ int group_id;
|
|
+ int tpgs;
|
|
+ int state;
|
|
+ int pref;
|
|
+ int valid_states;
|
|
+ unsigned int flags;
|
|
+ unsigned char transition_tmo;
|
|
+ long unsigned int expiry;
|
|
+ long unsigned int interval;
|
|
+ struct delayed_work rtpg_work;
|
|
+ spinlock_t lock;
|
|
+ struct list_head rtpg_list;
|
|
+ struct scsi_device *rtpg_sdev;
|
|
+};
|
|
+
|
|
+struct alua_dh_data {
|
|
+ struct list_head node;
|
|
+ struct alua_port_group *pg;
|
|
+ int group_id;
|
|
+ spinlock_t pg_lock;
|
|
+ struct scsi_device *sdev;
|
|
+ int init_error;
|
|
+ struct mutex init_mutex;
|
|
+};
|
|
+
|
|
+struct alua_queue_data {
|
|
+ struct list_head entry;
|
|
+ activate_complete callback_fn;
|
|
+ void *callback_data;
|
|
+};
|
|
+
|
|
+struct spi_device_id {
|
|
+ char name[32];
|
|
+ kernel_ulong_t driver_data;
|
|
+};
|
|
+
|
|
+struct spi_driver {
|
|
+ const struct spi_device_id *id_table;
|
|
+ int (*probe)(struct spi_device *);
|
|
+ int (*remove)(struct spi_device *);
|
|
+ void (*shutdown)(struct spi_device *);
|
|
+ struct device_driver driver;
|
|
+};
|
|
+
|
|
+typedef void (*spi_res_release_t)(struct spi_controller *, struct spi_message *, void *);
|
|
+
|
|
+struct spi_res {
|
|
+ struct list_head entry;
|
|
+ spi_res_release_t release;
|
|
+ long long unsigned int data[0];
|
|
+};
|
|
+
|
|
+struct spi_replaced_transfers;
|
|
+
|
|
+typedef void (*spi_replaced_release_t)(struct spi_controller *, struct spi_message *, struct spi_replaced_transfers *);
|
|
+
|
|
+struct spi_replaced_transfers {
|
|
+ spi_replaced_release_t release;
|
|
+ void *extradata;
|
|
+ struct list_head replaced_transfers;
|
|
+ struct list_head *replaced_after;
|
|
+ size_t inserted;
|
|
+ struct spi_transfer inserted_transfers[0];
|
|
+};
|
|
+
|
|
+struct spi_board_info {
|
|
+ char modalias[32];
|
|
+ const void *platform_data;
|
|
+ const struct property_entry *properties;
|
|
+ void *controller_data;
|
|
+ int irq;
|
|
+ u32 max_speed_hz;
|
|
+ u16 bus_num;
|
|
+ u16 chip_select;
|
|
+ u16 mode;
|
|
+};
|
|
+
|
|
+enum spi_mem_data_dir {
|
|
+ SPI_MEM_DATA_IN = 0,
|
|
+ SPI_MEM_DATA_OUT = 1,
|
|
+};
|
|
+
|
|
+struct spi_mem_op {
|
|
+ struct {
|
|
+ u8 buswidth;
|
|
+ u8 opcode;
|
|
+ } cmd;
|
|
+ struct {
|
|
+ u8 nbytes;
|
|
+ u8 buswidth;
|
|
+ u64 val;
|
|
+ } addr;
|
|
+ struct {
|
|
+ u8 nbytes;
|
|
+ u8 buswidth;
|
|
+ } dummy;
|
|
+ struct {
|
|
+ u8 buswidth;
|
|
+ enum spi_mem_data_dir dir;
|
|
+ unsigned int nbytes;
|
|
+ union {
|
|
+ void *in;
|
|
+ const void *out;
|
|
+ } buf;
|
|
+ } data;
|
|
+};
|
|
+
|
|
+struct spi_mem {
|
|
+ struct spi_device *spi;
|
|
+ void *drvpriv;
|
|
+ const char *name;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_spi_controller {
|
|
+ struct trace_entry ent;
|
|
+ int bus_num;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_spi_message {
|
|
+ struct trace_entry ent;
|
|
+ int bus_num;
|
|
+ int chip_select;
|
|
+ struct spi_message *msg;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_spi_message_done {
|
|
+ struct trace_entry ent;
|
|
+ int bus_num;
|
|
+ int chip_select;
|
|
+ struct spi_message *msg;
|
|
+ unsigned int frame;
|
|
+ unsigned int actual;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_spi_transfer {
|
|
+ struct trace_entry ent;
|
|
+ int bus_num;
|
|
+ int chip_select;
|
|
+ struct spi_transfer *xfer;
|
|
+ int len;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_spi_controller {};
|
|
+
|
|
+struct trace_event_data_offsets_spi_message {};
|
|
+
|
|
+struct trace_event_data_offsets_spi_message_done {};
|
|
+
|
|
+struct trace_event_data_offsets_spi_transfer {};
|
|
+
|
|
+struct boardinfo {
|
|
+ struct list_head list;
|
|
+ struct spi_board_info board_info;
|
|
+};
|
|
+
|
|
+struct devprobe2 {
|
|
+ struct net_device * (*probe)(int);
|
|
+ int status;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SKBTX_HW_TSTAMP = 1,
|
|
+ SKBTX_SW_TSTAMP = 2,
|
|
+ SKBTX_IN_PROGRESS = 4,
|
|
+ SKBTX_DEV_ZEROCOPY = 8,
|
|
+ SKBTX_WIFI_STATUS = 16,
|
|
+ SKBTX_SHARED_FRAG = 32,
|
|
+ SKBTX_SCHED_TSTAMP = 64,
|
|
+};
|
|
+
|
|
+enum netdev_priv_flags {
|
|
+ IFF_802_1Q_VLAN = 1,
|
|
+ IFF_EBRIDGE = 2,
|
|
+ IFF_BONDING = 4,
|
|
+ IFF_ISATAP = 8,
|
|
+ IFF_WAN_HDLC = 16,
|
|
+ IFF_XMIT_DST_RELEASE = 32,
|
|
+ IFF_DONT_BRIDGE = 64,
|
|
+ IFF_DISABLE_NETPOLL = 128,
|
|
+ IFF_MACVLAN_PORT = 256,
|
|
+ IFF_BRIDGE_PORT = 512,
|
|
+ IFF_OVS_DATAPATH = 1024,
|
|
+ IFF_TX_SKB_SHARING = 2048,
|
|
+ IFF_UNICAST_FLT = 4096,
|
|
+ IFF_TEAM_PORT = 8192,
|
|
+ IFF_SUPP_NOFCS = 16384,
|
|
+ IFF_LIVE_ADDR_CHANGE = 32768,
|
|
+ IFF_MACVLAN = 65536,
|
|
+ IFF_XMIT_DST_RELEASE_PERM = 131072,
|
|
+ IFF_L3MDEV_MASTER = 262144,
|
|
+ IFF_NO_QUEUE = 524288,
|
|
+ IFF_OPENVSWITCH = 1048576,
|
|
+ IFF_L3MDEV_SLAVE = 2097152,
|
|
+ IFF_TEAM = 4194304,
|
|
+ IFF_RXFH_CONFIGURED = 8388608,
|
|
+ IFF_PHONY_HEADROOM = 16777216,
|
|
+ IFF_MACSEC = 33554432,
|
|
+ IFF_NO_RX_HANDLER = 67108864,
|
|
+ IFF_FAILOVER = 134217728,
|
|
+ IFF_FAILOVER_SLAVE = 268435456,
|
|
+ IFF_L3MDEV_RX_HANDLER = 536870912,
|
|
+ IFF_LIVE_RENAME_OK = 1073741824,
|
|
+};
|
|
+
|
|
+struct pcpu_lstats {
|
|
+ u64 packets;
|
|
+ u64 bytes;
|
|
+ struct u64_stats_sync syncp;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SOF_TIMESTAMPING_TX_HARDWARE = 1,
|
|
+ SOF_TIMESTAMPING_TX_SOFTWARE = 2,
|
|
+ SOF_TIMESTAMPING_RX_HARDWARE = 4,
|
|
+ SOF_TIMESTAMPING_RX_SOFTWARE = 8,
|
|
+ SOF_TIMESTAMPING_SOFTWARE = 16,
|
|
+ SOF_TIMESTAMPING_SYS_HARDWARE = 32,
|
|
+ SOF_TIMESTAMPING_RAW_HARDWARE = 64,
|
|
+ SOF_TIMESTAMPING_OPT_ID = 128,
|
|
+ SOF_TIMESTAMPING_TX_SCHED = 256,
|
|
+ SOF_TIMESTAMPING_TX_ACK = 512,
|
|
+ SOF_TIMESTAMPING_OPT_CMSG = 1024,
|
|
+ SOF_TIMESTAMPING_OPT_TSONLY = 2048,
|
|
+ SOF_TIMESTAMPING_OPT_STATS = 4096,
|
|
+ SOF_TIMESTAMPING_OPT_PKTINFO = 8192,
|
|
+ SOF_TIMESTAMPING_OPT_TX_SWHW = 16384,
|
|
+ SOF_TIMESTAMPING_LAST = 16384,
|
|
+ SOF_TIMESTAMPING_MASK = 32767,
|
|
+};
|
|
+
|
|
+enum ethtool_link_mode_bit_indices {
|
|
+ ETHTOOL_LINK_MODE_10baseT_Half_BIT = 0,
|
|
+ ETHTOOL_LINK_MODE_10baseT_Full_BIT = 1,
|
|
+ ETHTOOL_LINK_MODE_100baseT_Half_BIT = 2,
|
|
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT = 3,
|
|
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT = 4,
|
|
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT = 5,
|
|
+ ETHTOOL_LINK_MODE_Autoneg_BIT = 6,
|
|
+ ETHTOOL_LINK_MODE_TP_BIT = 7,
|
|
+ ETHTOOL_LINK_MODE_AUI_BIT = 8,
|
|
+ ETHTOOL_LINK_MODE_MII_BIT = 9,
|
|
+ ETHTOOL_LINK_MODE_FIBRE_BIT = 10,
|
|
+ ETHTOOL_LINK_MODE_BNC_BIT = 11,
|
|
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT = 12,
|
|
+ ETHTOOL_LINK_MODE_Pause_BIT = 13,
|
|
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT = 14,
|
|
+ ETHTOOL_LINK_MODE_2500baseX_Full_BIT = 15,
|
|
+ ETHTOOL_LINK_MODE_Backplane_BIT = 16,
|
|
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT = 17,
|
|
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT = 18,
|
|
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT = 19,
|
|
+ ETHTOOL_LINK_MODE_10000baseR_FEC_BIT = 20,
|
|
+ ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT = 21,
|
|
+ ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT = 22,
|
|
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT = 23,
|
|
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT = 24,
|
|
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT = 25,
|
|
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT = 26,
|
|
+ ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT = 27,
|
|
+ ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28,
|
|
+ ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29,
|
|
+ ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30,
|
|
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31,
|
|
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32,
|
|
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33,
|
|
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34,
|
|
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35,
|
|
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36,
|
|
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37,
|
|
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38,
|
|
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39,
|
|
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT = 40,
|
|
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT = 41,
|
|
+ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT = 42,
|
|
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT = 43,
|
|
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44,
|
|
+ ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45,
|
|
+ ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46,
|
|
+ ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47,
|
|
+ ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48,
|
|
+ ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49,
|
|
+ ETHTOOL_LINK_MODE_FEC_RS_BIT = 50,
|
|
+ ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51,
|
|
+ __ETHTOOL_LINK_MODE_LAST = 51,
|
|
+};
|
|
+
|
|
+struct mdio_board_info {
|
|
+ const char *bus_id;
|
|
+ char modalias[32];
|
|
+ int mdio_addr;
|
|
+ const void *platform_data;
|
|
+};
|
|
+
|
|
+struct mdio_board_entry {
|
|
+ struct list_head list;
|
|
+ struct mdio_board_info board_info;
|
|
+};
|
|
+
|
|
+struct mii_ioctl_data {
|
|
+ __u16 phy_id;
|
|
+ __u16 reg_num;
|
|
+ __u16 val_in;
|
|
+ __u16 val_out;
|
|
+};
|
|
+
|
|
+struct phy_led_trigger {
|
|
+ struct led_trigger trigger;
|
|
+ char name[75];
|
|
+ unsigned int speed;
|
|
+};
|
|
+
|
|
+struct phy_setting {
|
|
+ u32 speed;
|
|
+ u8 duplex;
|
|
+ u8 bit;
|
|
+};
|
|
+
|
|
+struct phy_fixup {
|
|
+ struct list_head list;
|
|
+ char bus_id[64];
|
|
+ u32 phy_uid;
|
|
+ u32 phy_uid_mask;
|
|
+ int (*run)(struct phy_device *);
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mdio_access {
|
|
+ struct trace_entry ent;
|
|
+ char busid[61];
|
|
+ char read;
|
|
+ u8 addr;
|
|
+ u16 val;
|
|
+ unsigned int regnum;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_mdio_access {};
|
|
+
|
|
+struct mdio_driver {
|
|
+ struct mdio_driver_common mdiodrv;
|
|
+ int (*probe)(struct mdio_device *);
|
|
+ void (*remove)(struct mdio_device *);
|
|
+};
|
|
+
|
|
+struct fixed_phy_status {
|
|
+ int link;
|
|
+ int speed;
|
|
+ int duplex;
|
|
+ int pause;
|
|
+ int asym_pause;
|
|
+};
|
|
+
|
|
+struct swmii_regs {
|
|
+ u16 bmcr;
|
|
+ u16 bmsr;
|
|
+ u16 lpa;
|
|
+ u16 lpagb;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SWMII_SPEED_10 = 0,
|
|
+ SWMII_SPEED_100 = 1,
|
|
+ SWMII_SPEED_1000 = 2,
|
|
+ SWMII_DUPLEX_HALF = 0,
|
|
+ SWMII_DUPLEX_FULL = 1,
|
|
+};
|
|
+
|
|
+struct fixed_mdio_bus {
|
|
+ struct mii_bus *mii_bus;
|
|
+ struct list_head phys;
|
|
+};
|
|
+
|
|
+struct fixed_phy {
|
|
+ int addr;
|
|
+ struct phy_device *phydev;
|
|
+ seqcount_t seqcount;
|
|
+ struct fixed_phy_status status;
|
|
+ int (*link_update)(struct net_device *, struct fixed_phy_status *);
|
|
+ struct list_head node;
|
|
+ int link_gpio;
|
|
+};
|
|
+
|
|
+struct ptp_clock_time {
|
|
+ __s64 sec;
|
|
+ __u32 nsec;
|
|
+ __u32 reserved;
|
|
+};
|
|
+
|
|
+struct ptp_extts_request {
|
|
+ unsigned int index;
|
|
+ unsigned int flags;
|
|
+ unsigned int rsv[2];
|
|
+};
|
|
+
|
|
+struct ptp_perout_request {
|
|
+ struct ptp_clock_time start;
|
|
+ struct ptp_clock_time period;
|
|
+ unsigned int index;
|
|
+ unsigned int flags;
|
|
+ unsigned int rsv[4];
|
|
+};
|
|
+
|
|
+enum ptp_pin_function {
|
|
+ PTP_PF_NONE = 0,
|
|
+ PTP_PF_EXTTS = 1,
|
|
+ PTP_PF_PEROUT = 2,
|
|
+ PTP_PF_PHYSYNC = 3,
|
|
+};
|
|
+
|
|
+struct ptp_pin_desc {
|
|
+ char name[64];
|
|
+ unsigned int index;
|
|
+ unsigned int func;
|
|
+ unsigned int chan;
|
|
+ unsigned int rsv[5];
|
|
+};
|
|
+
|
|
+struct ptp_clock_request {
|
|
+ enum {
|
|
+ PTP_CLK_REQ_EXTTS = 0,
|
|
+ PTP_CLK_REQ_PEROUT = 1,
|
|
+ PTP_CLK_REQ_PPS = 2,
|
|
+ } type;
|
|
+ union {
|
|
+ struct ptp_extts_request extts;
|
|
+ struct ptp_perout_request perout;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct ptp_clock_info {
|
|
+ struct module *owner;
|
|
+ char name[16];
|
|
+ s32 max_adj;
|
|
+ int n_alarm;
|
|
+ int n_ext_ts;
|
|
+ int n_per_out;
|
|
+ int n_pins;
|
|
+ int pps;
|
|
+ struct ptp_pin_desc *pin_config;
|
|
+ int (*adjfine)(struct ptp_clock_info *, long int);
|
|
+ int (*adjfreq)(struct ptp_clock_info *, s32);
|
|
+ int (*adjtime)(struct ptp_clock_info *, s64);
|
|
+ int (*gettime64)(struct ptp_clock_info *, struct timespec64 *);
|
|
+ int (*getcrosststamp)(struct ptp_clock_info *, struct system_device_crosststamp *);
|
|
+ int (*settime64)(struct ptp_clock_info *, const struct timespec64 *);
|
|
+ int (*enable)(struct ptp_clock_info *, struct ptp_clock_request *, int);
|
|
+ int (*verify)(struct ptp_clock_info *, unsigned int, enum ptp_pin_function, unsigned int);
|
|
+ long int (*do_aux_work)(struct ptp_clock_info *);
|
|
+};
|
|
+
|
|
+struct ptp_clock;
|
|
+
|
|
+struct cavium_ptp {
|
|
+ struct pci_dev *pdev;
|
|
+ spinlock_t spin_lock;
|
|
+ struct cyclecounter cycle_counter;
|
|
+ struct timecounter time_counter;
|
|
+ void *reg_base;
|
|
+ u32 clock_rate;
|
|
+ struct ptp_clock_info ptp_info;
|
|
+ struct ptp_clock *ptp_clock;
|
|
+};
|
|
+
|
|
+struct ohci {
|
|
+ void *registers;
|
|
+};
|
|
+
|
|
+struct socket_state_t {
|
|
+ u_int flags;
|
|
+ u_int csc_mask;
|
|
+ u_char Vcc;
|
|
+ u_char Vpp;
|
|
+ u_char io_irq;
|
|
+};
|
|
+
|
|
+typedef struct socket_state_t socket_state_t;
|
|
+
|
|
+struct pccard_io_map {
|
|
+ u_char map;
|
|
+ u_char flags;
|
|
+ u_short speed;
|
|
+ phys_addr_t start;
|
|
+ phys_addr_t stop;
|
|
+};
|
|
+
|
|
+struct pccard_mem_map {
|
|
+ u_char map;
|
|
+ u_char flags;
|
|
+ u_short speed;
|
|
+ phys_addr_t static_start;
|
|
+ u_int card_start;
|
|
+ struct resource *res;
|
|
+};
|
|
+
|
|
+typedef struct pccard_mem_map pccard_mem_map;
|
|
+
|
|
+struct io_window_t {
|
|
+ u_int InUse;
|
|
+ u_int Config;
|
|
+ struct resource *res;
|
|
+};
|
|
+
|
|
+typedef struct io_window_t io_window_t;
|
|
+
|
|
+struct pcmcia_socket;
|
|
+
|
|
+struct pccard_operations {
|
|
+ int (*init)(struct pcmcia_socket *);
|
|
+ int (*suspend)(struct pcmcia_socket *);
|
|
+ int (*get_status)(struct pcmcia_socket *, u_int *);
|
|
+ int (*set_socket)(struct pcmcia_socket *, socket_state_t *);
|
|
+ int (*set_io_map)(struct pcmcia_socket *, struct pccard_io_map *);
|
|
+ int (*set_mem_map)(struct pcmcia_socket *, struct pccard_mem_map *);
|
|
+};
|
|
+
|
|
+struct pccard_resource_ops;
|
|
+
|
|
+struct pcmcia_callback;
|
|
+
|
|
+struct pcmcia_socket {
|
|
+ struct module *owner;
|
|
+ socket_state_t socket;
|
|
+ u_int state;
|
|
+ u_int suspended_state;
|
|
+ u_short functions;
|
|
+ u_short lock_count;
|
|
+ pccard_mem_map cis_mem;
|
|
+ void *cis_virt;
|
|
+ io_window_t io[2];
|
|
+ pccard_mem_map win[4];
|
|
+ struct list_head cis_cache;
|
|
+ size_t fake_cis_len;
|
|
+ u8 *fake_cis;
|
|
+ struct list_head socket_list;
|
|
+ struct completion socket_released;
|
|
+ unsigned int sock;
|
|
+ u_int features;
|
|
+ u_int irq_mask;
|
|
+ u_int map_size;
|
|
+ u_int io_offset;
|
|
+ u_int pci_irq;
|
|
+ struct pci_dev *cb_dev;
|
|
+ u8 resource_setup_done;
|
|
+ struct pccard_operations *ops;
|
|
+ struct pccard_resource_ops *resource_ops;
|
|
+ void *resource_data;
|
|
+ void (*zoom_video)(struct pcmcia_socket *, int);
|
|
+ int (*power_hook)(struct pcmcia_socket *, int);
|
|
+ void (*tune_bridge)(struct pcmcia_socket *, struct pci_bus *);
|
|
+ struct task_struct *thread;
|
|
+ struct completion thread_done;
|
|
+ unsigned int thread_events;
|
|
+ unsigned int sysfs_events;
|
|
+ struct mutex skt_mutex;
|
|
+ struct mutex ops_mutex;
|
|
+ spinlock_t thread_lock;
|
|
+ struct pcmcia_callback *callback;
|
|
+ struct device dev;
|
|
+ void *driver_data;
|
|
+ int resume_status;
|
|
+};
|
|
+
|
|
+struct pccard_resource_ops {
|
|
+ int (*validate_mem)(struct pcmcia_socket *);
|
|
+ int (*find_io)(struct pcmcia_socket *, unsigned int, unsigned int *, unsigned int, unsigned int, struct resource **);
|
|
+ struct resource * (*find_mem)(long unsigned int, long unsigned int, long unsigned int, int, struct pcmcia_socket *);
|
|
+ int (*init)(struct pcmcia_socket *);
|
|
+ void (*exit)(struct pcmcia_socket *);
|
|
+};
|
|
+
|
|
+struct pcmcia_callback {
|
|
+ struct module *owner;
|
|
+ int (*add)(struct pcmcia_socket *);
|
|
+ int (*remove)(struct pcmcia_socket *);
|
|
+ void (*requery)(struct pcmcia_socket *);
|
|
+ int (*validate)(struct pcmcia_socket *, unsigned int *);
|
|
+ int (*suspend)(struct pcmcia_socket *);
|
|
+ int (*early_resume)(struct pcmcia_socket *);
|
|
+ int (*resume)(struct pcmcia_socket *);
|
|
+};
|
|
+
|
|
+enum {
|
|
+ PCMCIA_IOPORT_0 = 0,
|
|
+ PCMCIA_IOPORT_1 = 1,
|
|
+ PCMCIA_IOMEM_0 = 2,
|
|
+ PCMCIA_IOMEM_1 = 3,
|
|
+ PCMCIA_IOMEM_2 = 4,
|
|
+ PCMCIA_IOMEM_3 = 5,
|
|
+ PCMCIA_NUM_RESOURCES = 6,
|
|
+};
|
|
+
|
|
+struct usb_device_id {
|
|
+ __u16 match_flags;
|
|
+ __u16 idVendor;
|
|
+ __u16 idProduct;
|
|
+ __u16 bcdDevice_lo;
|
|
+ __u16 bcdDevice_hi;
|
|
+ __u8 bDeviceClass;
|
|
+ __u8 bDeviceSubClass;
|
|
+ __u8 bDeviceProtocol;
|
|
+ __u8 bInterfaceClass;
|
|
+ __u8 bInterfaceSubClass;
|
|
+ __u8 bInterfaceProtocol;
|
|
+ __u8 bInterfaceNumber;
|
|
+ kernel_ulong_t driver_info;
|
|
+};
|
|
+
|
|
+struct usb_descriptor_header {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+};
|
|
+
|
|
+struct usb_device_descriptor {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __le16 bcdUSB;
|
|
+ __u8 bDeviceClass;
|
|
+ __u8 bDeviceSubClass;
|
|
+ __u8 bDeviceProtocol;
|
|
+ __u8 bMaxPacketSize0;
|
|
+ __le16 idVendor;
|
|
+ __le16 idProduct;
|
|
+ __le16 bcdDevice;
|
|
+ __u8 iManufacturer;
|
|
+ __u8 iProduct;
|
|
+ __u8 iSerialNumber;
|
|
+ __u8 bNumConfigurations;
|
|
+};
|
|
+
|
|
+struct usb_config_descriptor {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __le16 wTotalLength;
|
|
+ __u8 bNumInterfaces;
|
|
+ __u8 bConfigurationValue;
|
|
+ __u8 iConfiguration;
|
|
+ __u8 bmAttributes;
|
|
+ __u8 bMaxPower;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct usb_interface_descriptor {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bInterfaceNumber;
|
|
+ __u8 bAlternateSetting;
|
|
+ __u8 bNumEndpoints;
|
|
+ __u8 bInterfaceClass;
|
|
+ __u8 bInterfaceSubClass;
|
|
+ __u8 bInterfaceProtocol;
|
|
+ __u8 iInterface;
|
|
+};
|
|
+
|
|
+struct usb_endpoint_descriptor {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bEndpointAddress;
|
|
+ __u8 bmAttributes;
|
|
+ __le16 wMaxPacketSize;
|
|
+ __u8 bInterval;
|
|
+ __u8 bRefresh;
|
|
+ __u8 bSynchAddress;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct usb_ssp_isoc_ep_comp_descriptor {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __le16 wReseved;
|
|
+ __le32 dwBytesPerInterval;
|
|
+};
|
|
+
|
|
+struct usb_ss_ep_comp_descriptor {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bMaxBurst;
|
|
+ __u8 bmAttributes;
|
|
+ __le16 wBytesPerInterval;
|
|
+};
|
|
+
|
|
+struct usb_interface_assoc_descriptor {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bFirstInterface;
|
|
+ __u8 bInterfaceCount;
|
|
+ __u8 bFunctionClass;
|
|
+ __u8 bFunctionSubClass;
|
|
+ __u8 bFunctionProtocol;
|
|
+ __u8 iFunction;
|
|
+};
|
|
+
|
|
+struct usb_bos_descriptor {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __le16 wTotalLength;
|
|
+ __u8 bNumDeviceCaps;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct usb_ext_cap_descriptor {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDevCapabilityType;
|
|
+ __le32 bmAttributes;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct usb_ss_cap_descriptor {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDevCapabilityType;
|
|
+ __u8 bmAttributes;
|
|
+ __le16 wSpeedSupported;
|
|
+ __u8 bFunctionalitySupport;
|
|
+ __u8 bU1devExitLat;
|
|
+ __le16 bU2DevExitLat;
|
|
+};
|
|
+
|
|
+struct usb_ss_container_id_descriptor {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDevCapabilityType;
|
|
+ __u8 bReserved;
|
|
+ __u8 ContainerID[16];
|
|
+};
|
|
+
|
|
+struct usb_ssp_cap_descriptor {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDevCapabilityType;
|
|
+ __u8 bReserved;
|
|
+ __le32 bmAttributes;
|
|
+ __le16 wFunctionalitySupport;
|
|
+ __le16 wReserved;
|
|
+ __le32 bmSublinkSpeedAttr[1];
|
|
+};
|
|
+
|
|
+struct usb_ptm_cap_descriptor {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDevCapabilityType;
|
|
+};
|
|
+
|
|
+enum usb_device_speed {
|
|
+ USB_SPEED_UNKNOWN = 0,
|
|
+ USB_SPEED_LOW = 1,
|
|
+ USB_SPEED_FULL = 2,
|
|
+ USB_SPEED_HIGH = 3,
|
|
+ USB_SPEED_WIRELESS = 4,
|
|
+ USB_SPEED_SUPER = 5,
|
|
+ USB_SPEED_SUPER_PLUS = 6,
|
|
+};
|
|
+
|
|
+enum usb_device_state {
|
|
+ USB_STATE_NOTATTACHED = 0,
|
|
+ USB_STATE_ATTACHED = 1,
|
|
+ USB_STATE_POWERED = 2,
|
|
+ USB_STATE_RECONNECTING = 3,
|
|
+ USB_STATE_UNAUTHENTICATED = 4,
|
|
+ USB_STATE_DEFAULT = 5,
|
|
+ USB_STATE_ADDRESS = 6,
|
|
+ USB_STATE_CONFIGURED = 7,
|
|
+ USB_STATE_SUSPENDED = 8,
|
|
+};
|
|
+
|
|
+enum usb3_link_state {
|
|
+ USB3_LPM_U0 = 0,
|
|
+ USB3_LPM_U1 = 1,
|
|
+ USB3_LPM_U2 = 2,
|
|
+ USB3_LPM_U3 = 3,
|
|
+};
|
|
+
|
|
+struct ep_device;
|
|
+
|
|
+struct usb_host_endpoint {
|
|
+ struct usb_endpoint_descriptor desc;
|
|
+ struct usb_ss_ep_comp_descriptor ss_ep_comp;
|
|
+ struct usb_ssp_isoc_ep_comp_descriptor ssp_isoc_ep_comp;
|
|
+ char: 8;
|
|
+ struct list_head urb_list;
|
|
+ void *hcpriv;
|
|
+ struct ep_device *ep_dev;
|
|
+ unsigned char *extra;
|
|
+ int extralen;
|
|
+ int enabled;
|
|
+ int streams;
|
|
+ int: 32;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct usb_host_interface {
|
|
+ struct usb_interface_descriptor desc;
|
|
+ int extralen;
|
|
+ unsigned char *extra;
|
|
+ struct usb_host_endpoint *endpoint;
|
|
+ char *string;
|
|
+};
|
|
+
|
|
+enum usb_interface_condition {
|
|
+ USB_INTERFACE_UNBOUND = 0,
|
|
+ USB_INTERFACE_BINDING = 1,
|
|
+ USB_INTERFACE_BOUND = 2,
|
|
+ USB_INTERFACE_UNBINDING = 3,
|
|
+};
|
|
+
|
|
+struct usb_interface {
|
|
+ struct usb_host_interface *altsetting;
|
|
+ struct usb_host_interface *cur_altsetting;
|
|
+ unsigned int num_altsetting;
|
|
+ struct usb_interface_assoc_descriptor *intf_assoc;
|
|
+ int minor;
|
|
+ enum usb_interface_condition condition;
|
|
+ unsigned int sysfs_files_created: 1;
|
|
+ unsigned int ep_devs_created: 1;
|
|
+ unsigned int unregistering: 1;
|
|
+ unsigned int needs_remote_wakeup: 1;
|
|
+ unsigned int needs_altsetting0: 1;
|
|
+ unsigned int needs_binding: 1;
|
|
+ unsigned int resetting_device: 1;
|
|
+ unsigned int authorized: 1;
|
|
+ struct device dev;
|
|
+ struct device *usb_dev;
|
|
+ struct work_struct reset_ws;
|
|
+};
|
|
+
|
|
+struct usb_interface_cache {
|
|
+ unsigned int num_altsetting;
|
|
+ struct kref ref;
|
|
+ struct usb_host_interface altsetting[0];
|
|
+};
|
|
+
|
|
+struct usb_host_config {
|
|
+ struct usb_config_descriptor desc;
|
|
+ char *string;
|
|
+ struct usb_interface_assoc_descriptor *intf_assoc[16];
|
|
+ struct usb_interface *interface[32];
|
|
+ struct usb_interface_cache *intf_cache[32];
|
|
+ unsigned char *extra;
|
|
+ int extralen;
|
|
+};
|
|
+
|
|
+struct usb_host_bos {
|
|
+ struct usb_bos_descriptor *desc;
|
|
+ struct usb_ext_cap_descriptor *ext_cap;
|
|
+ struct usb_ss_cap_descriptor *ss_cap;
|
|
+ struct usb_ssp_cap_descriptor *ssp_cap;
|
|
+ struct usb_ss_container_id_descriptor *ss_id;
|
|
+ struct usb_ptm_cap_descriptor *ptm_cap;
|
|
+};
|
|
+
|
|
+struct usb_devmap {
|
|
+ long unsigned int devicemap[2];
|
|
+};
|
|
+
|
|
+struct usb_device;
|
|
+
|
|
+struct mon_bus;
|
|
+
|
|
+struct usb_bus {
|
|
+ struct device *controller;
|
|
+ struct device *sysdev;
|
|
+ int busnum;
|
|
+ const char *bus_name;
|
|
+ u8 uses_dma;
|
|
+ u8 uses_pio_for_control;
|
|
+ u8 otg_port;
|
|
+ unsigned int is_b_host: 1;
|
|
+ unsigned int b_hnp_enable: 1;
|
|
+ unsigned int no_stop_on_short: 1;
|
|
+ unsigned int no_sg_constraint: 1;
|
|
+ unsigned int sg_tablesize;
|
|
+ int devnum_next;
|
|
+ struct mutex devnum_next_mutex;
|
|
+ struct usb_devmap devmap;
|
|
+ struct usb_device *root_hub;
|
|
+ struct usb_bus *hs_companion;
|
|
+ int bandwidth_allocated;
|
|
+ int bandwidth_int_reqs;
|
|
+ int bandwidth_isoc_reqs;
|
|
+ unsigned int resuming_ports;
|
|
+ struct mon_bus *mon_bus;
|
|
+ int monitored;
|
|
+};
|
|
+
|
|
+struct wusb_dev;
|
|
+
|
|
+enum usb_device_removable {
|
|
+ USB_DEVICE_REMOVABLE_UNKNOWN = 0,
|
|
+ USB_DEVICE_REMOVABLE = 1,
|
|
+ USB_DEVICE_FIXED = 2,
|
|
+};
|
|
+
|
|
+struct usb2_lpm_parameters {
|
|
+ unsigned int besl;
|
|
+ int timeout;
|
|
+};
|
|
+
|
|
+struct usb3_lpm_parameters {
|
|
+ unsigned int mel;
|
|
+ unsigned int pel;
|
|
+ unsigned int sel;
|
|
+ int timeout;
|
|
+};
|
|
+
|
|
+struct usb_tt;
|
|
+
|
|
+struct usb_device {
|
|
+ int devnum;
|
|
+ char devpath[16];
|
|
+ u32 route;
|
|
+ enum usb_device_state state;
|
|
+ enum usb_device_speed speed;
|
|
+ unsigned int rx_lanes;
|
|
+ unsigned int tx_lanes;
|
|
+ struct usb_tt *tt;
|
|
+ int ttport;
|
|
+ unsigned int toggle[2];
|
|
+ struct usb_device *parent;
|
|
+ struct usb_bus *bus;
|
|
+ struct usb_host_endpoint ep0;
|
|
+ struct device dev;
|
|
+ struct usb_device_descriptor descriptor;
|
|
+ struct usb_host_bos *bos;
|
|
+ struct usb_host_config *config;
|
|
+ struct usb_host_config *actconfig;
|
|
+ struct usb_host_endpoint *ep_in[16];
|
|
+ struct usb_host_endpoint *ep_out[16];
|
|
+ char **rawdescriptors;
|
|
+ short unsigned int bus_mA;
|
|
+ u8 portnum;
|
|
+ u8 level;
|
|
+ unsigned int can_submit: 1;
|
|
+ unsigned int persist_enabled: 1;
|
|
+ unsigned int have_langid: 1;
|
|
+ unsigned int authorized: 1;
|
|
+ unsigned int authenticated: 1;
|
|
+ unsigned int wusb: 1;
|
|
+ unsigned int lpm_capable: 1;
|
|
+ unsigned int usb2_hw_lpm_capable: 1;
|
|
+ unsigned int usb2_hw_lpm_besl_capable: 1;
|
|
+ unsigned int usb2_hw_lpm_enabled: 1;
|
|
+ unsigned int usb2_hw_lpm_allowed: 1;
|
|
+ unsigned int usb3_lpm_u1_enabled: 1;
|
|
+ unsigned int usb3_lpm_u2_enabled: 1;
|
|
+ int string_langid;
|
|
+ char *product;
|
|
+ char *manufacturer;
|
|
+ char *serial;
|
|
+ struct list_head filelist;
|
|
+ int maxchild;
|
|
+ u32 quirks;
|
|
+ atomic_t urbnum;
|
|
+ long unsigned int active_duration;
|
|
+ long unsigned int connect_time;
|
|
+ unsigned int do_remote_wakeup: 1;
|
|
+ unsigned int reset_resume: 1;
|
|
+ unsigned int port_is_suspended: 1;
|
|
+ struct wusb_dev *wusb_dev;
|
|
+ int slot_id;
|
|
+ enum usb_device_removable removable;
|
|
+ struct usb2_lpm_parameters l1_params;
|
|
+ struct usb3_lpm_parameters u1_params;
|
|
+ struct usb3_lpm_parameters u2_params;
|
|
+ unsigned int lpm_disable_count;
|
|
+ u16 hub_delay;
|
|
+};
|
|
+
|
|
+struct usb_tt {
|
|
+ struct usb_device *hub;
|
|
+ int multi;
|
|
+ unsigned int think_time;
|
|
+ void *hcpriv;
|
|
+ spinlock_t lock;
|
|
+ struct list_head clear_list;
|
|
+ struct work_struct clear_work;
|
|
+};
|
|
+
|
|
+struct usb_dynids {
|
|
+ spinlock_t lock;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct usbdrv_wrap {
|
|
+ struct device_driver driver;
|
|
+ int for_devices;
|
|
+};
|
|
+
|
|
+struct usb_driver {
|
|
+ const char *name;
|
|
+ int (*probe)(struct usb_interface *, const struct usb_device_id *);
|
|
+ void (*disconnect)(struct usb_interface *);
|
|
+ int (*unlocked_ioctl)(struct usb_interface *, unsigned int, void *);
|
|
+ int (*suspend)(struct usb_interface *, pm_message_t);
|
|
+ int (*resume)(struct usb_interface *);
|
|
+ int (*reset_resume)(struct usb_interface *);
|
|
+ int (*pre_reset)(struct usb_interface *);
|
|
+ int (*post_reset)(struct usb_interface *);
|
|
+ const struct usb_device_id *id_table;
|
|
+ struct usb_dynids dynids;
|
|
+ struct usbdrv_wrap drvwrap;
|
|
+ unsigned int no_dynamic_id: 1;
|
|
+ unsigned int supports_autosuspend: 1;
|
|
+ unsigned int disable_hub_initiated_lpm: 1;
|
|
+ unsigned int soft_unbind: 1;
|
|
+};
|
|
+
|
|
+struct usb_device_driver {
|
|
+ const char *name;
|
|
+ int (*probe)(struct usb_device *);
|
|
+ void (*disconnect)(struct usb_device *);
|
|
+ int (*suspend)(struct usb_device *, pm_message_t);
|
|
+ int (*resume)(struct usb_device *, pm_message_t);
|
|
+ struct usbdrv_wrap drvwrap;
|
|
+ unsigned int supports_autosuspend: 1;
|
|
+};
|
|
+
|
|
+struct usb_iso_packet_descriptor {
|
|
+ unsigned int offset;
|
|
+ unsigned int length;
|
|
+ unsigned int actual_length;
|
|
+ int status;
|
|
+};
|
|
+
|
|
+struct usb_anchor {
|
|
+ struct list_head urb_list;
|
|
+ wait_queue_head_t wait;
|
|
+ spinlock_t lock;
|
|
+ atomic_t suspend_wakeups;
|
|
+ unsigned int poisoned: 1;
|
|
+};
|
|
+
|
|
+struct urb;
|
|
+
|
|
+typedef void (*usb_complete_t)(struct urb *);
|
|
+
|
|
+struct urb {
|
|
+ struct kref kref;
|
|
+ void *hcpriv;
|
|
+ atomic_t use_count;
|
|
+ atomic_t reject;
|
|
+ int unlinked;
|
|
+ struct list_head urb_list;
|
|
+ struct list_head anchor_list;
|
|
+ struct usb_anchor *anchor;
|
|
+ struct usb_device *dev;
|
|
+ struct usb_host_endpoint *ep;
|
|
+ unsigned int pipe;
|
|
+ unsigned int stream_id;
|
|
+ int status;
|
|
+ unsigned int transfer_flags;
|
|
+ void *transfer_buffer;
|
|
+ dma_addr_t transfer_dma;
|
|
+ struct scatterlist *sg;
|
|
+ int num_mapped_sgs;
|
|
+ int num_sgs;
|
|
+ u32 transfer_buffer_length;
|
|
+ u32 actual_length;
|
|
+ unsigned char *setup_packet;
|
|
+ dma_addr_t setup_dma;
|
|
+ int start_frame;
|
|
+ int number_of_packets;
|
|
+ int interval;
|
|
+ int error_count;
|
|
+ void *context;
|
|
+ usb_complete_t complete;
|
|
+ struct usb_iso_packet_descriptor iso_frame_desc[0];
|
|
+};
|
|
+
|
|
+struct giveback_urb_bh {
|
|
+ bool running;
|
|
+ spinlock_t lock;
|
|
+ struct list_head head;
|
|
+ struct tasklet_struct bh;
|
|
+ struct usb_host_endpoint *completing_ep;
|
|
+};
|
|
+
|
|
+struct usb_phy_roothub;
|
|
+
|
|
+struct hc_driver;
|
|
+
|
|
+struct usb_phy;
|
|
+
|
|
+struct usb_hcd {
|
|
+ struct usb_bus self;
|
|
+ struct kref kref;
|
|
+ const char *product_desc;
|
|
+ int speed;
|
|
+ char irq_descr[24];
|
|
+ struct timer_list rh_timer;
|
|
+ struct urb *status_urb;
|
|
+ struct work_struct wakeup_work;
|
|
+ const struct hc_driver *driver;
|
|
+ struct usb_phy *usb_phy;
|
|
+ struct usb_phy_roothub *phy_roothub;
|
|
+ long unsigned int flags;
|
|
+ unsigned int rh_registered: 1;
|
|
+ unsigned int rh_pollable: 1;
|
|
+ unsigned int msix_enabled: 1;
|
|
+ unsigned int msi_enabled: 1;
|
|
+ unsigned int skip_phy_initialization: 1;
|
|
+ unsigned int uses_new_polling: 1;
|
|
+ unsigned int wireless: 1;
|
|
+ unsigned int has_tt: 1;
|
|
+ unsigned int amd_resume_bug: 1;
|
|
+ unsigned int can_do_streams: 1;
|
|
+ unsigned int tpl_support: 1;
|
|
+ unsigned int cant_recv_wakeups: 1;
|
|
+ unsigned int irq;
|
|
+ void *regs;
|
|
+ resource_size_t rsrc_start;
|
|
+ resource_size_t rsrc_len;
|
|
+ unsigned int power_budget;
|
|
+ struct giveback_urb_bh high_prio_bh;
|
|
+ struct giveback_urb_bh low_prio_bh;
|
|
+ struct mutex *address0_mutex;
|
|
+ struct mutex *bandwidth_mutex;
|
|
+ struct usb_hcd *shared_hcd;
|
|
+ struct usb_hcd *primary_hcd;
|
|
+ struct dma_pool___2 *pool[4];
|
|
+ int state;
|
|
+ long unsigned int hcd_priv[0];
|
|
+};
|
|
+
|
|
+struct hc_driver {
|
|
+ const char *description;
|
|
+ const char *product_desc;
|
|
+ size_t hcd_priv_size;
|
|
+ irqreturn_t (*irq)(struct usb_hcd *);
|
|
+ int flags;
|
|
+ int (*reset)(struct usb_hcd *);
|
|
+ int (*start)(struct usb_hcd *);
|
|
+ int (*pci_suspend)(struct usb_hcd *, bool);
|
|
+ int (*pci_resume)(struct usb_hcd *, bool);
|
|
+ void (*stop)(struct usb_hcd *);
|
|
+ void (*shutdown)(struct usb_hcd *);
|
|
+ int (*get_frame_number)(struct usb_hcd *);
|
|
+ int (*urb_enqueue)(struct usb_hcd *, struct urb *, gfp_t);
|
|
+ int (*urb_dequeue)(struct usb_hcd *, struct urb *, int);
|
|
+ int (*map_urb_for_dma)(struct usb_hcd *, struct urb *, gfp_t);
|
|
+ void (*unmap_urb_for_dma)(struct usb_hcd *, struct urb *);
|
|
+ void (*endpoint_disable)(struct usb_hcd *, struct usb_host_endpoint *);
|
|
+ void (*endpoint_reset)(struct usb_hcd *, struct usb_host_endpoint *);
|
|
+ int (*hub_status_data)(struct usb_hcd *, char *);
|
|
+ int (*hub_control)(struct usb_hcd *, u16, u16, u16, char *, u16);
|
|
+ int (*bus_suspend)(struct usb_hcd *);
|
|
+ int (*bus_resume)(struct usb_hcd *);
|
|
+ int (*start_port_reset)(struct usb_hcd *, unsigned int);
|
|
+ long unsigned int (*get_resuming_ports)(struct usb_hcd *);
|
|
+ void (*relinquish_port)(struct usb_hcd *, int);
|
|
+ int (*port_handed_over)(struct usb_hcd *, int);
|
|
+ void (*clear_tt_buffer_complete)(struct usb_hcd *, struct usb_host_endpoint *);
|
|
+ int (*alloc_dev)(struct usb_hcd *, struct usb_device *);
|
|
+ void (*free_dev)(struct usb_hcd *, struct usb_device *);
|
|
+ int (*alloc_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, unsigned int, gfp_t);
|
|
+ int (*free_streams)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint **, unsigned int, gfp_t);
|
|
+ int (*add_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *);
|
|
+ int (*drop_endpoint)(struct usb_hcd *, struct usb_device *, struct usb_host_endpoint *);
|
|
+ int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
|
|
+ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
|
|
+ int (*address_device)(struct usb_hcd *, struct usb_device *);
|
|
+ int (*enable_device)(struct usb_hcd *, struct usb_device *);
|
|
+ int (*update_hub_device)(struct usb_hcd *, struct usb_device *, struct usb_tt *, gfp_t);
|
|
+ int (*reset_device)(struct usb_hcd *, struct usb_device *);
|
|
+ int (*update_device)(struct usb_hcd *, struct usb_device *);
|
|
+ int (*set_usb2_hw_lpm)(struct usb_hcd *, struct usb_device *, int);
|
|
+ int (*enable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state);
|
|
+ int (*disable_usb3_lpm_timeout)(struct usb_hcd *, struct usb_device *, enum usb3_link_state);
|
|
+ int (*find_raw_port_number)(struct usb_hcd *, int);
|
|
+ int (*port_power)(struct usb_hcd *, int, bool);
|
|
+};
|
|
+
|
|
+enum usb_phy_type {
|
|
+ USB_PHY_TYPE_UNDEFINED = 0,
|
|
+ USB_PHY_TYPE_USB2 = 1,
|
|
+ USB_PHY_TYPE_USB3 = 2,
|
|
+};
|
|
+
|
|
+enum usb_phy_events {
|
|
+ USB_EVENT_NONE = 0,
|
|
+ USB_EVENT_VBUS = 1,
|
|
+ USB_EVENT_ID = 2,
|
|
+ USB_EVENT_CHARGER = 3,
|
|
+ USB_EVENT_ENUMERATED = 4,
|
|
+};
|
|
+
|
|
+struct extcon_dev;
|
|
+
|
|
+enum usb_charger_type {
|
|
+ UNKNOWN_TYPE = 0,
|
|
+ SDP_TYPE = 1,
|
|
+ DCP_TYPE = 2,
|
|
+ CDP_TYPE = 3,
|
|
+ ACA_TYPE = 4,
|
|
+};
|
|
+
|
|
+enum usb_charger_state {
|
|
+ USB_CHARGER_DEFAULT = 0,
|
|
+ USB_CHARGER_PRESENT = 1,
|
|
+ USB_CHARGER_ABSENT = 2,
|
|
+};
|
|
+
|
|
+struct usb_charger_current {
|
|
+ unsigned int sdp_min;
|
|
+ unsigned int sdp_max;
|
|
+ unsigned int dcp_min;
|
|
+ unsigned int dcp_max;
|
|
+ unsigned int cdp_min;
|
|
+ unsigned int cdp_max;
|
|
+ unsigned int aca_min;
|
|
+ unsigned int aca_max;
|
|
+};
|
|
+
|
|
+struct usb_otg;
|
|
+
|
|
+struct usb_phy_io_ops;
|
|
+
|
|
+struct usb_phy {
|
|
+ struct device *dev;
|
|
+ const char *label;
|
|
+ unsigned int flags;
|
|
+ enum usb_phy_type type;
|
|
+ enum usb_phy_events last_event;
|
|
+ struct usb_otg *otg;
|
|
+ struct device *io_dev;
|
|
+ struct usb_phy_io_ops *io_ops;
|
|
+ void *io_priv;
|
|
+ struct extcon_dev *edev;
|
|
+ struct extcon_dev *id_edev;
|
|
+ struct notifier_block vbus_nb;
|
|
+ struct notifier_block id_nb;
|
|
+ struct notifier_block type_nb;
|
|
+ enum usb_charger_type chg_type;
|
|
+ enum usb_charger_state chg_state;
|
|
+ struct usb_charger_current chg_cur;
|
|
+ struct work_struct chg_work;
|
|
+ struct atomic_notifier_head notifier;
|
|
+ u16 port_status;
|
|
+ u16 port_change;
|
|
+ struct list_head head;
|
|
+ int (*init)(struct usb_phy *);
|
|
+ void (*shutdown)(struct usb_phy *);
|
|
+ int (*set_vbus)(struct usb_phy *, int);
|
|
+ int (*set_power)(struct usb_phy *, unsigned int);
|
|
+ int (*set_suspend)(struct usb_phy *, int);
|
|
+ int (*set_wakeup)(struct usb_phy *, bool);
|
|
+ int (*notify_connect)(struct usb_phy *, enum usb_device_speed);
|
|
+ int (*notify_disconnect)(struct usb_phy *, enum usb_device_speed);
|
|
+ enum usb_charger_type (*charger_detect)(struct usb_phy *);
|
|
+};
|
|
+
|
|
+struct usb_mon_operations {
|
|
+ void (*urb_submit)(struct usb_bus *, struct urb *);
|
|
+ void (*urb_submit_error)(struct usb_bus *, struct urb *, int);
|
|
+ void (*urb_complete)(struct usb_bus *, struct urb *, int);
|
|
+};
|
|
+
|
|
+struct regulator;
|
|
+
|
|
+enum phy_mode {
|
|
+ PHY_MODE_INVALID = 0,
|
|
+ PHY_MODE_USB_HOST = 1,
|
|
+ PHY_MODE_USB_HOST_LS = 2,
|
|
+ PHY_MODE_USB_HOST_FS = 3,
|
|
+ PHY_MODE_USB_HOST_HS = 4,
|
|
+ PHY_MODE_USB_HOST_SS = 5,
|
|
+ PHY_MODE_USB_DEVICE = 6,
|
|
+ PHY_MODE_USB_DEVICE_LS = 7,
|
|
+ PHY_MODE_USB_DEVICE_FS = 8,
|
|
+ PHY_MODE_USB_DEVICE_HS = 9,
|
|
+ PHY_MODE_USB_DEVICE_SS = 10,
|
|
+ PHY_MODE_USB_OTG = 11,
|
|
+ PHY_MODE_SGMII = 12,
|
|
+ PHY_MODE_2500SGMII = 13,
|
|
+ PHY_MODE_10GKR = 14,
|
|
+ PHY_MODE_UFS_HS_A = 15,
|
|
+ PHY_MODE_UFS_HS_B = 16,
|
|
+};
|
|
+
|
|
+struct phy;
|
|
+
|
|
+struct phy_ops {
|
|
+ int (*init)(struct phy *);
|
|
+ int (*exit)(struct phy *);
|
|
+ int (*power_on)(struct phy *);
|
|
+ int (*power_off)(struct phy *);
|
|
+ int (*set_mode)(struct phy *, enum phy_mode);
|
|
+ int (*reset)(struct phy *);
|
|
+ int (*calibrate)(struct phy *);
|
|
+ struct module *owner;
|
|
+};
|
|
+
|
|
+struct phy_attrs {
|
|
+ u32 bus_width;
|
|
+ enum phy_mode mode;
|
|
+};
|
|
+
|
|
+struct phy {
|
|
+ struct device dev;
|
|
+ int id;
|
|
+ const struct phy_ops *ops;
|
|
+ struct mutex mutex;
|
|
+ int init_count;
|
|
+ int power_count;
|
|
+ struct phy_attrs attrs;
|
|
+ struct regulator *pwr;
|
|
+};
|
|
+
|
|
+enum usb_otg_state {
|
|
+ OTG_STATE_UNDEFINED = 0,
|
|
+ OTG_STATE_B_IDLE = 1,
|
|
+ OTG_STATE_B_SRP_INIT = 2,
|
|
+ OTG_STATE_B_PERIPHERAL = 3,
|
|
+ OTG_STATE_B_WAIT_ACON = 4,
|
|
+ OTG_STATE_B_HOST = 5,
|
|
+ OTG_STATE_A_IDLE = 6,
|
|
+ OTG_STATE_A_WAIT_VRISE = 7,
|
|
+ OTG_STATE_A_WAIT_BCON = 8,
|
|
+ OTG_STATE_A_HOST = 9,
|
|
+ OTG_STATE_A_SUSPEND = 10,
|
|
+ OTG_STATE_A_PERIPHERAL = 11,
|
|
+ OTG_STATE_A_WAIT_VFALL = 12,
|
|
+ OTG_STATE_A_VBUS_ERR = 13,
|
|
+};
|
|
+
|
|
+struct usb_phy_io_ops {
|
|
+ int (*read)(struct usb_phy *, u32);
|
|
+ int (*write)(struct usb_phy *, u32, u32);
|
|
+};
|
|
+
|
|
+struct usb_gadget;
|
|
+
|
|
+struct usb_otg {
|
|
+ u8 default_a;
|
|
+ struct phy *phy;
|
|
+ struct usb_phy *usb_phy;
|
|
+ struct usb_bus *host;
|
|
+ struct usb_gadget *gadget;
|
|
+ enum usb_otg_state state;
|
|
+ int (*set_host)(struct usb_otg *, struct usb_bus *);
|
|
+ int (*set_peripheral)(struct usb_otg *, struct usb_gadget *);
|
|
+ int (*set_vbus)(struct usb_otg *, bool);
|
|
+ int (*start_srp)(struct usb_otg *);
|
|
+ int (*start_hnp)(struct usb_otg *);
|
|
+};
|
|
+
|
|
+struct find_interface_arg {
|
|
+ int minor;
|
|
+ struct device_driver *drv;
|
|
+};
|
|
+
|
|
+struct each_dev_arg {
|
|
+ void *data;
|
|
+ int (*fn)(struct usb_device *, void *);
|
|
+};
|
|
+
|
|
+struct usb_qualifier_descriptor {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __le16 bcdUSB;
|
|
+ __u8 bDeviceClass;
|
|
+ __u8 bDeviceSubClass;
|
|
+ __u8 bDeviceProtocol;
|
|
+ __u8 bMaxPacketSize0;
|
|
+ __u8 bNumConfigurations;
|
|
+ __u8 bRESERVED;
|
|
+};
|
|
+
|
|
+struct usb_set_sel_req {
|
|
+ __u8 u1_sel;
|
|
+ __u8 u1_pel;
|
|
+ __le16 u2_sel;
|
|
+ __le16 u2_pel;
|
|
+};
|
|
+
|
|
+enum usb_port_connect_type {
|
|
+ USB_PORT_CONNECT_TYPE_UNKNOWN = 0,
|
|
+ USB_PORT_CONNECT_TYPE_HOT_PLUG = 1,
|
|
+ USB_PORT_CONNECT_TYPE_HARD_WIRED = 2,
|
|
+ USB_PORT_NOT_USED = 3,
|
|
+};
|
|
+
|
|
+struct usbdevfs_hub_portinfo {
|
|
+ char nports;
|
|
+ char port[127];
|
|
+};
|
|
+
|
|
+struct usb_port_status {
|
|
+ __le16 wPortStatus;
|
|
+ __le16 wPortChange;
|
|
+ __le32 dwExtPortStatus;
|
|
+};
|
|
+
|
|
+struct usb_hub_status {
|
|
+ __le16 wHubStatus;
|
|
+ __le16 wHubChange;
|
|
+};
|
|
+
|
|
+struct usb_hub_descriptor {
|
|
+ __u8 bDescLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bNbrPorts;
|
|
+ __le16 wHubCharacteristics;
|
|
+ __u8 bPwrOn2PwrGood;
|
|
+ __u8 bHubContrCurrent;
|
|
+ union {
|
|
+ struct {
|
|
+ __u8 DeviceRemovable[4];
|
|
+ __u8 PortPwrCtrlMask[4];
|
|
+ } hs;
|
|
+ struct {
|
|
+ __u8 bHubHdrDecLat;
|
|
+ __le16 wHubDelay;
|
|
+ __le16 DeviceRemovable;
|
|
+ } __attribute__((packed)) ss;
|
|
+ } u;
|
|
+} __attribute__((packed));
|
|
+
|
|
+enum hub_led_mode {
|
|
+ INDICATOR_AUTO = 0,
|
|
+ INDICATOR_CYCLE = 1,
|
|
+ INDICATOR_GREEN_BLINK = 2,
|
|
+ INDICATOR_GREEN_BLINK_OFF = 3,
|
|
+ INDICATOR_AMBER_BLINK = 4,
|
|
+ INDICATOR_AMBER_BLINK_OFF = 5,
|
|
+ INDICATOR_ALT_BLINK = 6,
|
|
+ INDICATOR_ALT_BLINK_OFF = 7,
|
|
+};
|
|
+
|
|
+struct usb_tt_clear {
|
|
+ struct list_head clear_list;
|
|
+ unsigned int tt;
|
|
+ u16 devinfo;
|
|
+ struct usb_hcd *hcd;
|
|
+ struct usb_host_endpoint *ep;
|
|
+};
|
|
+
|
|
+typedef u32 usb_port_location_t;
|
|
+
|
|
+struct usb_port;
|
|
+
|
|
+struct usb_hub {
|
|
+ struct device *intfdev;
|
|
+ struct usb_device *hdev;
|
|
+ struct kref kref;
|
|
+ struct urb *urb;
|
|
+ u8 (*buffer)[8];
|
|
+ union {
|
|
+ struct usb_hub_status hub;
|
|
+ struct usb_port_status port;
|
|
+ } *status;
|
|
+ struct mutex status_mutex;
|
|
+ int error;
|
|
+ int nerrors;
|
|
+ long unsigned int event_bits[1];
|
|
+ long unsigned int change_bits[1];
|
|
+ long unsigned int removed_bits[1];
|
|
+ long unsigned int wakeup_bits[1];
|
|
+ long unsigned int power_bits[1];
|
|
+ long unsigned int child_usage_bits[1];
|
|
+ long unsigned int warm_reset_bits[1];
|
|
+ struct usb_hub_descriptor *descriptor;
|
|
+ struct usb_tt tt;
|
|
+ unsigned int mA_per_port;
|
|
+ unsigned int wakeup_enabled_descendants;
|
|
+ unsigned int limited_power: 1;
|
|
+ unsigned int quiescing: 1;
|
|
+ unsigned int disconnected: 1;
|
|
+ unsigned int in_reset: 1;
|
|
+ unsigned int quirk_check_port_auto_suspend: 1;
|
|
+ unsigned int has_indicators: 1;
|
|
+ u8 indicator[31];
|
|
+ struct delayed_work leds;
|
|
+ struct delayed_work init_work;
|
|
+ struct work_struct events;
|
|
+ struct usb_port **ports;
|
|
+};
|
|
+
|
|
+struct usb_dev_state;
|
|
+
|
|
+struct usb_port {
|
|
+ struct usb_device *child;
|
|
+ struct device dev;
|
|
+ struct usb_dev_state *port_owner;
|
|
+ struct usb_port *peer;
|
|
+ struct dev_pm_qos_request *req;
|
|
+ enum usb_port_connect_type connect_type;
|
|
+ usb_port_location_t location;
|
|
+ struct mutex status_lock;
|
|
+ u32 over_current_count;
|
|
+ u8 portnum;
|
|
+ u32 quirks;
|
|
+ unsigned int is_superspeed: 1;
|
|
+ unsigned int usb3_lpm_u1_permit: 1;
|
|
+ unsigned int usb3_lpm_u2_permit: 1;
|
|
+};
|
|
+
|
|
+enum hub_activation_type {
|
|
+ HUB_INIT = 0,
|
|
+ HUB_INIT2 = 1,
|
|
+ HUB_INIT3 = 2,
|
|
+ HUB_POST_RESET = 3,
|
|
+ HUB_RESUME = 4,
|
|
+ HUB_RESET_RESUME = 5,
|
|
+};
|
|
+
|
|
+enum hub_quiescing_type {
|
|
+ HUB_DISCONNECT = 0,
|
|
+ HUB_PRE_RESET = 1,
|
|
+ HUB_SUSPEND = 2,
|
|
+};
|
|
+
|
|
+struct usb_ctrlrequest {
|
|
+ __u8 bRequestType;
|
|
+ __u8 bRequest;
|
|
+ __le16 wValue;
|
|
+ __le16 wIndex;
|
|
+ __le16 wLength;
|
|
+};
|
|
+
|
|
+enum usb_led_event {
|
|
+ USB_LED_EVENT_HOST = 0,
|
|
+ USB_LED_EVENT_GADGET = 1,
|
|
+};
|
|
+
|
|
+struct usb_sg_request {
|
|
+ int status;
|
|
+ size_t bytes;
|
|
+ spinlock_t lock;
|
|
+ struct usb_device *dev;
|
|
+ int pipe;
|
|
+ int entries;
|
|
+ struct urb **urbs;
|
|
+ int count;
|
|
+ struct completion complete;
|
|
+};
|
|
+
|
|
+struct usb_cdc_header_desc {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDescriptorSubType;
|
|
+ __le16 bcdCDC;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct usb_cdc_call_mgmt_descriptor {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDescriptorSubType;
|
|
+ __u8 bmCapabilities;
|
|
+ __u8 bDataInterface;
|
|
+};
|
|
+
|
|
+struct usb_cdc_acm_descriptor {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDescriptorSubType;
|
|
+ __u8 bmCapabilities;
|
|
+};
|
|
+
|
|
+struct usb_cdc_union_desc {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDescriptorSubType;
|
|
+ __u8 bMasterInterface0;
|
|
+ __u8 bSlaveInterface0;
|
|
+};
|
|
+
|
|
+struct usb_cdc_country_functional_desc {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDescriptorSubType;
|
|
+ __u8 iCountryCodeRelDate;
|
|
+ __le16 wCountyCode0;
|
|
+};
|
|
+
|
|
+struct usb_cdc_network_terminal_desc {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDescriptorSubType;
|
|
+ __u8 bEntityId;
|
|
+ __u8 iName;
|
|
+ __u8 bChannelIndex;
|
|
+ __u8 bPhysicalInterface;
|
|
+};
|
|
+
|
|
+struct usb_cdc_ether_desc {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDescriptorSubType;
|
|
+ __u8 iMACAddress;
|
|
+ __le32 bmEthernetStatistics;
|
|
+ __le16 wMaxSegmentSize;
|
|
+ __le16 wNumberMCFilters;
|
|
+ __u8 bNumberPowerFilters;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct usb_cdc_dmm_desc {
|
|
+ __u8 bFunctionLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDescriptorSubtype;
|
|
+ __u16 bcdVersion;
|
|
+ __le16 wMaxCommand;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct usb_cdc_mdlm_desc {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDescriptorSubType;
|
|
+ __le16 bcdVersion;
|
|
+ __u8 bGUID[16];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct usb_cdc_mdlm_detail_desc {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDescriptorSubType;
|
|
+ __u8 bGuidDescriptorType;
|
|
+ __u8 bDetailData[0];
|
|
+};
|
|
+
|
|
+struct usb_cdc_obex_desc {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDescriptorSubType;
|
|
+ __le16 bcdVersion;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct usb_cdc_ncm_desc {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDescriptorSubType;
|
|
+ __le16 bcdNcmVersion;
|
|
+ __u8 bmNetworkCapabilities;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct usb_cdc_mbim_desc {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDescriptorSubType;
|
|
+ __le16 bcdMBIMVersion;
|
|
+ __le16 wMaxControlMessage;
|
|
+ __u8 bNumberFilters;
|
|
+ __u8 bMaxFilterSize;
|
|
+ __le16 wMaxSegmentSize;
|
|
+ __u8 bmNetworkCapabilities;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct usb_cdc_mbim_extended_desc {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDescriptorSubType;
|
|
+ __le16 bcdMBIMExtendedVersion;
|
|
+ __u8 bMaxOutstandingCommandMessages;
|
|
+ __le16 wMTU;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct usb_cdc_parsed_header {
|
|
+ struct usb_cdc_union_desc *usb_cdc_union_desc;
|
|
+ struct usb_cdc_header_desc *usb_cdc_header_desc;
|
|
+ struct usb_cdc_call_mgmt_descriptor *usb_cdc_call_mgmt_descriptor;
|
|
+ struct usb_cdc_acm_descriptor *usb_cdc_acm_descriptor;
|
|
+ struct usb_cdc_country_functional_desc *usb_cdc_country_functional_desc;
|
|
+ struct usb_cdc_network_terminal_desc *usb_cdc_network_terminal_desc;
|
|
+ struct usb_cdc_ether_desc *usb_cdc_ether_desc;
|
|
+ struct usb_cdc_dmm_desc *usb_cdc_dmm_desc;
|
|
+ struct usb_cdc_mdlm_desc *usb_cdc_mdlm_desc;
|
|
+ struct usb_cdc_mdlm_detail_desc *usb_cdc_mdlm_detail_desc;
|
|
+ struct usb_cdc_obex_desc *usb_cdc_obex_desc;
|
|
+ struct usb_cdc_ncm_desc *usb_cdc_ncm_desc;
|
|
+ struct usb_cdc_mbim_desc *usb_cdc_mbim_desc;
|
|
+ struct usb_cdc_mbim_extended_desc *usb_cdc_mbim_extended_desc;
|
|
+ bool phonet_magic_present;
|
|
+};
|
|
+
|
|
+struct api_context {
|
|
+ struct completion done;
|
|
+ int status;
|
|
+};
|
|
+
|
|
+struct set_config_request {
|
|
+ struct usb_device *udev;
|
|
+ int config;
|
|
+ struct work_struct work;
|
|
+ struct list_head node;
|
|
+};
|
|
+
|
|
+struct usb_dynid {
|
|
+ struct list_head node;
|
|
+ struct usb_device_id id;
|
|
+};
|
|
+
|
|
+struct usb_dev_cap_header {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDevCapabilityType;
|
|
+};
|
|
+
|
|
+struct usb_class_driver {
|
|
+ char *name;
|
|
+ char * (*devnode)(struct device *, umode_t *);
|
|
+ const struct file_operations *fops;
|
|
+ int minor_base;
|
|
+};
|
|
+
|
|
+struct usb_class {
|
|
+ struct kref kref;
|
|
+ struct class *class;
|
|
+};
|
|
+
|
|
+struct ep_device {
|
|
+ struct usb_endpoint_descriptor *desc;
|
|
+ struct usb_device *udev;
|
|
+ struct device dev;
|
|
+};
|
|
+
|
|
+struct usbdevfs_ctrltransfer {
|
|
+ __u8 bRequestType;
|
|
+ __u8 bRequest;
|
|
+ __u16 wValue;
|
|
+ __u16 wIndex;
|
|
+ __u16 wLength;
|
|
+ __u32 timeout;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+struct usbdevfs_bulktransfer {
|
|
+ unsigned int ep;
|
|
+ unsigned int len;
|
|
+ unsigned int timeout;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+struct usbdevfs_setinterface {
|
|
+ unsigned int interface;
|
|
+ unsigned int altsetting;
|
|
+};
|
|
+
|
|
+struct usbdevfs_disconnectsignal {
|
|
+ unsigned int signr;
|
|
+ void *context;
|
|
+};
|
|
+
|
|
+struct usbdevfs_getdriver {
|
|
+ unsigned int interface;
|
|
+ char driver[256];
|
|
+};
|
|
+
|
|
+struct usbdevfs_connectinfo {
|
|
+ unsigned int devnum;
|
|
+ unsigned char slow;
|
|
+};
|
|
+
|
|
+struct usbdevfs_iso_packet_desc {
|
|
+ unsigned int length;
|
|
+ unsigned int actual_length;
|
|
+ unsigned int status;
|
|
+};
|
|
+
|
|
+struct usbdevfs_urb {
|
|
+ unsigned char type;
|
|
+ unsigned char endpoint;
|
|
+ int status;
|
|
+ unsigned int flags;
|
|
+ void *buffer;
|
|
+ int buffer_length;
|
|
+ int actual_length;
|
|
+ int start_frame;
|
|
+ union {
|
|
+ int number_of_packets;
|
|
+ unsigned int stream_id;
|
|
+ };
|
|
+ int error_count;
|
|
+ unsigned int signr;
|
|
+ void *usercontext;
|
|
+ struct usbdevfs_iso_packet_desc iso_frame_desc[0];
|
|
+};
|
|
+
|
|
+struct usbdevfs_ioctl {
|
|
+ int ifno;
|
|
+ int ioctl_code;
|
|
+ void *data;
|
|
+};
|
|
+
|
|
+struct usbdevfs_disconnect_claim {
|
|
+ unsigned int interface;
|
|
+ unsigned int flags;
|
|
+ char driver[256];
|
|
+};
|
|
+
|
|
+struct usbdevfs_streams {
|
|
+ unsigned int num_streams;
|
|
+ unsigned int num_eps;
|
|
+ unsigned char eps[0];
|
|
+};
|
|
+
|
|
+struct usbdevfs_ctrltransfer32 {
|
|
+ u8 bRequestType;
|
|
+ u8 bRequest;
|
|
+ u16 wValue;
|
|
+ u16 wIndex;
|
|
+ u16 wLength;
|
|
+ u32 timeout;
|
|
+ compat_caddr_t data;
|
|
+};
|
|
+
|
|
+struct usbdevfs_bulktransfer32 {
|
|
+ compat_uint_t ep;
|
|
+ compat_uint_t len;
|
|
+ compat_uint_t timeout;
|
|
+ compat_caddr_t data;
|
|
+};
|
|
+
|
|
+struct usbdevfs_disconnectsignal32 {
|
|
+ compat_int_t signr;
|
|
+ compat_caddr_t context;
|
|
+};
|
|
+
|
|
+struct usbdevfs_urb32 {
|
|
+ unsigned char type;
|
|
+ unsigned char endpoint;
|
|
+ compat_int_t status;
|
|
+ compat_uint_t flags;
|
|
+ compat_caddr_t buffer;
|
|
+ compat_int_t buffer_length;
|
|
+ compat_int_t actual_length;
|
|
+ compat_int_t start_frame;
|
|
+ compat_int_t number_of_packets;
|
|
+ compat_int_t error_count;
|
|
+ compat_uint_t signr;
|
|
+ compat_caddr_t usercontext;
|
|
+ struct usbdevfs_iso_packet_desc iso_frame_desc[0];
|
|
+};
|
|
+
|
|
+struct usbdevfs_ioctl32 {
|
|
+ s32 ifno;
|
|
+ s32 ioctl_code;
|
|
+ compat_caddr_t data;
|
|
+};
|
|
+
|
|
+struct usb_dev_state___2 {
|
|
+ struct list_head list;
|
|
+ struct usb_device *dev;
|
|
+ struct file *file;
|
|
+ spinlock_t lock;
|
|
+ struct list_head async_pending;
|
|
+ struct list_head async_completed;
|
|
+ struct list_head memory_list;
|
|
+ wait_queue_head_t wait;
|
|
+ unsigned int discsignr;
|
|
+ struct pid *disc_pid;
|
|
+ const struct cred *cred;
|
|
+ void *disccontext;
|
|
+ long unsigned int ifclaimed;
|
|
+ u32 disabled_bulk_eps;
|
|
+ bool privileges_dropped;
|
|
+ long unsigned int interface_allowed_mask;
|
|
+};
|
|
+
|
|
+struct usb_memory {
|
|
+ struct list_head memlist;
|
|
+ int vma_use_count;
|
|
+ int urb_use_count;
|
|
+ u32 size;
|
|
+ void *mem;
|
|
+ dma_addr_t dma_handle;
|
|
+ long unsigned int vm_start;
|
|
+ struct usb_dev_state___2 *ps;
|
|
+};
|
|
+
|
|
+struct async {
|
|
+ struct list_head asynclist;
|
|
+ struct usb_dev_state___2 *ps;
|
|
+ struct pid *pid;
|
|
+ const struct cred *cred;
|
|
+ unsigned int signr;
|
|
+ unsigned int ifnum;
|
|
+ void *userbuffer;
|
|
+ void *userurb;
|
|
+ struct urb *urb;
|
|
+ struct usb_memory *usbm;
|
|
+ unsigned int mem_usage;
|
|
+ int status;
|
|
+ u8 bulk_addr;
|
|
+ u8 bulk_status;
|
|
+};
|
|
+
|
|
+enum snoop_when {
|
|
+ SUBMIT = 0,
|
|
+ COMPLETE = 1,
|
|
+};
|
|
+
|
|
+struct quirk_entry {
|
|
+ u16 vid;
|
|
+ u16 pid;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+struct device_connect_event {
|
|
+ atomic_t count;
|
|
+ wait_queue_head_t wait;
|
|
+};
|
|
+
|
|
+struct class_info {
|
|
+ int class;
|
|
+ char *class_name;
|
|
+};
|
|
+
|
|
+struct usb_phy_roothub___2 {
|
|
+ struct phy *phy;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+typedef void (*companion_fn)(struct pci_dev *, struct usb_hcd *, struct pci_dev *, struct usb_hcd *);
|
|
+
|
|
+struct mon_bus {
|
|
+ struct list_head bus_link;
|
|
+ spinlock_t lock;
|
|
+ struct usb_bus *u_bus;
|
|
+ int text_inited;
|
|
+ int bin_inited;
|
|
+ struct dentry *dent_s;
|
|
+ struct dentry *dent_t;
|
|
+ struct dentry *dent_u;
|
|
+ struct device *classdev;
|
|
+ int nreaders;
|
|
+ struct list_head r_list;
|
|
+ struct kref ref;
|
|
+ unsigned int cnt_events;
|
|
+ unsigned int cnt_text_lost;
|
|
+};
|
|
+
|
|
+struct mon_reader {
|
|
+ struct list_head r_link;
|
|
+ struct mon_bus *m_bus;
|
|
+ void *r_data;
|
|
+ void (*rnf_submit)(void *, struct urb *);
|
|
+ void (*rnf_error)(void *, struct urb *, int);
|
|
+ void (*rnf_complete)(void *, struct urb *, int);
|
|
+};
|
|
+
|
|
+struct snap {
|
|
+ int slen;
|
|
+ char str[80];
|
|
+};
|
|
+
|
|
+struct mon_iso_desc {
|
|
+ int status;
|
|
+ unsigned int offset;
|
|
+ unsigned int length;
|
|
+};
|
|
+
|
|
+struct mon_event_text {
|
|
+ struct list_head e_link;
|
|
+ int type;
|
|
+ long unsigned int id;
|
|
+ unsigned int tstamp;
|
|
+ int busnum;
|
|
+ char devnum;
|
|
+ char epnum;
|
|
+ char is_in;
|
|
+ char xfertype;
|
|
+ int length;
|
|
+ int status;
|
|
+ int interval;
|
|
+ int start_frame;
|
|
+ int error_count;
|
|
+ char setup_flag;
|
|
+ char data_flag;
|
|
+ int numdesc;
|
|
+ struct mon_iso_desc isodesc[5];
|
|
+ unsigned char setup[8];
|
|
+ unsigned char data[32];
|
|
+};
|
|
+
|
|
+struct mon_reader_text {
|
|
+ struct kmem_cache *e_slab;
|
|
+ int nevents;
|
|
+ struct list_head e_list;
|
|
+ struct mon_reader r;
|
|
+ wait_queue_head_t wait;
|
|
+ int printf_size;
|
|
+ size_t printf_offset;
|
|
+ size_t printf_togo;
|
|
+ char *printf_buf;
|
|
+ struct mutex printf_lock;
|
|
+ char slab_name[30];
|
|
+};
|
|
+
|
|
+struct mon_text_ptr {
|
|
+ int cnt;
|
|
+ int limit;
|
|
+ char *pbuf;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NAMESZ = 10,
|
|
+};
|
|
+
|
|
+struct iso_rec {
|
|
+ int error_count;
|
|
+ int numdesc;
|
|
+};
|
|
+
|
|
+struct mon_bin_hdr {
|
|
+ u64 id;
|
|
+ unsigned char type;
|
|
+ unsigned char xfer_type;
|
|
+ unsigned char epnum;
|
|
+ unsigned char devnum;
|
|
+ short unsigned int busnum;
|
|
+ char flag_setup;
|
|
+ char flag_data;
|
|
+ s64 ts_sec;
|
|
+ s32 ts_usec;
|
|
+ int status;
|
|
+ unsigned int len_urb;
|
|
+ unsigned int len_cap;
|
|
+ union {
|
|
+ unsigned char setup[8];
|
|
+ struct iso_rec iso;
|
|
+ } s;
|
|
+ int interval;
|
|
+ int start_frame;
|
|
+ unsigned int xfer_flags;
|
|
+ unsigned int ndesc;
|
|
+};
|
|
+
|
|
+struct mon_bin_isodesc {
|
|
+ int iso_status;
|
|
+ unsigned int iso_off;
|
|
+ unsigned int iso_len;
|
|
+ u32 _pad;
|
|
+};
|
|
+
|
|
+struct mon_bin_stats {
|
|
+ u32 queued;
|
|
+ u32 dropped;
|
|
+};
|
|
+
|
|
+struct mon_bin_get {
|
|
+ struct mon_bin_hdr *hdr;
|
|
+ void *data;
|
|
+ size_t alloc;
|
|
+};
|
|
+
|
|
+struct mon_bin_mfetch {
|
|
+ u32 *offvec;
|
|
+ u32 nfetch;
|
|
+ u32 nflush;
|
|
+};
|
|
+
|
|
+struct mon_bin_get32 {
|
|
+ u32 hdr32;
|
|
+ u32 data32;
|
|
+ u32 alloc32;
|
|
+};
|
|
+
|
|
+struct mon_bin_mfetch32 {
|
|
+ u32 offvec32;
|
|
+ u32 nfetch32;
|
|
+ u32 nflush32;
|
|
+};
|
|
+
|
|
+struct mon_pgmap {
|
|
+ struct page *pg;
|
|
+ unsigned char *ptr;
|
|
+};
|
|
+
|
|
+struct mon_reader_bin {
|
|
+ spinlock_t b_lock;
|
|
+ unsigned int b_size;
|
|
+ unsigned int b_cnt;
|
|
+ unsigned int b_in;
|
|
+ unsigned int b_out;
|
|
+ unsigned int b_read;
|
|
+ struct mon_pgmap *b_vec;
|
|
+ wait_queue_head_t b_wait;
|
|
+ struct mutex fetch_lock;
|
|
+ int mmap_active;
|
|
+ struct mon_reader r;
|
|
+ unsigned int cnt_lost;
|
|
+};
|
|
+
|
|
+enum amd_chipset_gen {
|
|
+ NOT_AMD_CHIPSET = 0,
|
|
+ AMD_CHIPSET_SB600 = 1,
|
|
+ AMD_CHIPSET_SB700 = 2,
|
|
+ AMD_CHIPSET_SB800 = 3,
|
|
+ AMD_CHIPSET_HUDSON2 = 4,
|
|
+ AMD_CHIPSET_BOLTON = 5,
|
|
+ AMD_CHIPSET_YANGTZE = 6,
|
|
+ AMD_CHIPSET_TAISHAN = 7,
|
|
+ AMD_CHIPSET_UNKNOWN = 8,
|
|
+};
|
|
+
|
|
+struct amd_chipset_type {
|
|
+ enum amd_chipset_gen gen;
|
|
+ u8 rev;
|
|
+};
|
|
+
|
|
+struct amd_chipset_info {
|
|
+ struct pci_dev *nb_dev;
|
|
+ struct pci_dev *smbus_dev;
|
|
+ int nb_type;
|
|
+ struct amd_chipset_type sb_type;
|
|
+ int isoc_reqs;
|
|
+ int probe_count;
|
|
+ int probe_result;
|
|
+};
|
|
+
|
|
+struct ehci_stats {
|
|
+ long unsigned int normal;
|
|
+ long unsigned int error;
|
|
+ long unsigned int iaa;
|
|
+ long unsigned int lost_iaa;
|
|
+ long unsigned int complete;
|
|
+ long unsigned int unlink;
|
|
+};
|
|
+
|
|
+struct ehci_per_sched {
|
|
+ struct usb_device *udev;
|
|
+ struct usb_host_endpoint *ep;
|
|
+ struct list_head ps_list;
|
|
+ u16 tt_usecs;
|
|
+ u16 cs_mask;
|
|
+ u16 period;
|
|
+ u16 phase;
|
|
+ u8 bw_phase;
|
|
+ u8 phase_uf;
|
|
+ u8 usecs;
|
|
+ u8 c_usecs;
|
|
+ u8 bw_uperiod;
|
|
+ u8 bw_period;
|
|
+};
|
|
+
|
|
+enum ehci_rh_state {
|
|
+ EHCI_RH_HALTED = 0,
|
|
+ EHCI_RH_SUSPENDED = 1,
|
|
+ EHCI_RH_RUNNING = 2,
|
|
+ EHCI_RH_STOPPING = 3,
|
|
+};
|
|
+
|
|
+enum ehci_hrtimer_event {
|
|
+ EHCI_HRTIMER_POLL_ASS = 0,
|
|
+ EHCI_HRTIMER_POLL_PSS = 1,
|
|
+ EHCI_HRTIMER_POLL_DEAD = 2,
|
|
+ EHCI_HRTIMER_UNLINK_INTR = 3,
|
|
+ EHCI_HRTIMER_FREE_ITDS = 4,
|
|
+ EHCI_HRTIMER_ACTIVE_UNLINK = 5,
|
|
+ EHCI_HRTIMER_START_UNLINK_INTR = 6,
|
|
+ EHCI_HRTIMER_ASYNC_UNLINKS = 7,
|
|
+ EHCI_HRTIMER_IAA_WATCHDOG = 8,
|
|
+ EHCI_HRTIMER_DISABLE_PERIODIC = 9,
|
|
+ EHCI_HRTIMER_DISABLE_ASYNC = 10,
|
|
+ EHCI_HRTIMER_IO_WATCHDOG = 11,
|
|
+ EHCI_HRTIMER_NUM_EVENTS = 12,
|
|
+};
|
|
+
|
|
+struct ehci_caps;
|
|
+
|
|
+struct ehci_regs;
|
|
+
|
|
+struct ehci_dbg_port;
|
|
+
|
|
+struct ehci_qh;
|
|
+
|
|
+union ehci_shadow;
|
|
+
|
|
+struct ehci_itd;
|
|
+
|
|
+struct ehci_sitd;
|
|
+
|
|
+struct ehci_hcd {
|
|
+ enum ehci_hrtimer_event next_hrtimer_event;
|
|
+ unsigned int enabled_hrtimer_events;
|
|
+ ktime_t hr_timeouts[12];
|
|
+ struct hrtimer hrtimer;
|
|
+ int PSS_poll_count;
|
|
+ int ASS_poll_count;
|
|
+ int died_poll_count;
|
|
+ struct ehci_caps *caps;
|
|
+ struct ehci_regs *regs;
|
|
+ struct ehci_dbg_port *debug;
|
|
+ __u32 hcs_params;
|
|
+ spinlock_t lock;
|
|
+ enum ehci_rh_state rh_state;
|
|
+ bool scanning: 1;
|
|
+ bool need_rescan: 1;
|
|
+ bool intr_unlinking: 1;
|
|
+ bool iaa_in_progress: 1;
|
|
+ bool async_unlinking: 1;
|
|
+ bool shutdown: 1;
|
|
+ struct ehci_qh *qh_scan_next;
|
|
+ struct ehci_qh *async;
|
|
+ struct ehci_qh *dummy;
|
|
+ struct list_head async_unlink;
|
|
+ struct list_head async_idle;
|
|
+ unsigned int async_unlink_cycle;
|
|
+ unsigned int async_count;
|
|
+ __le32 old_current;
|
|
+ __le32 old_token;
|
|
+ unsigned int periodic_size;
|
|
+ __le32 *periodic;
|
|
+ dma_addr_t periodic_dma;
|
|
+ struct list_head intr_qh_list;
|
|
+ unsigned int i_thresh;
|
|
+ union ehci_shadow *pshadow;
|
|
+ struct list_head intr_unlink_wait;
|
|
+ struct list_head intr_unlink;
|
|
+ unsigned int intr_unlink_wait_cycle;
|
|
+ unsigned int intr_unlink_cycle;
|
|
+ unsigned int now_frame;
|
|
+ unsigned int last_iso_frame;
|
|
+ unsigned int intr_count;
|
|
+ unsigned int isoc_count;
|
|
+ unsigned int periodic_count;
|
|
+ unsigned int uframe_periodic_max;
|
|
+ struct list_head cached_itd_list;
|
|
+ struct ehci_itd *last_itd_to_free;
|
|
+ struct list_head cached_sitd_list;
|
|
+ struct ehci_sitd *last_sitd_to_free;
|
|
+ long unsigned int reset_done[15];
|
|
+ long unsigned int bus_suspended;
|
|
+ long unsigned int companion_ports;
|
|
+ long unsigned int owned_ports;
|
|
+ long unsigned int port_c_suspend;
|
|
+ long unsigned int suspended_ports;
|
|
+ long unsigned int resuming_ports;
|
|
+ struct dma_pool___2 *qh_pool;
|
|
+ struct dma_pool___2 *qtd_pool;
|
|
+ struct dma_pool___2 *itd_pool;
|
|
+ struct dma_pool___2 *sitd_pool;
|
|
+ unsigned int random_frame;
|
|
+ long unsigned int next_statechange;
|
|
+ ktime_t last_periodic_enable;
|
|
+ u32 command;
|
|
+ unsigned int no_selective_suspend: 1;
|
|
+ unsigned int has_fsl_port_bug: 1;
|
|
+ unsigned int has_fsl_hs_errata: 1;
|
|
+ unsigned int has_fsl_susp_errata: 1;
|
|
+ unsigned int big_endian_mmio: 1;
|
|
+ unsigned int big_endian_desc: 1;
|
|
+ unsigned int big_endian_capbase: 1;
|
|
+ unsigned int has_amcc_usb23: 1;
|
|
+ unsigned int need_io_watchdog: 1;
|
|
+ unsigned int amd_pll_fix: 1;
|
|
+ unsigned int use_dummy_qh: 1;
|
|
+ unsigned int has_synopsys_hc_bug: 1;
|
|
+ unsigned int frame_index_bug: 1;
|
|
+ unsigned int need_oc_pp_cycle: 1;
|
|
+ unsigned int imx28_write_fix: 1;
|
|
+ __le32 *ohci_hcctrl_reg;
|
|
+ unsigned int has_hostpc: 1;
|
|
+ unsigned int has_tdi_phy_lpm: 1;
|
|
+ unsigned int has_ppcd: 1;
|
|
+ u8 sbrn;
|
|
+ struct ehci_stats stats;
|
|
+ struct dentry *debug_dir;
|
|
+ u8 bandwidth[64];
|
|
+ u8 tt_budget[64];
|
|
+ struct list_head tt_list;
|
|
+ long unsigned int priv[0];
|
|
+};
|
|
+
|
|
+struct ehci_caps {
|
|
+ u32 hc_capbase;
|
|
+ u32 hcs_params;
|
|
+ u32 hcc_params;
|
|
+ u8 portroute[8];
|
|
+};
|
|
+
|
|
+struct ehci_regs {
|
|
+ u32 command;
|
|
+ u32 status;
|
|
+ u32 intr_enable;
|
|
+ u32 frame_index;
|
|
+ u32 segment;
|
|
+ u32 frame_list;
|
|
+ u32 async_next;
|
|
+ u32 reserved1[2];
|
|
+ u32 txfill_tuning;
|
|
+ u32 reserved2[6];
|
|
+ u32 configured_flag;
|
|
+ u32 port_status[0];
|
|
+ u32 reserved3[9];
|
|
+ u32 usbmode;
|
|
+ u32 reserved4[6];
|
|
+ u32 hostpc[0];
|
|
+ u32 reserved5[17];
|
|
+ u32 usbmode_ex;
|
|
+};
|
|
+
|
|
+struct ehci_dbg_port {
|
|
+ u32 control;
|
|
+ u32 pids;
|
|
+ u32 data03;
|
|
+ u32 data47;
|
|
+ u32 address;
|
|
+};
|
|
+
|
|
+struct ehci_fstn;
|
|
+
|
|
+union ehci_shadow {
|
|
+ struct ehci_qh *qh;
|
|
+ struct ehci_itd *itd;
|
|
+ struct ehci_sitd *sitd;
|
|
+ struct ehci_fstn *fstn;
|
|
+ __le32 *hw_next;
|
|
+ void *ptr;
|
|
+};
|
|
+
|
|
+struct ehci_qh_hw;
|
|
+
|
|
+struct ehci_qtd;
|
|
+
|
|
+struct ehci_qh {
|
|
+ struct ehci_qh_hw *hw;
|
|
+ dma_addr_t qh_dma;
|
|
+ union ehci_shadow qh_next;
|
|
+ struct list_head qtd_list;
|
|
+ struct list_head intr_node;
|
|
+ struct ehci_qtd *dummy;
|
|
+ struct list_head unlink_node;
|
|
+ struct ehci_per_sched ps;
|
|
+ unsigned int unlink_cycle;
|
|
+ u8 qh_state;
|
|
+ u8 xacterrs;
|
|
+ u8 unlink_reason;
|
|
+ u8 gap_uf;
|
|
+ unsigned int is_out: 1;
|
|
+ unsigned int clearing_tt: 1;
|
|
+ unsigned int dequeue_during_giveback: 1;
|
|
+ unsigned int should_be_inactive: 1;
|
|
+};
|
|
+
|
|
+struct ehci_iso_stream;
|
|
+
|
|
+struct ehci_itd {
|
|
+ __le32 hw_next;
|
|
+ __le32 hw_transaction[8];
|
|
+ __le32 hw_bufp[7];
|
|
+ __le32 hw_bufp_hi[7];
|
|
+ dma_addr_t itd_dma;
|
|
+ union ehci_shadow itd_next;
|
|
+ struct urb *urb;
|
|
+ struct ehci_iso_stream *stream;
|
|
+ struct list_head itd_list;
|
|
+ unsigned int frame;
|
|
+ unsigned int pg;
|
|
+ unsigned int index[8];
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct ehci_sitd {
|
|
+ __le32 hw_next;
|
|
+ __le32 hw_fullspeed_ep;
|
|
+ __le32 hw_uframe;
|
|
+ __le32 hw_results;
|
|
+ __le32 hw_buf[2];
|
|
+ __le32 hw_backpointer;
|
|
+ __le32 hw_buf_hi[2];
|
|
+ dma_addr_t sitd_dma;
|
|
+ union ehci_shadow sitd_next;
|
|
+ struct urb *urb;
|
|
+ struct ehci_iso_stream *stream;
|
|
+ struct list_head sitd_list;
|
|
+ unsigned int frame;
|
|
+ unsigned int index;
|
|
+};
|
|
+
|
|
+struct ehci_qtd {
|
|
+ __le32 hw_next;
|
|
+ __le32 hw_alt_next;
|
|
+ __le32 hw_token;
|
|
+ __le32 hw_buf[5];
|
|
+ __le32 hw_buf_hi[5];
|
|
+ dma_addr_t qtd_dma;
|
|
+ struct list_head qtd_list;
|
|
+ struct urb *urb;
|
|
+ size_t length;
|
|
+};
|
|
+
|
|
+struct ehci_fstn {
|
|
+ __le32 hw_next;
|
|
+ __le32 hw_prev;
|
|
+ dma_addr_t fstn_dma;
|
|
+ union ehci_shadow fstn_next;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct ehci_qh_hw {
|
|
+ __le32 hw_next;
|
|
+ __le32 hw_info1;
|
|
+ __le32 hw_info2;
|
|
+ __le32 hw_current;
|
|
+ __le32 hw_qtd_next;
|
|
+ __le32 hw_alt_next;
|
|
+ __le32 hw_token;
|
|
+ __le32 hw_buf[5];
|
|
+ __le32 hw_buf_hi[5];
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct ehci_iso_packet {
|
|
+ u64 bufp;
|
|
+ __le32 transaction;
|
|
+ u8 cross;
|
|
+ u32 buf1;
|
|
+};
|
|
+
|
|
+struct ehci_iso_sched {
|
|
+ struct list_head td_list;
|
|
+ unsigned int span;
|
|
+ unsigned int first_packet;
|
|
+ struct ehci_iso_packet packet[0];
|
|
+};
|
|
+
|
|
+struct ehci_iso_stream {
|
|
+ struct ehci_qh_hw *hw;
|
|
+ u8 bEndpointAddress;
|
|
+ u8 highspeed;
|
|
+ struct list_head td_list;
|
|
+ struct list_head free_list;
|
|
+ struct ehci_per_sched ps;
|
|
+ unsigned int next_uframe;
|
|
+ __le32 splits;
|
|
+ u16 uperiod;
|
|
+ u16 maxp;
|
|
+ unsigned int bandwidth;
|
|
+ __le32 buf0;
|
|
+ __le32 buf1;
|
|
+ __le32 buf2;
|
|
+ __le32 address;
|
|
+};
|
|
+
|
|
+struct ehci_tt {
|
|
+ u16 bandwidth[8];
|
|
+ struct list_head tt_list;
|
|
+ struct list_head ps_list;
|
|
+ struct usb_tt *usb_tt;
|
|
+ int tt_port;
|
|
+};
|
|
+
|
|
+struct ehci_driver_overrides {
|
|
+ size_t extra_priv_size;
|
|
+ int (*reset)(struct usb_hcd *);
|
|
+ int (*port_power)(struct usb_hcd *, int, bool);
|
|
+};
|
|
+
|
|
+struct debug_buffer {
|
|
+ ssize_t (*fill_func)(struct debug_buffer *);
|
|
+ struct usb_bus *bus;
|
|
+ struct mutex mutex;
|
|
+ size_t count;
|
|
+ char *output_buf;
|
|
+ size_t alloc_size;
|
|
+};
|
|
+
|
|
+typedef __u32 __hc32;
|
|
+
|
|
+typedef __u16 __hc16;
|
|
+
|
|
+struct td;
|
|
+
|
|
+struct ed {
|
|
+ __hc32 hwINFO;
|
|
+ __hc32 hwTailP;
|
|
+ __hc32 hwHeadP;
|
|
+ __hc32 hwNextED;
|
|
+ dma_addr_t dma;
|
|
+ struct td *dummy;
|
|
+ struct ed *ed_next;
|
|
+ struct ed *ed_prev;
|
|
+ struct list_head td_list;
|
|
+ struct list_head in_use_list;
|
|
+ u8 state;
|
|
+ u8 type;
|
|
+ u8 branch;
|
|
+ u16 interval;
|
|
+ u16 load;
|
|
+ u16 last_iso;
|
|
+ u16 tick;
|
|
+ unsigned int takeback_wdh_cnt;
|
|
+ struct td *pending_td;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct td {
|
|
+ __hc32 hwINFO;
|
|
+ __hc32 hwCBP;
|
|
+ __hc32 hwNextTD;
|
|
+ __hc32 hwBE;
|
|
+ __hc16 hwPSW[2];
|
|
+ __u8 index;
|
|
+ struct ed *ed;
|
|
+ struct td *td_hash;
|
|
+ struct td *next_dl_td;
|
|
+ struct urb *urb;
|
|
+ dma_addr_t td_dma;
|
|
+ dma_addr_t data_dma;
|
|
+ struct list_head td_list;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct ohci_hcca {
|
|
+ __hc32 int_table[32];
|
|
+ __hc32 frame_no;
|
|
+ __hc32 done_head;
|
|
+ u8 reserved_for_hc[116];
|
|
+ u8 what[4];
|
|
+};
|
|
+
|
|
+struct ohci_roothub_regs {
|
|
+ __hc32 a;
|
|
+ __hc32 b;
|
|
+ __hc32 status;
|
|
+ __hc32 portstatus[15];
|
|
+};
|
|
+
|
|
+struct ohci_regs {
|
|
+ __hc32 revision;
|
|
+ __hc32 control;
|
|
+ __hc32 cmdstatus;
|
|
+ __hc32 intrstatus;
|
|
+ __hc32 intrenable;
|
|
+ __hc32 intrdisable;
|
|
+ __hc32 hcca;
|
|
+ __hc32 ed_periodcurrent;
|
|
+ __hc32 ed_controlhead;
|
|
+ __hc32 ed_controlcurrent;
|
|
+ __hc32 ed_bulkhead;
|
|
+ __hc32 ed_bulkcurrent;
|
|
+ __hc32 donehead;
|
|
+ __hc32 fminterval;
|
|
+ __hc32 fmremaining;
|
|
+ __hc32 fmnumber;
|
|
+ __hc32 periodicstart;
|
|
+ __hc32 lsthresh;
|
|
+ struct ohci_roothub_regs roothub;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct urb_priv {
|
|
+ struct ed *ed;
|
|
+ u16 length;
|
|
+ u16 td_cnt;
|
|
+ struct list_head pending;
|
|
+ struct td *td[0];
|
|
+};
|
|
+
|
|
+typedef struct urb_priv urb_priv_t;
|
|
+
|
|
+enum ohci_rh_state {
|
|
+ OHCI_RH_HALTED = 0,
|
|
+ OHCI_RH_SUSPENDED = 1,
|
|
+ OHCI_RH_RUNNING = 2,
|
|
+};
|
|
+
|
|
+struct ohci_hcd {
|
|
+ spinlock_t lock;
|
|
+ struct ohci_regs *regs;
|
|
+ struct ohci_hcca *hcca;
|
|
+ dma_addr_t hcca_dma;
|
|
+ struct ed *ed_rm_list;
|
|
+ struct ed *ed_bulktail;
|
|
+ struct ed *ed_controltail;
|
|
+ struct ed *periodic[32];
|
|
+ void (*start_hnp)(struct ohci_hcd *);
|
|
+ struct dma_pool___2 *td_cache;
|
|
+ struct dma_pool___2 *ed_cache;
|
|
+ struct td *td_hash[64];
|
|
+ struct td *dl_start;
|
|
+ struct td *dl_end;
|
|
+ struct list_head pending;
|
|
+ struct list_head eds_in_use;
|
|
+ enum ohci_rh_state rh_state;
|
|
+ int num_ports;
|
|
+ int load[32];
|
|
+ u32 hc_control;
|
|
+ long unsigned int next_statechange;
|
|
+ u32 fminterval;
|
|
+ unsigned int autostop: 1;
|
|
+ unsigned int working: 1;
|
|
+ unsigned int restart_work: 1;
|
|
+ long unsigned int flags;
|
|
+ unsigned int prev_frame_no;
|
|
+ unsigned int wdh_cnt;
|
|
+ unsigned int prev_wdh_cnt;
|
|
+ u32 prev_donehead;
|
|
+ struct timer_list io_watchdog;
|
|
+ struct work_struct nec_work;
|
|
+ struct dentry *debug_dir;
|
|
+ long unsigned int priv[0];
|
|
+};
|
|
+
|
|
+struct ohci_driver_overrides {
|
|
+ const char *product_desc;
|
|
+ size_t extra_priv_size;
|
|
+ int (*reset)(struct usb_hcd *);
|
|
+};
|
|
+
|
|
+struct debug_buffer___2 {
|
|
+ ssize_t (*fill_func)(struct debug_buffer___2 *);
|
|
+ struct ohci_hcd *ohci;
|
|
+ struct mutex mutex;
|
|
+ size_t count;
|
|
+ char *page;
|
|
+};
|
|
+
|
|
+struct uhci_td;
|
|
+
|
|
+struct uhci_qh {
|
|
+ __le32 link;
|
|
+ __le32 element;
|
|
+ dma_addr_t dma_handle;
|
|
+ struct list_head node;
|
|
+ struct usb_host_endpoint *hep;
|
|
+ struct usb_device *udev;
|
|
+ struct list_head queue;
|
|
+ struct uhci_td *dummy_td;
|
|
+ struct uhci_td *post_td;
|
|
+ struct usb_iso_packet_descriptor *iso_packet_desc;
|
|
+ long unsigned int advance_jiffies;
|
|
+ unsigned int unlink_frame;
|
|
+ unsigned int period;
|
|
+ short int phase;
|
|
+ short int load;
|
|
+ unsigned int iso_frame;
|
|
+ int state;
|
|
+ int type;
|
|
+ int skel;
|
|
+ unsigned int initial_toggle: 1;
|
|
+ unsigned int needs_fixup: 1;
|
|
+ unsigned int is_stopped: 1;
|
|
+ unsigned int wait_expired: 1;
|
|
+ unsigned int bandwidth_reserved: 1;
|
|
+};
|
|
+
|
|
+struct uhci_td {
|
|
+ __le32 link;
|
|
+ __le32 status;
|
|
+ __le32 token;
|
|
+ __le32 buffer;
|
|
+ dma_addr_t dma_handle;
|
|
+ struct list_head list;
|
|
+ int frame;
|
|
+ struct list_head fl_list;
|
|
+};
|
|
+
|
|
+enum uhci_rh_state {
|
|
+ UHCI_RH_RESET = 0,
|
|
+ UHCI_RH_SUSPENDED = 1,
|
|
+ UHCI_RH_AUTO_STOPPED = 2,
|
|
+ UHCI_RH_RESUMING = 3,
|
|
+ UHCI_RH_SUSPENDING = 4,
|
|
+ UHCI_RH_RUNNING = 5,
|
|
+ UHCI_RH_RUNNING_NODEVS = 6,
|
|
+};
|
|
+
|
|
+struct uhci_hcd {
|
|
+ struct dentry *dentry;
|
|
+ long unsigned int io_addr;
|
|
+ void *regs;
|
|
+ struct dma_pool___2 *qh_pool;
|
|
+ struct dma_pool___2 *td_pool;
|
|
+ struct uhci_td *term_td;
|
|
+ struct uhci_qh *skelqh[11];
|
|
+ struct uhci_qh *next_qh;
|
|
+ spinlock_t lock;
|
|
+ dma_addr_t frame_dma_handle;
|
|
+ __le32 *frame;
|
|
+ void **frame_cpu;
|
|
+ enum uhci_rh_state rh_state;
|
|
+ long unsigned int auto_stop_time;
|
|
+ unsigned int frame_number;
|
|
+ unsigned int is_stopped;
|
|
+ unsigned int last_iso_frame;
|
|
+ unsigned int cur_iso_frame;
|
|
+ unsigned int scan_in_progress: 1;
|
|
+ unsigned int need_rescan: 1;
|
|
+ unsigned int dead: 1;
|
|
+ unsigned int RD_enable: 1;
|
|
+ unsigned int is_initialized: 1;
|
|
+ unsigned int fsbr_is_on: 1;
|
|
+ unsigned int fsbr_is_wanted: 1;
|
|
+ unsigned int fsbr_expiring: 1;
|
|
+ struct timer_list fsbr_timer;
|
|
+ unsigned int oc_low: 1;
|
|
+ unsigned int wait_for_hp: 1;
|
|
+ unsigned int big_endian_mmio: 1;
|
|
+ unsigned int big_endian_desc: 1;
|
|
+ unsigned int is_aspeed: 1;
|
|
+ long unsigned int port_c_suspend;
|
|
+ long unsigned int resuming_ports;
|
|
+ long unsigned int ports_timeout;
|
|
+ struct list_head idle_qh_list;
|
|
+ int rh_numports;
|
|
+ wait_queue_head_t waitqh;
|
|
+ int num_waiting;
|
|
+ int total_load;
|
|
+ short int load[32];
|
|
+ struct clk *clk;
|
|
+ void (*reset_hc)(struct uhci_hcd *);
|
|
+ int (*check_and_reset_hc)(struct uhci_hcd *);
|
|
+ void (*configure_hc)(struct uhci_hcd *);
|
|
+ int (*resume_detect_interrupts_are_broken)(struct uhci_hcd *);
|
|
+ int (*global_suspend_mode_is_broken)(struct uhci_hcd *);
|
|
+};
|
|
+
|
|
+struct urb_priv___2 {
|
|
+ struct list_head node;
|
|
+ struct urb *urb;
|
|
+ struct uhci_qh *qh;
|
|
+ struct list_head td_list;
|
|
+ unsigned int fsbr: 1;
|
|
+};
|
|
+
|
|
+struct uhci_debug {
|
|
+ int size;
|
|
+ char *data;
|
|
+};
|
|
+
|
|
+struct xhci_cap_regs {
|
|
+ __le32 hc_capbase;
|
|
+ __le32 hcs_params1;
|
|
+ __le32 hcs_params2;
|
|
+ __le32 hcs_params3;
|
|
+ __le32 hcc_params;
|
|
+ __le32 db_off;
|
|
+ __le32 run_regs_off;
|
|
+ __le32 hcc_params2;
|
|
+};
|
|
+
|
|
+struct xhci_op_regs {
|
|
+ __le32 command;
|
|
+ __le32 status;
|
|
+ __le32 page_size;
|
|
+ __le32 reserved1;
|
|
+ __le32 reserved2;
|
|
+ __le32 dev_notification;
|
|
+ __le64 cmd_ring;
|
|
+ __le32 reserved3[4];
|
|
+ __le64 dcbaa_ptr;
|
|
+ __le32 config_reg;
|
|
+ __le32 reserved4[241];
|
|
+ __le32 port_status_base;
|
|
+ __le32 port_power_base;
|
|
+ __le32 port_link_base;
|
|
+ __le32 reserved5;
|
|
+ __le32 reserved6[1016];
|
|
+};
|
|
+
|
|
+struct xhci_intr_reg {
|
|
+ __le32 irq_pending;
|
|
+ __le32 irq_control;
|
|
+ __le32 erst_size;
|
|
+ __le32 rsvd;
|
|
+ __le64 erst_base;
|
|
+ __le64 erst_dequeue;
|
|
+};
|
|
+
|
|
+struct xhci_run_regs {
|
|
+ __le32 microframe_index;
|
|
+ __le32 rsvd[7];
|
|
+ struct xhci_intr_reg ir_set[128];
|
|
+};
|
|
+
|
|
+struct xhci_doorbell_array {
|
|
+ __le32 doorbell[256];
|
|
+};
|
|
+
|
|
+struct xhci_container_ctx {
|
|
+ unsigned int type;
|
|
+ int size;
|
|
+ u8 *bytes;
|
|
+ dma_addr_t dma;
|
|
+};
|
|
+
|
|
+struct xhci_slot_ctx {
|
|
+ __le32 dev_info;
|
|
+ __le32 dev_info2;
|
|
+ __le32 tt_info;
|
|
+ __le32 dev_state;
|
|
+ __le32 reserved[4];
|
|
+};
|
|
+
|
|
+struct xhci_ep_ctx {
|
|
+ __le32 ep_info;
|
|
+ __le32 ep_info2;
|
|
+ __le64 deq;
|
|
+ __le32 tx_info;
|
|
+ __le32 reserved[3];
|
|
+};
|
|
+
|
|
+struct xhci_input_control_ctx {
|
|
+ __le32 drop_flags;
|
|
+ __le32 add_flags;
|
|
+ __le32 rsvd2[6];
|
|
+};
|
|
+
|
|
+union xhci_trb;
|
|
+
|
|
+struct xhci_command {
|
|
+ struct xhci_container_ctx *in_ctx;
|
|
+ u32 status;
|
|
+ int slot_id;
|
|
+ struct completion *completion;
|
|
+ union xhci_trb *command_trb;
|
|
+ struct list_head cmd_list;
|
|
+};
|
|
+
|
|
+struct xhci_link_trb {
|
|
+ __le64 segment_ptr;
|
|
+ __le32 intr_target;
|
|
+ __le32 control;
|
|
+};
|
|
+
|
|
+struct xhci_transfer_event {
|
|
+ __le64 buffer;
|
|
+ __le32 transfer_len;
|
|
+ __le32 flags;
|
|
+};
|
|
+
|
|
+struct xhci_event_cmd {
|
|
+ __le64 cmd_trb;
|
|
+ __le32 status;
|
|
+ __le32 flags;
|
|
+};
|
|
+
|
|
+struct xhci_generic_trb {
|
|
+ __le32 field[4];
|
|
+};
|
|
+
|
|
+union xhci_trb {
|
|
+ struct xhci_link_trb link;
|
|
+ struct xhci_transfer_event trans_event;
|
|
+ struct xhci_event_cmd event_cmd;
|
|
+ struct xhci_generic_trb generic;
|
|
+};
|
|
+
|
|
+struct xhci_stream_ctx {
|
|
+ __le64 stream_ring;
|
|
+ __le32 reserved[2];
|
|
+};
|
|
+
|
|
+struct xhci_ring;
|
|
+
|
|
+struct xhci_stream_info {
|
|
+ struct xhci_ring **stream_rings;
|
|
+ unsigned int num_streams;
|
|
+ struct xhci_stream_ctx *stream_ctx_array;
|
|
+ unsigned int num_stream_ctxs;
|
|
+ dma_addr_t ctx_array_dma;
|
|
+ struct radix_tree_root trb_address_map;
|
|
+ struct xhci_command *free_streams_command;
|
|
+};
|
|
+
|
|
+enum xhci_ring_type {
|
|
+ TYPE_CTRL = 0,
|
|
+ TYPE_ISOC = 1,
|
|
+ TYPE_BULK = 2,
|
|
+ TYPE_INTR = 3,
|
|
+ TYPE_STREAM = 4,
|
|
+ TYPE_COMMAND = 5,
|
|
+ TYPE_EVENT = 6,
|
|
+};
|
|
+
|
|
+struct xhci_segment;
|
|
+
|
|
+struct xhci_ring {
|
|
+ struct xhci_segment *first_seg;
|
|
+ struct xhci_segment *last_seg;
|
|
+ union xhci_trb *enqueue;
|
|
+ struct xhci_segment *enq_seg;
|
|
+ union xhci_trb *dequeue;
|
|
+ struct xhci_segment *deq_seg;
|
|
+ struct list_head td_list;
|
|
+ u32 cycle_state;
|
|
+ unsigned int stream_id;
|
|
+ unsigned int num_segs;
|
|
+ unsigned int num_trbs_free;
|
|
+ unsigned int num_trbs_free_temp;
|
|
+ unsigned int bounce_buf_len;
|
|
+ enum xhci_ring_type type;
|
|
+ bool last_td_was_short;
|
|
+ struct radix_tree_root *trb_address_map;
|
|
+};
|
|
+
|
|
+struct xhci_bw_info {
|
|
+ unsigned int ep_interval;
|
|
+ unsigned int mult;
|
|
+ unsigned int num_packets;
|
|
+ unsigned int max_packet_size;
|
|
+ unsigned int max_esit_payload;
|
|
+ unsigned int type;
|
|
+};
|
|
+
|
|
+struct xhci_hcd;
|
|
+
|
|
+struct xhci_virt_ep {
|
|
+ struct xhci_ring *ring;
|
|
+ struct xhci_stream_info *stream_info;
|
|
+ struct xhci_ring *new_ring;
|
|
+ unsigned int ep_state;
|
|
+ struct list_head cancelled_td_list;
|
|
+ struct timer_list stop_cmd_timer;
|
|
+ struct xhci_hcd *xhci;
|
|
+ struct xhci_segment *queued_deq_seg;
|
|
+ union xhci_trb *queued_deq_ptr;
|
|
+ bool skip;
|
|
+ struct xhci_bw_info bw_info;
|
|
+ struct list_head bw_endpoint_list;
|
|
+ int next_frame_id;
|
|
+ bool use_extended_tbc;
|
|
+};
|
|
+
|
|
+struct xhci_erst_entry;
|
|
+
|
|
+struct xhci_erst {
|
|
+ struct xhci_erst_entry *entries;
|
|
+ unsigned int num_entries;
|
|
+ dma_addr_t erst_dma_addr;
|
|
+ unsigned int erst_size;
|
|
+};
|
|
+
|
|
+struct s3_save {
|
|
+ u32 command;
|
|
+ u32 dev_nt;
|
|
+ u64 dcbaa_ptr;
|
|
+ u32 config_reg;
|
|
+ u32 irq_pending;
|
|
+ u32 irq_control;
|
|
+ u32 erst_size;
|
|
+ u64 erst_base;
|
|
+ u64 erst_dequeue;
|
|
+};
|
|
+
|
|
+struct xhci_bus_state {
|
|
+ long unsigned int bus_suspended;
|
|
+ long unsigned int next_statechange;
|
|
+ u32 port_c_suspend;
|
|
+ u32 suspended_ports;
|
|
+ u32 port_remote_wakeup;
|
|
+ long unsigned int resume_done[31];
|
|
+ long unsigned int resuming_ports;
|
|
+ long unsigned int rexit_ports;
|
|
+ struct completion rexit_done[31];
|
|
+};
|
|
+
|
|
+struct xhci_port;
|
|
+
|
|
+struct xhci_hub {
|
|
+ struct xhci_port **ports;
|
|
+ unsigned int num_ports;
|
|
+ struct usb_hcd *hcd;
|
|
+ u8 maj_rev;
|
|
+ u8 min_rev;
|
|
+};
|
|
+
|
|
+struct xhci_device_context_array;
|
|
+
|
|
+struct xhci_scratchpad;
|
|
+
|
|
+struct xhci_virt_device;
|
|
+
|
|
+struct xhci_root_port_bw_info;
|
|
+
|
|
+struct xhci_port_cap;
|
|
+
|
|
+struct xhci_hcd {
|
|
+ struct usb_hcd *main_hcd;
|
|
+ struct usb_hcd *shared_hcd;
|
|
+ struct xhci_cap_regs *cap_regs;
|
|
+ struct xhci_op_regs *op_regs;
|
|
+ struct xhci_run_regs *run_regs;
|
|
+ struct xhci_doorbell_array *dba;
|
|
+ struct xhci_intr_reg *ir_set;
|
|
+ __u32 hcs_params1;
|
|
+ __u32 hcs_params2;
|
|
+ __u32 hcs_params3;
|
|
+ __u32 hcc_params;
|
|
+ __u32 hcc_params2;
|
|
+ spinlock_t lock;
|
|
+ u8 sbrn;
|
|
+ u16 hci_version;
|
|
+ u8 max_slots;
|
|
+ u8 max_interrupters;
|
|
+ u8 max_ports;
|
|
+ u8 isoc_threshold;
|
|
+ u32 imod_interval;
|
|
+ int event_ring_max;
|
|
+ int page_size;
|
|
+ int page_shift;
|
|
+ int msix_count;
|
|
+ struct clk *clk;
|
|
+ struct clk *reg_clk;
|
|
+ struct xhci_device_context_array *dcbaa;
|
|
+ struct xhci_ring *cmd_ring;
|
|
+ unsigned int cmd_ring_state;
|
|
+ struct list_head cmd_list;
|
|
+ unsigned int cmd_ring_reserved_trbs;
|
|
+ struct delayed_work cmd_timer;
|
|
+ struct completion cmd_ring_stop_completion;
|
|
+ struct xhci_command *current_cmd;
|
|
+ struct xhci_ring *event_ring;
|
|
+ struct xhci_erst erst;
|
|
+ struct xhci_scratchpad *scratchpad;
|
|
+ struct list_head lpm_failed_devs;
|
|
+ struct mutex mutex;
|
|
+ struct xhci_command *lpm_command;
|
|
+ struct xhci_virt_device *devs[256];
|
|
+ struct xhci_root_port_bw_info *rh_bw;
|
|
+ struct dma_pool___2 *device_pool;
|
|
+ struct dma_pool___2 *segment_pool;
|
|
+ struct dma_pool___2 *small_streams_pool;
|
|
+ struct dma_pool___2 *medium_streams_pool;
|
|
+ unsigned int xhc_state;
|
|
+ u32 command;
|
|
+ struct s3_save s3;
|
|
+ long long unsigned int quirks;
|
|
+ unsigned int num_active_eps;
|
|
+ unsigned int limit_active_eps;
|
|
+ struct xhci_bus_state bus_state[2];
|
|
+ struct xhci_port *hw_ports;
|
|
+ struct xhci_hub usb2_rhub;
|
|
+ struct xhci_hub usb3_rhub;
|
|
+ unsigned int sw_lpm_support: 1;
|
|
+ unsigned int hw_lpm_support: 1;
|
|
+ unsigned int broken_suspend: 1;
|
|
+ u32 *ext_caps;
|
|
+ unsigned int num_ext_caps;
|
|
+ struct xhci_port_cap *port_caps;
|
|
+ unsigned int num_port_caps;
|
|
+ struct timer_list comp_mode_recovery_timer;
|
|
+ u32 port_status_u0;
|
|
+ u16 test_mode;
|
|
+ struct dentry *debugfs_root;
|
|
+ struct dentry *debugfs_slots;
|
|
+ struct list_head regset_list;
|
|
+ void *dbc;
|
|
+ long unsigned int priv[0];
|
|
+};
|
|
+
|
|
+struct xhci_segment {
|
|
+ union xhci_trb *trbs;
|
|
+ struct xhci_segment *next;
|
|
+ dma_addr_t dma;
|
|
+ dma_addr_t bounce_dma;
|
|
+ void *bounce_buf;
|
|
+ unsigned int bounce_offs;
|
|
+ unsigned int bounce_len;
|
|
+};
|
|
+
|
|
+enum xhci_overhead_type {
|
|
+ LS_OVERHEAD_TYPE = 0,
|
|
+ FS_OVERHEAD_TYPE = 1,
|
|
+ HS_OVERHEAD_TYPE = 2,
|
|
+};
|
|
+
|
|
+struct xhci_interval_bw {
|
|
+ unsigned int num_packets;
|
|
+ struct list_head endpoints;
|
|
+ unsigned int overhead[3];
|
|
+};
|
|
+
|
|
+struct xhci_interval_bw_table {
|
|
+ unsigned int interval0_esit_payload;
|
|
+ struct xhci_interval_bw interval_bw[16];
|
|
+ unsigned int bw_used;
|
|
+ unsigned int ss_bw_in;
|
|
+ unsigned int ss_bw_out;
|
|
+};
|
|
+
|
|
+struct xhci_tt_bw_info;
|
|
+
|
|
+struct xhci_virt_device {
|
|
+ struct usb_device *udev;
|
|
+ struct xhci_container_ctx *out_ctx;
|
|
+ struct xhci_container_ctx *in_ctx;
|
|
+ struct xhci_virt_ep eps[31];
|
|
+ u8 fake_port;
|
|
+ u8 real_port;
|
|
+ struct xhci_interval_bw_table *bw_table;
|
|
+ struct xhci_tt_bw_info *tt_info;
|
|
+ long unsigned int flags;
|
|
+ u16 current_mel;
|
|
+ void *debugfs_private;
|
|
+};
|
|
+
|
|
+struct xhci_tt_bw_info {
|
|
+ struct list_head tt_list;
|
|
+ int slot_id;
|
|
+ int ttport;
|
|
+ struct xhci_interval_bw_table bw_table;
|
|
+ int active_eps;
|
|
+};
|
|
+
|
|
+struct xhci_root_port_bw_info {
|
|
+ struct list_head tts;
|
|
+ unsigned int num_active_tts;
|
|
+ struct xhci_interval_bw_table bw_table;
|
|
+};
|
|
+
|
|
+struct xhci_device_context_array {
|
|
+ __le64 dev_context_ptrs[256];
|
|
+ dma_addr_t dma;
|
|
+};
|
|
+
|
|
+enum xhci_setup_dev {
|
|
+ SETUP_CONTEXT_ONLY = 0,
|
|
+ SETUP_CONTEXT_ADDRESS = 1,
|
|
+};
|
|
+
|
|
+struct xhci_td {
|
|
+ struct list_head td_list;
|
|
+ struct list_head cancelled_td_list;
|
|
+ struct urb *urb;
|
|
+ struct xhci_segment *start_seg;
|
|
+ union xhci_trb *first_trb;
|
|
+ union xhci_trb *last_trb;
|
|
+ struct xhci_segment *bounce_seg;
|
|
+ bool urb_length_set;
|
|
+};
|
|
+
|
|
+struct xhci_dequeue_state {
|
|
+ struct xhci_segment *new_deq_seg;
|
|
+ union xhci_trb *new_deq_ptr;
|
|
+ int new_cycle_state;
|
|
+ unsigned int stream_id;
|
|
+};
|
|
+
|
|
+struct xhci_erst_entry {
|
|
+ __le64 seg_addr;
|
|
+ __le32 seg_size;
|
|
+ __le32 rsvd;
|
|
+};
|
|
+
|
|
+struct xhci_scratchpad {
|
|
+ u64 *sp_array;
|
|
+ dma_addr_t sp_dma;
|
|
+ void **sp_buffers;
|
|
+};
|
|
+
|
|
+struct urb_priv___3 {
|
|
+ int num_tds;
|
|
+ int num_tds_done;
|
|
+ struct xhci_td td[0];
|
|
+};
|
|
+
|
|
+struct xhci_port_cap {
|
|
+ u32 *psi;
|
|
+ u8 psi_count;
|
|
+ u8 psi_uid_count;
|
|
+ u8 maj_rev;
|
|
+ u8 min_rev;
|
|
+};
|
|
+
|
|
+struct xhci_port {
|
|
+ __le32 *addr;
|
|
+ int hw_portnum;
|
|
+ int hcd_portnum;
|
|
+ struct xhci_hub *rhub;
|
|
+ struct xhci_port_cap *port_cap;
|
|
+};
|
|
+
|
|
+struct xhci_driver_overrides {
|
|
+ size_t extra_priv_size;
|
|
+ int (*reset)(struct usb_hcd *);
|
|
+ int (*start)(struct usb_hcd *);
|
|
+};
|
|
+
|
|
+typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
|
|
+
|
|
+enum xhci_ep_reset_type {
|
|
+ EP_HARD_RESET = 0,
|
|
+ EP_SOFT_RESET = 1,
|
|
+};
|
|
+
|
|
+struct kfifo {
|
|
+ union {
|
|
+ struct __kfifo kfifo;
|
|
+ unsigned char *type;
|
|
+ const unsigned char *const_type;
|
|
+ char (*rectype)[0];
|
|
+ void *ptr;
|
|
+ const void *ptr_const;
|
|
+ };
|
|
+ unsigned char buf[0];
|
|
+};
|
|
+
|
|
+struct dbc_regs {
|
|
+ __le32 capability;
|
|
+ __le32 doorbell;
|
|
+ __le32 ersts;
|
|
+ __le32 __reserved_0;
|
|
+ __le64 erstba;
|
|
+ __le64 erdp;
|
|
+ __le32 control;
|
|
+ __le32 status;
|
|
+ __le32 portsc;
|
|
+ __le32 __reserved_1;
|
|
+ __le64 dccp;
|
|
+ __le32 devinfo1;
|
|
+ __le32 devinfo2;
|
|
+};
|
|
+
|
|
+struct dbc_str_descs {
|
|
+ char string0[64];
|
|
+ char manufacturer[64];
|
|
+ char product[64];
|
|
+ char serial[64];
|
|
+};
|
|
+
|
|
+enum dbc_state {
|
|
+ DS_DISABLED = 0,
|
|
+ DS_INITIALIZED = 1,
|
|
+ DS_ENABLED = 2,
|
|
+ DS_CONNECTED = 3,
|
|
+ DS_CONFIGURED = 4,
|
|
+ DS_STALLED = 5,
|
|
+};
|
|
+
|
|
+struct dbc_ep;
|
|
+
|
|
+struct dbc_request {
|
|
+ void *buf;
|
|
+ unsigned int length;
|
|
+ dma_addr_t dma;
|
|
+ void (*complete)(struct xhci_hcd *, struct dbc_request *);
|
|
+ struct list_head list_pool;
|
|
+ int status;
|
|
+ unsigned int actual;
|
|
+ struct dbc_ep *dep;
|
|
+ struct list_head list_pending;
|
|
+ dma_addr_t trb_dma;
|
|
+ union xhci_trb *trb;
|
|
+ unsigned int direction: 1;
|
|
+};
|
|
+
|
|
+struct xhci_dbc;
|
|
+
|
|
+struct dbc_ep {
|
|
+ struct xhci_dbc *dbc;
|
|
+ struct list_head list_pending;
|
|
+ struct xhci_ring *ring;
|
|
+ unsigned int direction: 1;
|
|
+};
|
|
+
|
|
+struct dbc_port {
|
|
+ struct tty_port port;
|
|
+ spinlock_t port_lock;
|
|
+ struct list_head read_pool;
|
|
+ struct list_head read_queue;
|
|
+ unsigned int n_read;
|
|
+ struct tasklet_struct push;
|
|
+ struct list_head write_pool;
|
|
+ struct kfifo write_fifo;
|
|
+ bool registered;
|
|
+ struct dbc_ep *in;
|
|
+ struct dbc_ep *out;
|
|
+};
|
|
+
|
|
+struct xhci_dbc {
|
|
+ spinlock_t lock;
|
|
+ struct xhci_hcd *xhci;
|
|
+ struct dbc_regs *regs;
|
|
+ struct xhci_ring *ring_evt;
|
|
+ struct xhci_ring *ring_in;
|
|
+ struct xhci_ring *ring_out;
|
|
+ struct xhci_erst erst;
|
|
+ struct xhci_container_ctx *ctx;
|
|
+ struct dbc_str_descs *string;
|
|
+ dma_addr_t string_dma;
|
|
+ size_t string_size;
|
|
+ enum dbc_state state;
|
|
+ struct delayed_work event_work;
|
|
+ unsigned int resume_required: 1;
|
|
+ struct dbc_ep eps[2];
|
|
+ struct dbc_port port;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xhci_log_msg {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_msg;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xhci_log_ctx {
|
|
+ struct trace_entry ent;
|
|
+ int ctx_64;
|
|
+ unsigned int ctx_type;
|
|
+ dma_addr_t ctx_dma;
|
|
+ u8 *ctx_va;
|
|
+ unsigned int ctx_ep_num;
|
|
+ int slot_id;
|
|
+ u32 __data_loc_ctx_data;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xhci_log_trb {
|
|
+ struct trace_entry ent;
|
|
+ u32 type;
|
|
+ u32 field0;
|
|
+ u32 field1;
|
|
+ u32 field2;
|
|
+ u32 field3;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xhci_log_free_virt_dev {
|
|
+ struct trace_entry ent;
|
|
+ void *vdev;
|
|
+ long long unsigned int out_ctx;
|
|
+ long long unsigned int in_ctx;
|
|
+ u8 fake_port;
|
|
+ u8 real_port;
|
|
+ u16 current_mel;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xhci_log_virt_dev {
|
|
+ struct trace_entry ent;
|
|
+ void *vdev;
|
|
+ long long unsigned int out_ctx;
|
|
+ long long unsigned int in_ctx;
|
|
+ int devnum;
|
|
+ int state;
|
|
+ int speed;
|
|
+ u8 portnum;
|
|
+ u8 level;
|
|
+ int slot_id;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xhci_log_urb {
|
|
+ struct trace_entry ent;
|
|
+ void *urb;
|
|
+ unsigned int pipe;
|
|
+ unsigned int stream;
|
|
+ int status;
|
|
+ unsigned int flags;
|
|
+ int num_mapped_sgs;
|
|
+ int num_sgs;
|
|
+ int length;
|
|
+ int actual;
|
|
+ int epnum;
|
|
+ int dir_in;
|
|
+ int type;
|
|
+ int slot_id;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xhci_log_ep_ctx {
|
|
+ struct trace_entry ent;
|
|
+ u32 info;
|
|
+ u32 info2;
|
|
+ u64 deq;
|
|
+ u32 tx_info;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xhci_log_slot_ctx {
|
|
+ struct trace_entry ent;
|
|
+ u32 info;
|
|
+ u32 info2;
|
|
+ u32 tt_info;
|
|
+ u32 state;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xhci_log_ring {
|
|
+ struct trace_entry ent;
|
|
+ u32 type;
|
|
+ void *ring;
|
|
+ dma_addr_t enq;
|
|
+ dma_addr_t deq;
|
|
+ dma_addr_t enq_seg;
|
|
+ dma_addr_t deq_seg;
|
|
+ unsigned int num_segs;
|
|
+ unsigned int stream_id;
|
|
+ unsigned int cycle_state;
|
|
+ unsigned int num_trbs_free;
|
|
+ unsigned int bounce_buf_len;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xhci_log_portsc {
|
|
+ struct trace_entry ent;
|
|
+ u32 portnum;
|
|
+ u32 portsc;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_xhci_dbc_log_request {
|
|
+ struct trace_entry ent;
|
|
+ struct dbc_request *req;
|
|
+ bool dir;
|
|
+ unsigned int actual;
|
|
+ unsigned int length;
|
|
+ int status;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_xhci_log_msg {
|
|
+ u32 msg;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_xhci_log_ctx {
|
|
+ u32 ctx_data;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_xhci_log_trb {};
|
|
+
|
|
+struct trace_event_data_offsets_xhci_log_free_virt_dev {};
|
|
+
|
|
+struct trace_event_data_offsets_xhci_log_virt_dev {};
|
|
+
|
|
+struct trace_event_data_offsets_xhci_log_urb {};
|
|
+
|
|
+struct trace_event_data_offsets_xhci_log_ep_ctx {};
|
|
+
|
|
+struct trace_event_data_offsets_xhci_log_slot_ctx {};
|
|
+
|
|
+struct trace_event_data_offsets_xhci_log_ring {};
|
|
+
|
|
+struct trace_event_data_offsets_xhci_log_portsc {};
|
|
+
|
|
+struct trace_event_data_offsets_xhci_dbc_log_request {};
|
|
+
|
|
+struct usb_string_descriptor {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __le16 wData[1];
|
|
+};
|
|
+
|
|
+struct dbc_info_context {
|
|
+ __le64 string0;
|
|
+ __le64 manufacturer;
|
|
+ __le64 product;
|
|
+ __le64 serial;
|
|
+ __le32 length;
|
|
+ __le32 __reserved_0[7];
|
|
+};
|
|
+
|
|
+enum evtreturn {
|
|
+ EVT_ERR = -1,
|
|
+ EVT_DONE = 0,
|
|
+ EVT_GSER = 1,
|
|
+ EVT_DISC = 2,
|
|
+};
|
|
+
|
|
+struct xhci_regset {
|
|
+ char name[32];
|
|
+ struct debugfs_regset32 regset;
|
|
+ size_t nregs;
|
|
+ struct dentry *parent;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct xhci_file_map {
|
|
+ const char *name;
|
|
+ int (*show)(struct seq_file *, void *);
|
|
+};
|
|
+
|
|
+struct xhci_ep_priv {
|
|
+ char name[32];
|
|
+ struct dentry *root;
|
|
+};
|
|
+
|
|
+struct xhci_slot_priv {
|
|
+ char name[32];
|
|
+ struct dentry *root;
|
|
+ struct xhci_ep_priv *eps[31];
|
|
+ struct xhci_virt_device *dev;
|
|
+};
|
|
+
|
|
+struct async_icount {
|
|
+ __u32 cts;
|
|
+ __u32 dsr;
|
|
+ __u32 rng;
|
|
+ __u32 dcd;
|
|
+ __u32 tx;
|
|
+ __u32 rx;
|
|
+ __u32 frame;
|
|
+ __u32 parity;
|
|
+ __u32 overrun;
|
|
+ __u32 brk;
|
|
+ __u32 buf_overrun;
|
|
+};
|
|
+
|
|
+struct usb_serial;
|
|
+
|
|
+struct usb_serial_port {
|
|
+ struct usb_serial *serial;
|
|
+ struct tty_port port;
|
|
+ spinlock_t lock;
|
|
+ u32 minor;
|
|
+ u8 port_number;
|
|
+ unsigned char *interrupt_in_buffer;
|
|
+ struct urb *interrupt_in_urb;
|
|
+ __u8 interrupt_in_endpointAddress;
|
|
+ unsigned char *interrupt_out_buffer;
|
|
+ int interrupt_out_size;
|
|
+ struct urb *interrupt_out_urb;
|
|
+ __u8 interrupt_out_endpointAddress;
|
|
+ unsigned char *bulk_in_buffer;
|
|
+ int bulk_in_size;
|
|
+ struct urb *read_urb;
|
|
+ __u8 bulk_in_endpointAddress;
|
|
+ unsigned char *bulk_in_buffers[2];
|
|
+ struct urb *read_urbs[2];
|
|
+ long unsigned int read_urbs_free;
|
|
+ unsigned char *bulk_out_buffer;
|
|
+ int bulk_out_size;
|
|
+ struct urb *write_urb;
|
|
+ struct kfifo write_fifo;
|
|
+ unsigned char *bulk_out_buffers[2];
|
|
+ struct urb *write_urbs[2];
|
|
+ long unsigned int write_urbs_free;
|
|
+ __u8 bulk_out_endpointAddress;
|
|
+ struct async_icount icount;
|
|
+ int tx_bytes;
|
|
+ long unsigned int flags;
|
|
+ wait_queue_head_t write_wait;
|
|
+ struct work_struct work;
|
|
+ char throttled;
|
|
+ char throttle_req;
|
|
+ long unsigned int sysrq;
|
|
+ struct device dev;
|
|
+};
|
|
+
|
|
+struct usb_serial_driver;
|
|
+
|
|
+struct usb_serial {
|
|
+ struct usb_device *dev;
|
|
+ struct usb_serial_driver *type;
|
|
+ struct usb_interface *interface;
|
|
+ unsigned char disconnected: 1;
|
|
+ unsigned char suspending: 1;
|
|
+ unsigned char attached: 1;
|
|
+ unsigned char minors_reserved: 1;
|
|
+ unsigned char num_ports;
|
|
+ unsigned char num_port_pointers;
|
|
+ unsigned char num_interrupt_in;
|
|
+ unsigned char num_interrupt_out;
|
|
+ unsigned char num_bulk_in;
|
|
+ unsigned char num_bulk_out;
|
|
+ struct usb_serial_port *port[16];
|
|
+ struct kref kref;
|
|
+ struct mutex disc_mutex;
|
|
+ void *private;
|
|
+};
|
|
+
|
|
+struct usb_serial_endpoints;
|
|
+
|
|
+struct usb_serial_driver {
|
|
+ const char *description;
|
|
+ const struct usb_device_id *id_table;
|
|
+ struct list_head driver_list;
|
|
+ struct device_driver driver;
|
|
+ struct usb_driver *usb_driver;
|
|
+ struct usb_dynids dynids;
|
|
+ unsigned char num_ports;
|
|
+ unsigned char num_bulk_in;
|
|
+ unsigned char num_bulk_out;
|
|
+ unsigned char num_interrupt_in;
|
|
+ unsigned char num_interrupt_out;
|
|
+ size_t bulk_in_size;
|
|
+ size_t bulk_out_size;
|
|
+ int (*probe)(struct usb_serial *, const struct usb_device_id *);
|
|
+ int (*attach)(struct usb_serial *);
|
|
+ int (*calc_num_ports)(struct usb_serial *, struct usb_serial_endpoints *);
|
|
+ void (*disconnect)(struct usb_serial *);
|
|
+ void (*release)(struct usb_serial *);
|
|
+ int (*port_probe)(struct usb_serial_port *);
|
|
+ int (*port_remove)(struct usb_serial_port *);
|
|
+ int (*suspend)(struct usb_serial *, pm_message_t);
|
|
+ int (*resume)(struct usb_serial *);
|
|
+ int (*reset_resume)(struct usb_serial *);
|
|
+ int (*open)(struct tty_struct *, struct usb_serial_port *);
|
|
+ void (*close)(struct usb_serial_port *);
|
|
+ int (*write)(struct tty_struct *, struct usb_serial_port *, const unsigned char *, int);
|
|
+ int (*write_room)(struct tty_struct *);
|
|
+ int (*ioctl)(struct tty_struct *, unsigned int, long unsigned int);
|
|
+ void (*set_termios)(struct tty_struct *, struct usb_serial_port *, struct ktermios *);
|
|
+ void (*break_ctl)(struct tty_struct *, int);
|
|
+ int (*chars_in_buffer)(struct tty_struct *);
|
|
+ void (*wait_until_sent)(struct tty_struct *, long int);
|
|
+ bool (*tx_empty)(struct usb_serial_port *);
|
|
+ void (*throttle)(struct tty_struct *);
|
|
+ void (*unthrottle)(struct tty_struct *);
|
|
+ int (*tiocmget)(struct tty_struct *);
|
|
+ int (*tiocmset)(struct tty_struct *, unsigned int, unsigned int);
|
|
+ int (*tiocmiwait)(struct tty_struct *, long unsigned int);
|
|
+ int (*get_icount)(struct tty_struct *, struct serial_icounter_struct *);
|
|
+ void (*dtr_rts)(struct usb_serial_port *, int);
|
|
+ int (*carrier_raised)(struct usb_serial_port *);
|
|
+ void (*init_termios)(struct tty_struct *);
|
|
+ void (*read_int_callback)(struct urb *);
|
|
+ void (*write_int_callback)(struct urb *);
|
|
+ void (*read_bulk_callback)(struct urb *);
|
|
+ void (*write_bulk_callback)(struct urb *);
|
|
+ void (*process_read_urb)(struct urb *);
|
|
+ int (*prepare_write_buffer)(struct usb_serial_port *, void *, size_t);
|
|
+};
|
|
+
|
|
+struct usb_serial_endpoints {
|
|
+ unsigned char num_bulk_in;
|
|
+ unsigned char num_bulk_out;
|
|
+ unsigned char num_interrupt_in;
|
|
+ unsigned char num_interrupt_out;
|
|
+ struct usb_endpoint_descriptor *bulk_in[16];
|
|
+ struct usb_endpoint_descriptor *bulk_out[16];
|
|
+ struct usb_endpoint_descriptor *interrupt_in[16];
|
|
+ struct usb_endpoint_descriptor *interrupt_out[16];
|
|
+};
|
|
+
|
|
+struct usbcons_info {
|
|
+ int magic;
|
|
+ int break_flag;
|
|
+ struct usb_serial_port *port;
|
|
+};
|
|
+
|
|
+struct usb_debug_descriptor {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __u8 bDebugInEndpoint;
|
|
+ __u8 bDebugOutEndpoint;
|
|
+};
|
|
+
|
|
+struct ehci_dev {
|
|
+ u32 bus;
|
|
+ u32 slot;
|
|
+ u32 func;
|
|
+};
|
|
+
|
|
+typedef void (*set_debug_port_t)(int);
|
|
+
|
|
+struct usb_hcd___2;
|
|
+
|
|
+struct xdbc_regs {
|
|
+ __le32 capability;
|
|
+ __le32 doorbell;
|
|
+ __le32 ersts;
|
|
+ __le32 __reserved_0;
|
|
+ __le64 erstba;
|
|
+ __le64 erdp;
|
|
+ __le32 control;
|
|
+ __le32 status;
|
|
+ __le32 portsc;
|
|
+ __le32 __reserved_1;
|
|
+ __le64 dccp;
|
|
+ __le32 devinfo1;
|
|
+ __le32 devinfo2;
|
|
+};
|
|
+
|
|
+struct xdbc_trb {
|
|
+ __le32 field[4];
|
|
+};
|
|
+
|
|
+struct xdbc_erst_entry {
|
|
+ __le64 seg_addr;
|
|
+ __le32 seg_size;
|
|
+ __le32 __reserved_0;
|
|
+};
|
|
+
|
|
+struct xdbc_info_context {
|
|
+ __le64 string0;
|
|
+ __le64 manufacturer;
|
|
+ __le64 product;
|
|
+ __le64 serial;
|
|
+ __le32 length;
|
|
+ __le32 __reserved_0[7];
|
|
+};
|
|
+
|
|
+struct xdbc_ep_context {
|
|
+ __le32 ep_info1;
|
|
+ __le32 ep_info2;
|
|
+ __le64 deq;
|
|
+ __le32 tx_info;
|
|
+ __le32 __reserved_0[11];
|
|
+};
|
|
+
|
|
+struct xdbc_context {
|
|
+ struct xdbc_info_context info;
|
|
+ struct xdbc_ep_context out;
|
|
+ struct xdbc_ep_context in;
|
|
+};
|
|
+
|
|
+struct xdbc_strings {
|
|
+ char string0[64];
|
|
+ char manufacturer[64];
|
|
+ char product[64];
|
|
+ char serial[64];
|
|
+};
|
|
+
|
|
+struct xdbc_segment {
|
|
+ struct xdbc_trb *trbs;
|
|
+ dma_addr_t dma;
|
|
+};
|
|
+
|
|
+struct xdbc_ring {
|
|
+ struct xdbc_segment *segment;
|
|
+ struct xdbc_trb *enqueue;
|
|
+ struct xdbc_trb *dequeue;
|
|
+ u32 cycle_state;
|
|
+};
|
|
+
|
|
+struct xdbc_state {
|
|
+ u16 vendor;
|
|
+ u16 device;
|
|
+ u32 bus;
|
|
+ u32 dev;
|
|
+ u32 func;
|
|
+ void *xhci_base;
|
|
+ u64 xhci_start;
|
|
+ size_t xhci_length;
|
|
+ int port_number;
|
|
+ struct xdbc_regs *xdbc_reg;
|
|
+ dma_addr_t table_dma;
|
|
+ void *table_base;
|
|
+ dma_addr_t erst_dma;
|
|
+ size_t erst_size;
|
|
+ void *erst_base;
|
|
+ struct xdbc_ring evt_ring;
|
|
+ struct xdbc_segment evt_seg;
|
|
+ dma_addr_t dbcc_dma;
|
|
+ size_t dbcc_size;
|
|
+ void *dbcc_base;
|
|
+ dma_addr_t string_dma;
|
|
+ size_t string_size;
|
|
+ void *string_base;
|
|
+ struct xdbc_ring out_ring;
|
|
+ struct xdbc_segment out_seg;
|
|
+ void *out_buf;
|
|
+ dma_addr_t out_dma;
|
|
+ struct xdbc_ring in_ring;
|
|
+ struct xdbc_segment in_seg;
|
|
+ void *in_buf;
|
|
+ dma_addr_t in_dma;
|
|
+ u32 flags;
|
|
+ raw_spinlock_t lock;
|
|
+};
|
|
+
|
|
+enum usb_dr_mode {
|
|
+ USB_DR_MODE_UNKNOWN = 0,
|
|
+ USB_DR_MODE_HOST = 1,
|
|
+ USB_DR_MODE_PERIPHERAL = 2,
|
|
+ USB_DR_MODE_OTG = 3,
|
|
+};
|
|
+
|
|
+struct typec_device_id {
|
|
+ __u16 svid;
|
|
+ __u8 mode;
|
|
+ kernel_ulong_t driver_data;
|
|
+};
|
|
+
|
|
+enum typec_port_type {
|
|
+ TYPEC_PORT_SRC = 0,
|
|
+ TYPEC_PORT_SNK = 1,
|
|
+ TYPEC_PORT_DRP = 2,
|
|
+};
|
|
+
|
|
+enum typec_port_data {
|
|
+ TYPEC_PORT_DFP = 0,
|
|
+ TYPEC_PORT_UFP = 1,
|
|
+ TYPEC_PORT_DRD = 2,
|
|
+};
|
|
+
|
|
+enum typec_plug_type {
|
|
+ USB_PLUG_NONE = 0,
|
|
+ USB_PLUG_TYPE_A = 1,
|
|
+ USB_PLUG_TYPE_B = 2,
|
|
+ USB_PLUG_TYPE_C = 3,
|
|
+ USB_PLUG_CAPTIVE = 4,
|
|
+};
|
|
+
|
|
+enum typec_data_role {
|
|
+ TYPEC_DEVICE = 0,
|
|
+ TYPEC_HOST = 1,
|
|
+};
|
|
+
|
|
+enum typec_role {
|
|
+ TYPEC_SINK = 0,
|
|
+ TYPEC_SOURCE = 1,
|
|
+};
|
|
+
|
|
+enum typec_pwr_opmode {
|
|
+ TYPEC_PWR_MODE_USB = 0,
|
|
+ TYPEC_PWR_MODE_1_5A = 1,
|
|
+ TYPEC_PWR_MODE_3_0A = 2,
|
|
+ TYPEC_PWR_MODE_PD = 3,
|
|
+};
|
|
+
|
|
+enum typec_accessory {
|
|
+ TYPEC_ACCESSORY_NONE = 0,
|
|
+ TYPEC_ACCESSORY_AUDIO = 1,
|
|
+ TYPEC_ACCESSORY_DEBUG = 2,
|
|
+};
|
|
+
|
|
+enum typec_orientation {
|
|
+ TYPEC_ORIENTATION_NONE = 0,
|
|
+ TYPEC_ORIENTATION_NORMAL = 1,
|
|
+ TYPEC_ORIENTATION_REVERSE = 2,
|
|
+};
|
|
+
|
|
+struct usb_pd_identity {
|
|
+ u32 id_header;
|
|
+ u32 cert_stat;
|
|
+ u32 product;
|
|
+};
|
|
+
|
|
+struct typec_altmode_desc {
|
|
+ u16 svid;
|
|
+ u8 mode;
|
|
+ u32 vdo;
|
|
+ enum typec_port_data roles;
|
|
+};
|
|
+
|
|
+enum typec_plug_index {
|
|
+ TYPEC_PLUG_SOP_P = 0,
|
|
+ TYPEC_PLUG_SOP_PP = 1,
|
|
+};
|
|
+
|
|
+struct typec_plug_desc {
|
|
+ enum typec_plug_index index;
|
|
+};
|
|
+
|
|
+struct typec_cable_desc {
|
|
+ enum typec_plug_type type;
|
|
+ unsigned int active: 1;
|
|
+ struct usb_pd_identity *identity;
|
|
+};
|
|
+
|
|
+struct typec_partner_desc {
|
|
+ unsigned int usb_pd: 1;
|
|
+ enum typec_accessory accessory;
|
|
+ struct usb_pd_identity *identity;
|
|
+};
|
|
+
|
|
+struct typec_switch;
|
|
+
|
|
+struct typec_mux;
|
|
+
|
|
+struct typec_capability {
|
|
+ enum typec_port_type type;
|
|
+ enum typec_port_data data;
|
|
+ u16 revision;
|
|
+ u16 pd_revision;
|
|
+ int prefer_role;
|
|
+ enum typec_accessory accessory[3];
|
|
+ struct typec_switch *sw;
|
|
+ struct typec_mux *mux;
|
|
+ struct fwnode_handle *fwnode;
|
|
+ int (*try_role)(const struct typec_capability *, int);
|
|
+ int (*dr_set)(const struct typec_capability *, enum typec_data_role);
|
|
+ int (*pr_set)(const struct typec_capability *, enum typec_role);
|
|
+ int (*vconn_set)(const struct typec_capability *, enum typec_role);
|
|
+ int (*port_type_set)(const struct typec_capability *, enum typec_port_type);
|
|
+};
|
|
+
|
|
+struct typec_switch {
|
|
+ struct device *dev;
|
|
+ struct list_head entry;
|
|
+ int (*set)(struct typec_switch *, enum typec_orientation);
|
|
+};
|
|
+
|
|
+struct typec_mux {
|
|
+ struct device *dev;
|
|
+ struct list_head entry;
|
|
+ int (*set)(struct typec_mux *, int);
|
|
+};
|
|
+
|
|
+struct typec_altmode_ops;
|
|
+
|
|
+struct typec_altmode {
|
|
+ struct device dev;
|
|
+ u16 svid;
|
|
+ int mode;
|
|
+ u32 vdo;
|
|
+ unsigned int active: 1;
|
|
+ char *desc;
|
|
+ const struct typec_altmode_ops *ops;
|
|
+};
|
|
+
|
|
+struct typec_altmode_ops {
|
|
+ int (*enter)(struct typec_altmode *);
|
|
+ int (*exit)(struct typec_altmode *);
|
|
+ void (*attention)(struct typec_altmode *, u32);
|
|
+ int (*vdm)(struct typec_altmode *, const u32, const u32 *, int);
|
|
+ int (*notify)(struct typec_altmode *, long unsigned int, void *);
|
|
+ int (*activate)(struct typec_altmode *, int);
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TYPEC_STATE_SAFE = 0,
|
|
+ TYPEC_STATE_USB = 1,
|
|
+ TYPEC_STATE_MODAL = 2,
|
|
+};
|
|
+
|
|
+struct altmode {
|
|
+ unsigned int id;
|
|
+ struct typec_altmode adev;
|
|
+ struct typec_mux *mux;
|
|
+ enum typec_port_data roles;
|
|
+ struct attribute *attrs[5];
|
|
+ char group_name[8];
|
|
+ struct attribute_group group;
|
|
+ const struct attribute_group *groups[2];
|
|
+ struct altmode *partner;
|
|
+ struct altmode *plug[2];
|
|
+ struct blocking_notifier_head nh;
|
|
+};
|
|
+
|
|
+struct typec_plug {
|
|
+ struct device dev;
|
|
+ enum typec_plug_index index;
|
|
+ struct ida mode_ids;
|
|
+};
|
|
+
|
|
+struct typec_cable {
|
|
+ struct device dev;
|
|
+ enum typec_plug_type type;
|
|
+ struct usb_pd_identity *identity;
|
|
+ unsigned int active: 1;
|
|
+};
|
|
+
|
|
+struct typec_partner {
|
|
+ struct device dev;
|
|
+ unsigned int usb_pd: 1;
|
|
+ struct usb_pd_identity *identity;
|
|
+ enum typec_accessory accessory;
|
|
+ struct ida mode_ids;
|
|
+};
|
|
+
|
|
+struct typec_port {
|
|
+ unsigned int id;
|
|
+ struct device dev;
|
|
+ struct ida mode_ids;
|
|
+ int prefer_role;
|
|
+ enum typec_data_role data_role;
|
|
+ enum typec_role pwr_role;
|
|
+ enum typec_role vconn_role;
|
|
+ enum typec_pwr_opmode pwr_opmode;
|
|
+ enum typec_port_type port_type;
|
|
+ struct mutex port_type_lock;
|
|
+ enum typec_orientation orientation;
|
|
+ struct typec_switch *sw;
|
|
+ struct typec_mux *mux;
|
|
+ const struct typec_capability *cap;
|
|
+};
|
|
+
|
|
+struct typec_altmode_driver {
|
|
+ const struct typec_device_id *id_table;
|
|
+ int (*probe)(struct typec_altmode *);
|
|
+ void (*remove)(struct typec_altmode *);
|
|
+ struct device_driver driver;
|
|
+};
|
|
+
|
|
+struct typec_displayport_data {
|
|
+ u32 status;
|
|
+ u32 conf;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ DP_PIN_ASSIGN_A = 0,
|
|
+ DP_PIN_ASSIGN_B = 1,
|
|
+ DP_PIN_ASSIGN_C = 2,
|
|
+ DP_PIN_ASSIGN_D = 3,
|
|
+ DP_PIN_ASSIGN_E = 4,
|
|
+ DP_PIN_ASSIGN_F = 5,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ DP_CONF_USB = 0,
|
|
+ DP_CONF_DFP_D = 1,
|
|
+ DP_CONF_UFP_D = 2,
|
|
+ DP_CONF_DUAL_D = 3,
|
|
+};
|
|
+
|
|
+enum dp_state {
|
|
+ DP_STATE_IDLE = 0,
|
|
+ DP_STATE_ENTER = 1,
|
|
+ DP_STATE_UPDATE = 2,
|
|
+ DP_STATE_CONFIGURE = 3,
|
|
+ DP_STATE_EXIT = 4,
|
|
+};
|
|
+
|
|
+struct dp_altmode {
|
|
+ struct typec_displayport_data data;
|
|
+ enum dp_state state;
|
|
+ struct mutex lock;
|
|
+ struct work_struct work;
|
|
+ struct typec_altmode *alt;
|
|
+ const struct typec_altmode *port;
|
|
+};
|
|
+
|
|
+enum pd_ctrl_msg_type {
|
|
+ PD_CTRL_GOOD_CRC = 1,
|
|
+ PD_CTRL_GOTO_MIN = 2,
|
|
+ PD_CTRL_ACCEPT = 3,
|
|
+ PD_CTRL_REJECT = 4,
|
|
+ PD_CTRL_PING = 5,
|
|
+ PD_CTRL_PS_RDY = 6,
|
|
+ PD_CTRL_GET_SOURCE_CAP = 7,
|
|
+ PD_CTRL_GET_SINK_CAP = 8,
|
|
+ PD_CTRL_DR_SWAP = 9,
|
|
+ PD_CTRL_PR_SWAP = 10,
|
|
+ PD_CTRL_VCONN_SWAP = 11,
|
|
+ PD_CTRL_WAIT = 12,
|
|
+ PD_CTRL_SOFT_RESET = 13,
|
|
+ PD_CTRL_NOT_SUPP = 16,
|
|
+ PD_CTRL_GET_SOURCE_CAP_EXT = 17,
|
|
+ PD_CTRL_GET_STATUS = 18,
|
|
+ PD_CTRL_FR_SWAP = 19,
|
|
+ PD_CTRL_GET_PPS_STATUS = 20,
|
|
+ PD_CTRL_GET_COUNTRY_CODES = 21,
|
|
+};
|
|
+
|
|
+enum pd_data_msg_type {
|
|
+ PD_DATA_SOURCE_CAP = 1,
|
|
+ PD_DATA_REQUEST = 2,
|
|
+ PD_DATA_BIST = 3,
|
|
+ PD_DATA_SINK_CAP = 4,
|
|
+ PD_DATA_BATT_STATUS = 5,
|
|
+ PD_DATA_ALERT = 6,
|
|
+ PD_DATA_GET_COUNTRY_INFO = 7,
|
|
+ PD_DATA_VENDOR_DEF = 15,
|
|
+};
|
|
+
|
|
+enum pd_ext_msg_type {
|
|
+ PD_EXT_SOURCE_CAP_EXT = 1,
|
|
+ PD_EXT_STATUS = 2,
|
|
+ PD_EXT_GET_BATT_CAP = 3,
|
|
+ PD_EXT_GET_BATT_STATUS = 4,
|
|
+ PD_EXT_BATT_CAP = 5,
|
|
+ PD_EXT_GET_MANUFACTURER_INFO = 6,
|
|
+ PD_EXT_MANUFACTURER_INFO = 7,
|
|
+ PD_EXT_SECURITY_REQUEST = 8,
|
|
+ PD_EXT_SECURITY_RESPONSE = 9,
|
|
+ PD_EXT_FW_UPDATE_REQUEST = 10,
|
|
+ PD_EXT_FW_UPDATE_RESPONSE = 11,
|
|
+ PD_EXT_PPS_STATUS = 12,
|
|
+ PD_EXT_COUNTRY_INFO = 13,
|
|
+ PD_EXT_COUNTRY_CODES = 14,
|
|
+};
|
|
+
|
|
+struct pd_chunked_ext_message_data {
|
|
+ __le16 header;
|
|
+ u8 data[26];
|
|
+};
|
|
+
|
|
+struct pd_message {
|
|
+ __le16 header;
|
|
+ union {
|
|
+ __le32 payload[7];
|
|
+ struct pd_chunked_ext_message_data ext_msg;
|
|
+ };
|
|
+} __attribute__((packed));
|
|
+
|
|
+enum pd_pdo_type {
|
|
+ PDO_TYPE_FIXED = 0,
|
|
+ PDO_TYPE_BATT = 1,
|
|
+ PDO_TYPE_VAR = 2,
|
|
+ PDO_TYPE_APDO = 3,
|
|
+};
|
|
+
|
|
+enum pd_apdo_type {
|
|
+ APDO_TYPE_PPS = 0,
|
|
+};
|
|
+
|
|
+enum usb_pd_ext_sdb_fields {
|
|
+ USB_PD_EXT_SDB_INTERNAL_TEMP = 0,
|
|
+ USB_PD_EXT_SDB_PRESENT_INPUT = 1,
|
|
+ USB_PD_EXT_SDB_PRESENT_BATT_INPUT = 2,
|
|
+ USB_PD_EXT_SDB_EVENT_FLAGS = 3,
|
|
+ USB_PD_EXT_SDB_TEMP_STATUS = 4,
|
|
+ USB_PD_EXT_SDB_DATA_SIZE = 5,
|
|
+};
|
|
+
|
|
+enum usb_role {
|
|
+ USB_ROLE_NONE = 0,
|
|
+ USB_ROLE_HOST = 1,
|
|
+ USB_ROLE_DEVICE = 2,
|
|
+};
|
|
+
|
|
+enum typec_cc_status {
|
|
+ TYPEC_CC_OPEN = 0,
|
|
+ TYPEC_CC_RA = 1,
|
|
+ TYPEC_CC_RD = 2,
|
|
+ TYPEC_CC_RP_DEF = 3,
|
|
+ TYPEC_CC_RP_1_5 = 4,
|
|
+ TYPEC_CC_RP_3_0 = 5,
|
|
+};
|
|
+
|
|
+enum typec_cc_polarity {
|
|
+ TYPEC_POLARITY_CC1 = 0,
|
|
+ TYPEC_POLARITY_CC2 = 1,
|
|
+};
|
|
+
|
|
+enum tcpm_transmit_status {
|
|
+ TCPC_TX_SUCCESS = 0,
|
|
+ TCPC_TX_DISCARDED = 1,
|
|
+ TCPC_TX_FAILED = 2,
|
|
+};
|
|
+
|
|
+enum tcpm_transmit_type {
|
|
+ TCPC_TX_SOP = 0,
|
|
+ TCPC_TX_SOP_PRIME = 1,
|
|
+ TCPC_TX_SOP_PRIME_PRIME = 2,
|
|
+ TCPC_TX_SOP_DEBUG_PRIME = 3,
|
|
+ TCPC_TX_SOP_DEBUG_PRIME_PRIME = 4,
|
|
+ TCPC_TX_HARD_RESET = 5,
|
|
+ TCPC_TX_CABLE_RESET = 6,
|
|
+ TCPC_TX_BIST_MODE_2 = 7,
|
|
+};
|
|
+
|
|
+struct tcpc_config {
|
|
+ const u32 *src_pdo;
|
|
+ unsigned int nr_src_pdo;
|
|
+ const u32 *snk_pdo;
|
|
+ unsigned int nr_snk_pdo;
|
|
+ const u32 *snk_vdo;
|
|
+ unsigned int nr_snk_vdo;
|
|
+ unsigned int operating_snk_mw;
|
|
+ enum typec_port_type type;
|
|
+ enum typec_port_data data;
|
|
+ enum typec_role default_role;
|
|
+ bool try_role_hw;
|
|
+ bool self_powered;
|
|
+ const struct typec_altmode_desc *alt_modes;
|
|
+};
|
|
+
|
|
+struct tcpc_dev {
|
|
+ const struct tcpc_config *config;
|
|
+ struct fwnode_handle *fwnode;
|
|
+ int (*init)(struct tcpc_dev *);
|
|
+ int (*get_vbus)(struct tcpc_dev *);
|
|
+ int (*get_current_limit)(struct tcpc_dev *);
|
|
+ int (*set_cc)(struct tcpc_dev *, enum typec_cc_status);
|
|
+ int (*get_cc)(struct tcpc_dev *, enum typec_cc_status *, enum typec_cc_status *);
|
|
+ int (*set_polarity)(struct tcpc_dev *, enum typec_cc_polarity);
|
|
+ int (*set_vconn)(struct tcpc_dev *, bool);
|
|
+ int (*set_vbus)(struct tcpc_dev *, bool, bool);
|
|
+ int (*set_current_limit)(struct tcpc_dev *, u32, u32);
|
|
+ int (*set_pd_rx)(struct tcpc_dev *, bool);
|
|
+ int (*set_roles)(struct tcpc_dev *, bool, enum typec_role, enum typec_data_role);
|
|
+ int (*start_drp_toggling)(struct tcpc_dev *, enum typec_cc_status);
|
|
+ int (*try_role)(struct tcpc_dev *, int);
|
|
+ int (*pd_transmit)(struct tcpc_dev *, enum tcpm_transmit_type, const struct pd_message *);
|
|
+};
|
|
+
|
|
+enum tcpm_state {
|
|
+ INVALID_STATE = 0,
|
|
+ DRP_TOGGLING = 1,
|
|
+ SRC_UNATTACHED = 2,
|
|
+ SRC_ATTACH_WAIT = 3,
|
|
+ SRC_ATTACHED = 4,
|
|
+ SRC_STARTUP = 5,
|
|
+ SRC_SEND_CAPABILITIES = 6,
|
|
+ SRC_SEND_CAPABILITIES_TIMEOUT = 7,
|
|
+ SRC_NEGOTIATE_CAPABILITIES = 8,
|
|
+ SRC_TRANSITION_SUPPLY = 9,
|
|
+ SRC_READY = 10,
|
|
+ SRC_WAIT_NEW_CAPABILITIES = 11,
|
|
+ SNK_UNATTACHED = 12,
|
|
+ SNK_ATTACH_WAIT = 13,
|
|
+ SNK_DEBOUNCED = 14,
|
|
+ SNK_ATTACHED = 15,
|
|
+ SNK_STARTUP = 16,
|
|
+ SNK_DISCOVERY = 17,
|
|
+ SNK_DISCOVERY_DEBOUNCE = 18,
|
|
+ SNK_DISCOVERY_DEBOUNCE_DONE = 19,
|
|
+ SNK_WAIT_CAPABILITIES = 20,
|
|
+ SNK_NEGOTIATE_CAPABILITIES = 21,
|
|
+ SNK_NEGOTIATE_PPS_CAPABILITIES = 22,
|
|
+ SNK_TRANSITION_SINK = 23,
|
|
+ SNK_TRANSITION_SINK_VBUS = 24,
|
|
+ SNK_READY = 25,
|
|
+ ACC_UNATTACHED = 26,
|
|
+ DEBUG_ACC_ATTACHED = 27,
|
|
+ AUDIO_ACC_ATTACHED = 28,
|
|
+ AUDIO_ACC_DEBOUNCE = 29,
|
|
+ HARD_RESET_SEND = 30,
|
|
+ HARD_RESET_START = 31,
|
|
+ SRC_HARD_RESET_VBUS_OFF = 32,
|
|
+ SRC_HARD_RESET_VBUS_ON = 33,
|
|
+ SNK_HARD_RESET_SINK_OFF = 34,
|
|
+ SNK_HARD_RESET_WAIT_VBUS = 35,
|
|
+ SNK_HARD_RESET_SINK_ON = 36,
|
|
+ SOFT_RESET = 37,
|
|
+ SOFT_RESET_SEND = 38,
|
|
+ DR_SWAP_ACCEPT = 39,
|
|
+ DR_SWAP_SEND = 40,
|
|
+ DR_SWAP_SEND_TIMEOUT = 41,
|
|
+ DR_SWAP_CANCEL = 42,
|
|
+ DR_SWAP_CHANGE_DR = 43,
|
|
+ PR_SWAP_ACCEPT = 44,
|
|
+ PR_SWAP_SEND = 45,
|
|
+ PR_SWAP_SEND_TIMEOUT = 46,
|
|
+ PR_SWAP_CANCEL = 47,
|
|
+ PR_SWAP_START = 48,
|
|
+ PR_SWAP_SRC_SNK_TRANSITION_OFF = 49,
|
|
+ PR_SWAP_SRC_SNK_SOURCE_OFF = 50,
|
|
+ PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED = 51,
|
|
+ PR_SWAP_SRC_SNK_SINK_ON = 52,
|
|
+ PR_SWAP_SNK_SRC_SINK_OFF = 53,
|
|
+ PR_SWAP_SNK_SRC_SOURCE_ON = 54,
|
|
+ PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP = 55,
|
|
+ VCONN_SWAP_ACCEPT = 56,
|
|
+ VCONN_SWAP_SEND = 57,
|
|
+ VCONN_SWAP_SEND_TIMEOUT = 58,
|
|
+ VCONN_SWAP_CANCEL = 59,
|
|
+ VCONN_SWAP_START = 60,
|
|
+ VCONN_SWAP_WAIT_FOR_VCONN = 61,
|
|
+ VCONN_SWAP_TURN_ON_VCONN = 62,
|
|
+ VCONN_SWAP_TURN_OFF_VCONN = 63,
|
|
+ SNK_TRY = 64,
|
|
+ SNK_TRY_WAIT = 65,
|
|
+ SNK_TRY_WAIT_DEBOUNCE = 66,
|
|
+ SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS = 67,
|
|
+ SRC_TRYWAIT = 68,
|
|
+ SRC_TRYWAIT_DEBOUNCE = 69,
|
|
+ SRC_TRYWAIT_UNATTACHED = 70,
|
|
+ SRC_TRY = 71,
|
|
+ SRC_TRY_WAIT = 72,
|
|
+ SRC_TRY_DEBOUNCE = 73,
|
|
+ SNK_TRYWAIT = 74,
|
|
+ SNK_TRYWAIT_DEBOUNCE = 75,
|
|
+ SNK_TRYWAIT_VBUS = 76,
|
|
+ BIST_RX = 77,
|
|
+ GET_STATUS_SEND = 78,
|
|
+ GET_STATUS_SEND_TIMEOUT = 79,
|
|
+ GET_PPS_STATUS_SEND = 80,
|
|
+ GET_PPS_STATUS_SEND_TIMEOUT = 81,
|
|
+ ERROR_RECOVERY = 82,
|
|
+ PORT_RESET = 83,
|
|
+ PORT_RESET_WAIT_OFF = 84,
|
|
+};
|
|
+
|
|
+enum vdm_states {
|
|
+ VDM_STATE_ERR_BUSY = -3,
|
|
+ VDM_STATE_ERR_SEND = -2,
|
|
+ VDM_STATE_ERR_TMOUT = -1,
|
|
+ VDM_STATE_DONE = 0,
|
|
+ VDM_STATE_READY = 1,
|
|
+ VDM_STATE_BUSY = 2,
|
|
+ VDM_STATE_WAIT_RSP_BUSY = 3,
|
|
+};
|
|
+
|
|
+enum pd_msg_request {
|
|
+ PD_MSG_NONE = 0,
|
|
+ PD_MSG_CTRL_REJECT = 1,
|
|
+ PD_MSG_CTRL_WAIT = 2,
|
|
+ PD_MSG_CTRL_NOT_SUPP = 3,
|
|
+ PD_MSG_DATA_SINK_CAP = 4,
|
|
+ PD_MSG_DATA_SOURCE_CAP = 5,
|
|
+};
|
|
+
|
|
+struct pd_mode_data {
|
|
+ int svid_index;
|
|
+ int nsvids;
|
|
+ u16 svids[16];
|
|
+ int altmodes;
|
|
+ struct typec_altmode_desc altmode_desc[96];
|
|
+};
|
|
+
|
|
+struct pd_pps_data {
|
|
+ u32 min_volt;
|
|
+ u32 max_volt;
|
|
+ u32 max_curr;
|
|
+ u32 out_volt;
|
|
+ u32 op_curr;
|
|
+ bool supported;
|
|
+ bool active;
|
|
+};
|
|
+
|
|
+struct typec_port___2;
|
|
+
|
|
+struct usb_role_switch;
|
|
+
|
|
+struct typec_partner___2;
|
|
+
|
|
+struct tcpm_port {
|
|
+ struct device *dev;
|
|
+ struct mutex lock;
|
|
+ struct workqueue_struct *wq;
|
|
+ struct typec_capability typec_caps;
|
|
+ struct typec_port___2 *typec_port;
|
|
+ struct tcpc_dev *tcpc;
|
|
+ struct usb_role_switch *role_sw;
|
|
+ enum typec_role vconn_role;
|
|
+ enum typec_role pwr_role;
|
|
+ enum typec_data_role data_role;
|
|
+ enum typec_pwr_opmode pwr_opmode;
|
|
+ struct usb_pd_identity partner_ident;
|
|
+ struct typec_partner_desc partner_desc;
|
|
+ struct typec_partner___2 *partner;
|
|
+ enum typec_cc_status cc_req;
|
|
+ enum typec_cc_status cc1;
|
|
+ enum typec_cc_status cc2;
|
|
+ enum typec_cc_polarity polarity;
|
|
+ bool attached;
|
|
+ bool connected;
|
|
+ enum typec_port_type port_type;
|
|
+ bool vbus_present;
|
|
+ bool vbus_never_low;
|
|
+ bool vbus_source;
|
|
+ bool vbus_charge;
|
|
+ bool send_discover;
|
|
+ bool op_vsafe5v;
|
|
+ int try_role;
|
|
+ int try_snk_count;
|
|
+ int try_src_count;
|
|
+ enum pd_msg_request queued_message;
|
|
+ enum tcpm_state enter_state;
|
|
+ enum tcpm_state prev_state;
|
|
+ enum tcpm_state state;
|
|
+ enum tcpm_state delayed_state;
|
|
+ long unsigned int delayed_runtime;
|
|
+ long unsigned int delay_ms;
|
|
+ spinlock_t pd_event_lock;
|
|
+ u32 pd_events;
|
|
+ struct work_struct event_work;
|
|
+ struct delayed_work state_machine;
|
|
+ struct delayed_work vdm_state_machine;
|
|
+ bool state_machine_running;
|
|
+ struct completion tx_complete;
|
|
+ enum tcpm_transmit_status tx_status;
|
|
+ struct mutex swap_lock;
|
|
+ bool swap_pending;
|
|
+ bool non_pd_role_swap;
|
|
+ struct completion swap_complete;
|
|
+ int swap_status;
|
|
+ unsigned int negotiated_rev;
|
|
+ unsigned int message_id;
|
|
+ unsigned int caps_count;
|
|
+ unsigned int hard_reset_count;
|
|
+ bool pd_capable;
|
|
+ bool explicit_contract;
|
|
+ unsigned int rx_msgid;
|
|
+ u32 sink_request;
|
|
+ u32 source_caps[7];
|
|
+ unsigned int nr_source_caps;
|
|
+ u32 sink_caps[7];
|
|
+ unsigned int nr_sink_caps;
|
|
+ u32 src_pdo[7];
|
|
+ unsigned int nr_src_pdo;
|
|
+ u32 snk_pdo[7];
|
|
+ unsigned int nr_snk_pdo;
|
|
+ u32 snk_vdo[6];
|
|
+ unsigned int nr_snk_vdo;
|
|
+ unsigned int operating_snk_mw;
|
|
+ bool update_sink_caps;
|
|
+ u32 current_limit;
|
|
+ u32 supply_voltage;
|
|
+ struct power_supply *psy;
|
|
+ struct power_supply_desc psy_desc;
|
|
+ enum power_supply_usb_type usb_type;
|
|
+ u32 bist_request;
|
|
+ enum vdm_states vdm_state;
|
|
+ u32 vdm_retries;
|
|
+ u32 vdo_data[7];
|
|
+ u8 vdo_count;
|
|
+ u32 vdo_retry;
|
|
+ struct pd_pps_data pps_data;
|
|
+ struct completion pps_complete;
|
|
+ bool pps_pending;
|
|
+ int pps_status;
|
|
+ struct pd_mode_data mode_data;
|
|
+ struct typec_altmode *partner_altmode[96];
|
|
+ struct typec_altmode *port_altmode[96];
|
|
+ long unsigned int max_wait;
|
|
+ bool self_powered;
|
|
+ struct dentry *dentry;
|
|
+ struct mutex logbuffer_lock;
|
|
+ int logbuffer_head;
|
|
+ int logbuffer_tail;
|
|
+ u8 *logbuffer[1024];
|
|
+};
|
|
+
|
|
+struct pd_rx_event {
|
|
+ struct work_struct work;
|
|
+ struct tcpm_port *port;
|
|
+ struct pd_message msg;
|
|
+};
|
|
+
|
|
+enum pdo_err {
|
|
+ PDO_NO_ERR = 0,
|
|
+ PDO_ERR_NO_VSAFE5V = 1,
|
|
+ PDO_ERR_VSAFE5V_NOT_FIRST = 2,
|
|
+ PDO_ERR_PDO_TYPE_NOT_IN_ORDER = 3,
|
|
+ PDO_ERR_FIXED_NOT_SORTED = 4,
|
|
+ PDO_ERR_VARIABLE_BATT_NOT_SORTED = 5,
|
|
+ PDO_ERR_DUPE_PDO = 6,
|
|
+ PDO_ERR_PPS_APDO_NOT_SORTED = 7,
|
|
+ PDO_ERR_DUPE_PPS_APDO = 8,
|
|
+};
|
|
+
|
|
+enum tcpm_psy_online_states {
|
|
+ TCPM_PSY_OFFLINE = 0,
|
|
+ TCPM_PSY_FIXED_ONLINE = 1,
|
|
+ TCPM_PSY_PROG_ONLINE = 2,
|
|
+};
|
|
+
|
|
+struct ucsi_cci {
|
|
+ char: 1;
|
|
+ u8 connector_change: 7;
|
|
+ u8 data_length;
|
|
+ short: 9;
|
|
+ u16 not_supported: 1;
|
|
+ u16 cancel_complete: 1;
|
|
+ u16 reset_complete: 1;
|
|
+ u16 busy: 1;
|
|
+ u16 ack_complete: 1;
|
|
+ u16 error: 1;
|
|
+ u16 cmd_complete: 1;
|
|
+};
|
|
+
|
|
+struct ucsi_command {
|
|
+ u8 cmd;
|
|
+ u8 length;
|
|
+ u64 data: 48;
|
|
+};
|
|
+
|
|
+struct ucsi_ack_cmd {
|
|
+ u8 cmd;
|
|
+ u8 length;
|
|
+ u8 cci_ack: 1;
|
|
+ u8 cmd_ack: 1;
|
|
+};
|
|
+
|
|
+struct ucsi_con_rst {
|
|
+ u8 cmd;
|
|
+ u8 length;
|
|
+ u8 con_num: 7;
|
|
+ u8 hard_reset: 1;
|
|
+};
|
|
+
|
|
+struct ucsi_uor_cmd {
|
|
+ u8 cmd;
|
|
+ u8 length;
|
|
+ u16 con_num: 7;
|
|
+ u16 role: 3;
|
|
+};
|
|
+
|
|
+struct ucsi_control {
|
|
+ union {
|
|
+ u64 raw_cmd;
|
|
+ struct ucsi_command cmd;
|
|
+ struct ucsi_uor_cmd uor;
|
|
+ struct ucsi_ack_cmd ack;
|
|
+ struct ucsi_con_rst con_rst;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct ucsi_capability {
|
|
+ u32 attributes;
|
|
+ u32 num_connectors: 8;
|
|
+ u32 features: 24;
|
|
+ u8 num_alt_modes;
|
|
+ u8 reserved;
|
|
+ u16 bc_version;
|
|
+ u16 pd_version;
|
|
+ u16 typec_version;
|
|
+};
|
|
+
|
|
+struct ucsi_connector_capability {
|
|
+ u8 op_mode;
|
|
+ u8 provider: 1;
|
|
+ u8 consumer: 1;
|
|
+};
|
|
+
|
|
+struct ucsi_connector_status {
|
|
+ u16 change;
|
|
+ u16 pwr_op_mode: 3;
|
|
+ u16 connected: 1;
|
|
+ u16 pwr_dir: 1;
|
|
+ u16 partner_flags: 8;
|
|
+ u16 partner_type: 3;
|
|
+ u32 request_data_obj;
|
|
+ u8 bc_status: 2;
|
|
+ u8 provider_cap_limit_reason: 4;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct ucsi_data {
|
|
+ u16 version;
|
|
+ u16 reserved;
|
|
+ union {
|
|
+ u32 raw_cci;
|
|
+ struct ucsi_cci cci;
|
|
+ };
|
|
+ struct ucsi_control ctrl;
|
|
+ u32 message_in[4];
|
|
+ u32 message_out[4];
|
|
+};
|
|
+
|
|
+struct ucsi_ppm {
|
|
+ struct ucsi_data *data;
|
|
+ int (*cmd)(struct ucsi_ppm *, struct ucsi_control *);
|
|
+ int (*sync)(struct ucsi_ppm *);
|
|
+};
|
|
+
|
|
+enum ucsi_status {
|
|
+ UCSI_IDLE = 0,
|
|
+ UCSI_BUSY = 1,
|
|
+ UCSI_ERROR = 2,
|
|
+};
|
|
+
|
|
+struct ucsi;
|
|
+
|
|
+struct ucsi_connector {
|
|
+ int num;
|
|
+ struct ucsi *ucsi;
|
|
+ struct work_struct work;
|
|
+ struct completion complete;
|
|
+ struct typec_port___2 *port;
|
|
+ struct typec_partner___2 *partner;
|
|
+ struct typec_capability typec_cap;
|
|
+ struct ucsi_connector_status status;
|
|
+ struct ucsi_connector_capability cap;
|
|
+};
|
|
+
|
|
+struct ucsi {
|
|
+ struct device *dev;
|
|
+ struct ucsi_ppm *ppm;
|
|
+ enum ucsi_status status;
|
|
+ struct completion complete;
|
|
+ struct ucsi_capability cap;
|
|
+ struct ucsi_connector *connector;
|
|
+ struct work_struct work;
|
|
+ struct mutex ppm_lock;
|
|
+ long unsigned int flags;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_ucsi_log_ack {
|
|
+ struct trace_entry ent;
|
|
+ u8 ack;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_ucsi_log_control {
|
|
+ struct trace_entry ent;
|
|
+ u64 ctrl;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_ucsi_log_command {
|
|
+ struct trace_entry ent;
|
|
+ u64 ctrl;
|
|
+ int ret;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_ucsi_log_cci {
|
|
+ struct trace_entry ent;
|
|
+ u32 cci;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_ucsi_log_connector_status {
|
|
+ struct trace_entry ent;
|
|
+ int port;
|
|
+ u16 change;
|
|
+ u8 opmode;
|
|
+ u8 connected;
|
|
+ u8 pwr_dir;
|
|
+ u8 partner_flags;
|
|
+ u8 partner_type;
|
|
+ u32 request_data_obj;
|
|
+ u8 bc_status;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_ucsi_log_ack {};
|
|
+
|
|
+struct trace_event_data_offsets_ucsi_log_control {};
|
|
+
|
|
+struct trace_event_data_offsets_ucsi_log_command {};
|
|
+
|
|
+struct trace_event_data_offsets_ucsi_log_cci {};
|
|
+
|
|
+struct trace_event_data_offsets_ucsi_log_connector_status {};
|
|
+
|
|
+struct ucsi___2;
|
|
+
|
|
+struct ucsi_acpi {
|
|
+ struct device *dev;
|
|
+ struct ucsi___2 *ucsi;
|
|
+ struct ucsi_ppm ppm;
|
|
+ guid_t guid;
|
|
+};
|
|
+
|
|
+struct i2c_device_id {
|
|
+ char name[20];
|
|
+ kernel_ulong_t driver_data;
|
|
+};
|
|
+
|
|
+enum i2c_alert_protocol {
|
|
+ I2C_PROTOCOL_SMBUS_ALERT = 0,
|
|
+ I2C_PROTOCOL_SMBUS_HOST_NOTIFY = 1,
|
|
+};
|
|
+
|
|
+struct i2c_board_info;
|
|
+
|
|
+struct i2c_driver {
|
|
+ unsigned int class;
|
|
+ int (*probe)(struct i2c_client *, const struct i2c_device_id *);
|
|
+ int (*remove)(struct i2c_client *);
|
|
+ int (*probe_new)(struct i2c_client *);
|
|
+ void (*shutdown)(struct i2c_client *);
|
|
+ void (*alert)(struct i2c_client *, enum i2c_alert_protocol, unsigned int);
|
|
+ int (*command)(struct i2c_client *, unsigned int, void *);
|
|
+ struct device_driver driver;
|
|
+ const struct i2c_device_id *id_table;
|
|
+ int (*detect)(struct i2c_client *, struct i2c_board_info *);
|
|
+ const short unsigned int *address_list;
|
|
+ struct list_head clients;
|
|
+ bool disable_i2c_core_irq_mapping;
|
|
+};
|
|
+
|
|
+struct i2c_board_info {
|
|
+ char type[20];
|
|
+ short unsigned int flags;
|
|
+ short unsigned int addr;
|
|
+ const char *dev_name;
|
|
+ void *platform_data;
|
|
+ struct device_node *of_node;
|
|
+ struct fwnode_handle *fwnode;
|
|
+ const struct property_entry *properties;
|
|
+ const struct resource *resources;
|
|
+ unsigned int num_resources;
|
|
+ int irq;
|
|
+};
|
|
+
|
|
+struct tcpci;
|
|
+
|
|
+struct tcpci_data {
|
|
+ struct regmap *regmap;
|
|
+ int (*init)(struct tcpci *, struct tcpci_data *);
|
|
+ int (*set_vconn)(struct tcpci *, struct tcpci_data *, bool);
|
|
+ int (*start_drp_toggling)(struct tcpci *, struct tcpci_data *, enum typec_cc_status);
|
|
+};
|
|
+
|
|
+struct tcpm_port___2;
|
|
+
|
|
+struct tcpci {
|
|
+ struct device *dev;
|
|
+ struct tcpm_port___2 *port;
|
|
+ struct regmap *regmap;
|
|
+ bool controls_vbus;
|
|
+ struct tcpc_dev tcpc;
|
|
+ struct tcpci_data *data;
|
|
+};
|
|
+
|
|
+struct tcpci_chip {
|
|
+ struct tcpci *tcpci;
|
|
+ struct tcpci_data data;
|
|
+};
|
|
+
|
|
+struct rt1711h_chip {
|
|
+ struct tcpci_data data;
|
|
+ struct tcpci *tcpci;
|
|
+ struct device *dev;
|
|
+};
|
|
+
|
|
+typedef int (*usb_role_switch_set_t)(struct device *, enum usb_role);
|
|
+
|
|
+typedef enum usb_role (*usb_role_switch_get_t)(struct device *);
|
|
+
|
|
+struct usb_role_switch_desc {
|
|
+ struct device *usb2_port;
|
|
+ struct device *usb3_port;
|
|
+ struct device *udc;
|
|
+ usb_role_switch_set_t set;
|
|
+ usb_role_switch_get_t get;
|
|
+ bool allow_userspace_control;
|
|
+};
|
|
+
|
|
+struct usb_role_switch___2 {
|
|
+ struct device dev;
|
|
+ struct mutex lock;
|
|
+ enum usb_role role;
|
|
+ struct device *usb2_port;
|
|
+ struct device *usb3_port;
|
|
+ struct device *udc;
|
|
+ usb_role_switch_set_t set;
|
|
+ usb_role_switch_get_t get;
|
|
+ bool allow_userspace_control;
|
|
+};
|
|
+
|
|
+struct intel_xhci_usb_data {
|
|
+ struct usb_role_switch *role_sw;
|
|
+ void *base;
|
|
+};
|
|
+
|
|
+struct serio_device_id {
|
|
+ __u8 type;
|
|
+ __u8 extra;
|
|
+ __u8 id;
|
|
+ __u8 proto;
|
|
+};
|
|
+
|
|
+struct serio_driver;
|
|
+
|
|
+struct serio {
|
|
+ void *port_data;
|
|
+ char name[32];
|
|
+ char phys[32];
|
|
+ char firmware_id[128];
|
|
+ bool manual_bind;
|
|
+ struct serio_device_id id;
|
|
+ spinlock_t lock;
|
|
+ int (*write)(struct serio *, unsigned char);
|
|
+ int (*open)(struct serio *);
|
|
+ void (*close)(struct serio *);
|
|
+ int (*start)(struct serio *);
|
|
+ void (*stop)(struct serio *);
|
|
+ struct serio *parent;
|
|
+ struct list_head child_node;
|
|
+ struct list_head children;
|
|
+ unsigned int depth;
|
|
+ struct serio_driver *drv;
|
|
+ struct mutex drv_mutex;
|
|
+ struct device dev;
|
|
+ struct list_head node;
|
|
+ struct mutex *ps2_cmd_mutex;
|
|
+};
|
|
+
|
|
+struct serio_driver {
|
|
+ const char *description;
|
|
+ const struct serio_device_id *id_table;
|
|
+ bool manual_bind;
|
|
+ void (*write_wakeup)(struct serio *);
|
|
+ irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int);
|
|
+ int (*connect)(struct serio *, struct serio_driver *);
|
|
+ int (*reconnect)(struct serio *);
|
|
+ int (*fast_reconnect)(struct serio *);
|
|
+ void (*disconnect)(struct serio *);
|
|
+ void (*cleanup)(struct serio *);
|
|
+ struct device_driver driver;
|
|
+};
|
|
+
|
|
+enum serio_event_type {
|
|
+ SERIO_RESCAN_PORT = 0,
|
|
+ SERIO_RECONNECT_PORT = 1,
|
|
+ SERIO_RECONNECT_SUBTREE = 2,
|
|
+ SERIO_REGISTER_PORT = 3,
|
|
+ SERIO_ATTACH_DRIVER = 4,
|
|
+};
|
|
+
|
|
+struct serio_event {
|
|
+ enum serio_event_type type;
|
|
+ void *object;
|
|
+ struct module *owner;
|
|
+ struct list_head node;
|
|
+};
|
|
+
|
|
+enum i8042_controller_reset_mode {
|
|
+ I8042_RESET_NEVER = 0,
|
|
+ I8042_RESET_ALWAYS = 1,
|
|
+ I8042_RESET_ON_S2RAM = 2,
|
|
+};
|
|
+
|
|
+struct i8042_port {
|
|
+ struct serio *serio;
|
|
+ int irq;
|
|
+ bool exists;
|
|
+ bool driver_bound;
|
|
+ signed char mux;
|
|
+};
|
|
+
|
|
+struct serport {
|
|
+ struct tty_struct *tty;
|
|
+ wait_queue_head_t wait;
|
|
+ struct serio *serio;
|
|
+ struct serio_device_id id;
|
|
+ spinlock_t lock;
|
|
+ long unsigned int flags;
|
|
+};
|
|
+
|
|
+struct ps2dev {
|
|
+ struct serio *serio;
|
|
+ struct mutex cmd_mutex;
|
|
+ wait_queue_head_t wait;
|
|
+ long unsigned int flags;
|
|
+ u8 cmdbuf[8];
|
|
+ u8 cmdcnt;
|
|
+ u8 nak;
|
|
+};
|
|
+
|
|
+struct input_mt_slot {
|
|
+ int abs[14];
|
|
+ unsigned int frame;
|
|
+ unsigned int key;
|
|
+};
|
|
+
|
|
+struct input_mt {
|
|
+ int trkid;
|
|
+ int num_slots;
|
|
+ int slot;
|
|
+ unsigned int flags;
|
|
+ unsigned int frame;
|
|
+ int *red;
|
|
+ struct input_mt_slot slots[0];
|
|
+};
|
|
+
|
|
+union input_seq_state {
|
|
+ struct {
|
|
+ short unsigned int pos;
|
|
+ bool mutex_acquired;
|
|
+ };
|
|
+ void *p;
|
|
+};
|
|
+
|
|
+struct input_devres {
|
|
+ struct input_dev *input;
|
|
+};
|
|
+
|
|
+struct input_event {
|
|
+ __kernel_ulong_t __sec;
|
|
+ __kernel_ulong_t __usec;
|
|
+ __u16 type;
|
|
+ __u16 code;
|
|
+ __s32 value;
|
|
+};
|
|
+
|
|
+struct input_event_compat {
|
|
+ compat_ulong_t sec;
|
|
+ compat_ulong_t usec;
|
|
+ __u16 type;
|
|
+ __u16 code;
|
|
+ __s32 value;
|
|
+};
|
|
+
|
|
+struct ff_periodic_effect_compat {
|
|
+ __u16 waveform;
|
|
+ __u16 period;
|
|
+ __s16 magnitude;
|
|
+ __s16 offset;
|
|
+ __u16 phase;
|
|
+ struct ff_envelope envelope;
|
|
+ __u32 custom_len;
|
|
+ compat_uptr_t custom_data;
|
|
+};
|
|
+
|
|
+struct ff_effect_compat {
|
|
+ __u16 type;
|
|
+ __s16 id;
|
|
+ __u16 direction;
|
|
+ struct ff_trigger trigger;
|
|
+ struct ff_replay replay;
|
|
+ union {
|
|
+ struct ff_constant_effect constant;
|
|
+ struct ff_ramp_effect ramp;
|
|
+ struct ff_periodic_effect_compat periodic;
|
|
+ struct ff_condition_effect condition[2];
|
|
+ struct ff_rumble_effect rumble;
|
|
+ } u;
|
|
+};
|
|
+
|
|
+struct input_mt_pos {
|
|
+ s16 x;
|
|
+ s16 y;
|
|
+};
|
|
+
|
|
+struct input_led {
|
|
+ struct led_classdev cdev;
|
|
+ struct input_handle *handle;
|
|
+ unsigned int code;
|
|
+};
|
|
+
|
|
+struct input_leds {
|
|
+ struct input_handle handle;
|
|
+ unsigned int num_leds;
|
|
+ struct input_led leds[0];
|
|
+};
|
|
+
|
|
+struct mousedev_hw_data {
|
|
+ int dx;
|
|
+ int dy;
|
|
+ int dz;
|
|
+ int x;
|
|
+ int y;
|
|
+ int abs_event;
|
|
+ long unsigned int buttons;
|
|
+};
|
|
+
|
|
+struct mousedev {
|
|
+ int open;
|
|
+ struct input_handle handle;
|
|
+ wait_queue_head_t wait;
|
|
+ struct list_head client_list;
|
|
+ spinlock_t client_lock;
|
|
+ struct mutex mutex;
|
|
+ struct device dev;
|
|
+ struct cdev cdev;
|
|
+ bool exist;
|
|
+ struct list_head mixdev_node;
|
|
+ bool opened_by_mixdev;
|
|
+ struct mousedev_hw_data packet;
|
|
+ unsigned int pkt_count;
|
|
+ int old_x[4];
|
|
+ int old_y[4];
|
|
+ int frac_dx;
|
|
+ int frac_dy;
|
|
+ long unsigned int touch;
|
|
+ int (*open_device)(struct mousedev *);
|
|
+ void (*close_device)(struct mousedev *);
|
|
+};
|
|
+
|
|
+enum mousedev_emul {
|
|
+ MOUSEDEV_EMUL_PS2 = 0,
|
|
+ MOUSEDEV_EMUL_IMPS = 1,
|
|
+ MOUSEDEV_EMUL_EXPS = 2,
|
|
+};
|
|
+
|
|
+struct mousedev_motion {
|
|
+ int dx;
|
|
+ int dy;
|
|
+ int dz;
|
|
+ long unsigned int buttons;
|
|
+};
|
|
+
|
|
+struct mousedev_client {
|
|
+ struct fasync_struct *fasync;
|
|
+ struct mousedev *mousedev;
|
|
+ struct list_head node;
|
|
+ struct mousedev_motion packets[16];
|
|
+ unsigned int head;
|
|
+ unsigned int tail;
|
|
+ spinlock_t packet_lock;
|
|
+ int pos_x;
|
|
+ int pos_y;
|
|
+ u8 ps2[6];
|
|
+ unsigned char ready;
|
|
+ unsigned char buffer;
|
|
+ unsigned char bufsiz;
|
|
+ unsigned char imexseq;
|
|
+ unsigned char impsseq;
|
|
+ enum mousedev_emul mode;
|
|
+ long unsigned int last_buttons;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ FRACTION_DENOM = 128,
|
|
+};
|
|
+
|
|
+struct input_mask {
|
|
+ __u32 type;
|
|
+ __u32 codes_size;
|
|
+ __u64 codes_ptr;
|
|
+};
|
|
+
|
|
+enum evdev_clock_type {
|
|
+ EV_CLK_REAL = 0,
|
|
+ EV_CLK_MONO = 1,
|
|
+ EV_CLK_BOOT = 2,
|
|
+ EV_CLK_MAX = 3,
|
|
+};
|
|
+
|
|
+struct evdev_client;
|
|
+
|
|
+struct evdev {
|
|
+ int open;
|
|
+ struct input_handle handle;
|
|
+ wait_queue_head_t wait;
|
|
+ struct evdev_client *grab;
|
|
+ struct list_head client_list;
|
|
+ spinlock_t client_lock;
|
|
+ struct mutex mutex;
|
|
+ struct device dev;
|
|
+ struct cdev cdev;
|
|
+ bool exist;
|
|
+};
|
|
+
|
|
+struct evdev_client {
|
|
+ unsigned int head;
|
|
+ unsigned int tail;
|
|
+ unsigned int packet_head;
|
|
+ spinlock_t buffer_lock;
|
|
+ struct fasync_struct *fasync;
|
|
+ struct evdev *evdev;
|
|
+ struct list_head node;
|
|
+ unsigned int clk_type;
|
|
+ bool revoked;
|
|
+ long unsigned int *evmasks[32];
|
|
+ unsigned int bufsize;
|
|
+ struct input_event buffer[0];
|
|
+};
|
|
+
|
|
+struct atkbd {
|
|
+ struct ps2dev ps2dev;
|
|
+ struct input_dev *dev;
|
|
+ char name[64];
|
|
+ char phys[32];
|
|
+ short unsigned int id;
|
|
+ short unsigned int keycode[512];
|
|
+ long unsigned int force_release_mask[8];
|
|
+ unsigned char set;
|
|
+ bool translated;
|
|
+ bool extra;
|
|
+ bool write;
|
|
+ bool softrepeat;
|
|
+ bool softraw;
|
|
+ bool scroll;
|
|
+ bool enabled;
|
|
+ unsigned char emul;
|
|
+ bool resend;
|
|
+ bool release;
|
|
+ long unsigned int xl_bit;
|
|
+ unsigned int last;
|
|
+ long unsigned int time;
|
|
+ long unsigned int err_count;
|
|
+ struct delayed_work event_work;
|
|
+ long unsigned int event_jiffies;
|
|
+ long unsigned int event_mask;
|
|
+ struct mutex mutex;
|
|
+};
|
|
+
|
|
+enum psmouse_state {
|
|
+ PSMOUSE_IGNORE = 0,
|
|
+ PSMOUSE_INITIALIZING = 1,
|
|
+ PSMOUSE_RESYNCING = 2,
|
|
+ PSMOUSE_CMD_MODE = 3,
|
|
+ PSMOUSE_ACTIVATED = 4,
|
|
+};
|
|
+
|
|
+typedef enum {
|
|
+ PSMOUSE_BAD_DATA = 0,
|
|
+ PSMOUSE_GOOD_DATA = 1,
|
|
+ PSMOUSE_FULL_PACKET = 2,
|
|
+} psmouse_ret_t;
|
|
+
|
|
+enum psmouse_scale {
|
|
+ PSMOUSE_SCALE11 = 0,
|
|
+ PSMOUSE_SCALE21 = 1,
|
|
+};
|
|
+
|
|
+enum psmouse_type {
|
|
+ PSMOUSE_NONE = 0,
|
|
+ PSMOUSE_PS2 = 1,
|
|
+ PSMOUSE_PS2PP = 2,
|
|
+ PSMOUSE_THINKPS = 3,
|
|
+ PSMOUSE_GENPS = 4,
|
|
+ PSMOUSE_IMPS = 5,
|
|
+ PSMOUSE_IMEX = 6,
|
|
+ PSMOUSE_SYNAPTICS = 7,
|
|
+ PSMOUSE_ALPS = 8,
|
|
+ PSMOUSE_LIFEBOOK = 9,
|
|
+ PSMOUSE_TRACKPOINT = 10,
|
|
+ PSMOUSE_TOUCHKIT_PS2 = 11,
|
|
+ PSMOUSE_CORTRON = 12,
|
|
+ PSMOUSE_HGPK = 13,
|
|
+ PSMOUSE_ELANTECH = 14,
|
|
+ PSMOUSE_FSP = 15,
|
|
+ PSMOUSE_SYNAPTICS_RELATIVE = 16,
|
|
+ PSMOUSE_CYPRESS = 17,
|
|
+ PSMOUSE_FOCALTECH = 18,
|
|
+ PSMOUSE_VMMOUSE = 19,
|
|
+ PSMOUSE_BYD = 20,
|
|
+ PSMOUSE_SYNAPTICS_SMBUS = 21,
|
|
+ PSMOUSE_ELANTECH_SMBUS = 22,
|
|
+ PSMOUSE_AUTO = 23,
|
|
+};
|
|
+
|
|
+struct psmouse;
|
|
+
|
|
+struct psmouse_protocol {
|
|
+ enum psmouse_type type;
|
|
+ bool maxproto;
|
|
+ bool ignore_parity;
|
|
+ bool try_passthru;
|
|
+ bool smbus_companion;
|
|
+ const char *name;
|
|
+ const char *alias;
|
|
+ int (*detect)(struct psmouse *, bool);
|
|
+ int (*init)(struct psmouse *);
|
|
+};
|
|
+
|
|
+struct psmouse {
|
|
+ void *private;
|
|
+ struct input_dev *dev;
|
|
+ struct ps2dev ps2dev;
|
|
+ struct delayed_work resync_work;
|
|
+ const char *vendor;
|
|
+ const char *name;
|
|
+ const struct psmouse_protocol *protocol;
|
|
+ unsigned char packet[8];
|
|
+ unsigned char badbyte;
|
|
+ unsigned char pktcnt;
|
|
+ unsigned char pktsize;
|
|
+ unsigned char oob_data_type;
|
|
+ unsigned char extra_buttons;
|
|
+ bool acks_disable_command;
|
|
+ unsigned int model;
|
|
+ long unsigned int last;
|
|
+ long unsigned int out_of_sync_cnt;
|
|
+ long unsigned int num_resyncs;
|
|
+ enum psmouse_state state;
|
|
+ char devname[64];
|
|
+ char phys[32];
|
|
+ unsigned int rate;
|
|
+ unsigned int resolution;
|
|
+ unsigned int resetafter;
|
|
+ unsigned int resync_time;
|
|
+ bool smartscroll;
|
|
+ psmouse_ret_t (*protocol_handler)(struct psmouse *);
|
|
+ void (*set_rate)(struct psmouse *, unsigned int);
|
|
+ void (*set_resolution)(struct psmouse *, unsigned int);
|
|
+ void (*set_scale)(struct psmouse *, enum psmouse_scale);
|
|
+ int (*reconnect)(struct psmouse *);
|
|
+ int (*fast_reconnect)(struct psmouse *);
|
|
+ void (*disconnect)(struct psmouse *);
|
|
+ void (*cleanup)(struct psmouse *);
|
|
+ int (*poll)(struct psmouse *);
|
|
+ void (*pt_activate)(struct psmouse *);
|
|
+ void (*pt_deactivate)(struct psmouse *);
|
|
+};
|
|
+
|
|
+struct psmouse_attribute {
|
|
+ struct device_attribute dattr;
|
|
+ void *data;
|
|
+ ssize_t (*show)(struct psmouse *, void *, char *);
|
|
+ ssize_t (*set)(struct psmouse *, void *, const char *, size_t);
|
|
+ bool protect;
|
|
+};
|
|
+
|
|
+struct rmi_2d_axis_alignment {
|
|
+ bool swap_axes;
|
|
+ bool flip_x;
|
|
+ bool flip_y;
|
|
+ u16 clip_x_low;
|
|
+ u16 clip_y_low;
|
|
+ u16 clip_x_high;
|
|
+ u16 clip_y_high;
|
|
+ u16 offset_x;
|
|
+ u16 offset_y;
|
|
+ u8 delta_x_threshold;
|
|
+ u8 delta_y_threshold;
|
|
+};
|
|
+
|
|
+enum rmi_sensor_type {
|
|
+ rmi_sensor_default = 0,
|
|
+ rmi_sensor_touchscreen = 1,
|
|
+ rmi_sensor_touchpad = 2,
|
|
+};
|
|
+
|
|
+struct rmi_2d_sensor_platform_data {
|
|
+ struct rmi_2d_axis_alignment axis_align;
|
|
+ enum rmi_sensor_type sensor_type;
|
|
+ int x_mm;
|
|
+ int y_mm;
|
|
+ int disable_report_mask;
|
|
+ u16 rezero_wait;
|
|
+ bool topbuttonpad;
|
|
+ bool kernel_tracking;
|
|
+ int dmax;
|
|
+ int dribble;
|
|
+ int palm_detect;
|
|
+};
|
|
+
|
|
+struct rmi_f30_data {
|
|
+ bool buttonpad;
|
|
+ bool trackstick_buttons;
|
|
+ bool disable;
|
|
+};
|
|
+
|
|
+enum rmi_reg_state {
|
|
+ RMI_REG_STATE_DEFAULT = 0,
|
|
+ RMI_REG_STATE_OFF = 1,
|
|
+ RMI_REG_STATE_ON = 2,
|
|
+};
|
|
+
|
|
+struct rmi_f01_power_management {
|
|
+ enum rmi_reg_state nosleep;
|
|
+ u8 wakeup_threshold;
|
|
+ u8 doze_holdoff;
|
|
+ u8 doze_interval;
|
|
+};
|
|
+
|
|
+struct rmi_device_platform_data_spi {
|
|
+ u32 block_delay_us;
|
|
+ u32 split_read_block_delay_us;
|
|
+ u32 read_delay_us;
|
|
+ u32 write_delay_us;
|
|
+ u32 split_read_byte_delay_us;
|
|
+ u32 pre_delay_us;
|
|
+ u32 post_delay_us;
|
|
+ u8 bits_per_word;
|
|
+ u16 mode;
|
|
+ void *cs_assert_data;
|
|
+ int (*cs_assert)(const void *, const bool);
|
|
+};
|
|
+
|
|
+struct rmi_device_platform_data {
|
|
+ int reset_delay_ms;
|
|
+ int irq;
|
|
+ struct rmi_device_platform_data_spi spi_data;
|
|
+ struct rmi_2d_sensor_platform_data sensor_pdata;
|
|
+ struct rmi_f01_power_management power_management;
|
|
+ struct rmi_f30_data f30_data;
|
|
+};
|
|
+
|
|
+enum synaptics_pkt_type {
|
|
+ SYN_NEWABS = 0,
|
|
+ SYN_NEWABS_STRICT = 1,
|
|
+ SYN_NEWABS_RELAXED = 2,
|
|
+ SYN_OLDABS = 3,
|
|
+};
|
|
+
|
|
+struct synaptics_hw_state {
|
|
+ int x;
|
|
+ int y;
|
|
+ int z;
|
|
+ int w;
|
|
+ unsigned int left: 1;
|
|
+ unsigned int right: 1;
|
|
+ unsigned int middle: 1;
|
|
+ unsigned int up: 1;
|
|
+ unsigned int down: 1;
|
|
+ u8 ext_buttons;
|
|
+ s8 scroll;
|
|
+};
|
|
+
|
|
+struct synaptics_device_info {
|
|
+ u32 model_id;
|
|
+ u32 firmware_id;
|
|
+ u32 board_id;
|
|
+ u32 capabilities;
|
|
+ u32 ext_cap;
|
|
+ u32 ext_cap_0c;
|
|
+ u32 ext_cap_10;
|
|
+ u32 identity;
|
|
+ u32 x_res;
|
|
+ u32 y_res;
|
|
+ u32 x_max;
|
|
+ u32 y_max;
|
|
+ u32 x_min;
|
|
+ u32 y_min;
|
|
+};
|
|
+
|
|
+struct synaptics_data {
|
|
+ struct synaptics_device_info info;
|
|
+ enum synaptics_pkt_type pkt_type;
|
|
+ u8 mode;
|
|
+ int scroll;
|
|
+ bool absolute_mode;
|
|
+ bool disable_gesture;
|
|
+ struct serio *pt_port;
|
|
+ struct synaptics_hw_state agm;
|
|
+ unsigned int agm_count;
|
|
+ long unsigned int press_start;
|
|
+ bool press;
|
|
+ bool report_press;
|
|
+ bool is_forcepad;
|
|
+};
|
|
+
|
|
+struct min_max_quirk {
|
|
+ const char * const *pnp_ids;
|
|
+ struct {
|
|
+ u32 min;
|
|
+ u32 max;
|
|
+ } board_id;
|
|
+ u32 x_min;
|
|
+ u32 x_max;
|
|
+ u32 y_min;
|
|
+ u32 y_max;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SYNAPTICS_INTERTOUCH_NOT_SET = -1,
|
|
+ SYNAPTICS_INTERTOUCH_OFF = 0,
|
|
+ SYNAPTICS_INTERTOUCH_ON = 1,
|
|
+};
|
|
+
|
|
+struct focaltech_finger_state {
|
|
+ bool active;
|
|
+ bool valid;
|
|
+ unsigned int x;
|
|
+ unsigned int y;
|
|
+};
|
|
+
|
|
+struct focaltech_hw_state {
|
|
+ struct focaltech_finger_state fingers[5];
|
|
+ unsigned int width;
|
|
+ bool pressed;
|
|
+};
|
|
+
|
|
+struct focaltech_data {
|
|
+ unsigned int x_max;
|
|
+ unsigned int y_max;
|
|
+ struct focaltech_hw_state state;
|
|
+};
|
|
+
|
|
+enum SS4_PACKET_ID {
|
|
+ SS4_PACKET_ID_IDLE = 0,
|
|
+ SS4_PACKET_ID_ONE = 1,
|
|
+ SS4_PACKET_ID_TWO = 2,
|
|
+ SS4_PACKET_ID_MULTI = 3,
|
|
+ SS4_PACKET_ID_STICK = 4,
|
|
+};
|
|
+
|
|
+enum V7_PACKET_ID {
|
|
+ V7_PACKET_ID_IDLE = 0,
|
|
+ V7_PACKET_ID_TWO = 1,
|
|
+ V7_PACKET_ID_MULTI = 2,
|
|
+ V7_PACKET_ID_NEW = 3,
|
|
+ V7_PACKET_ID_UNKNOWN = 4,
|
|
+};
|
|
+
|
|
+struct alps_protocol_info {
|
|
+ u16 version;
|
|
+ u8 byte0;
|
|
+ u8 mask0;
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+struct alps_model_info {
|
|
+ u8 signature[3];
|
|
+ struct alps_protocol_info protocol_info;
|
|
+};
|
|
+
|
|
+struct alps_nibble_commands {
|
|
+ int command;
|
|
+ unsigned char data;
|
|
+};
|
|
+
|
|
+struct alps_bitmap_point {
|
|
+ int start_bit;
|
|
+ int num_bits;
|
|
+};
|
|
+
|
|
+struct alps_fields {
|
|
+ unsigned int x_map;
|
|
+ unsigned int y_map;
|
|
+ unsigned int fingers;
|
|
+ int pressure;
|
|
+ struct input_mt_pos st;
|
|
+ struct input_mt_pos mt[4];
|
|
+ unsigned int first_mp: 1;
|
|
+ unsigned int is_mp: 1;
|
|
+ unsigned int left: 1;
|
|
+ unsigned int right: 1;
|
|
+ unsigned int middle: 1;
|
|
+ unsigned int ts_left: 1;
|
|
+ unsigned int ts_right: 1;
|
|
+ unsigned int ts_middle: 1;
|
|
+};
|
|
+
|
|
+struct alps_data {
|
|
+ struct psmouse *psmouse;
|
|
+ struct input_dev *dev2;
|
|
+ struct input_dev *dev3;
|
|
+ char phys2[32];
|
|
+ char phys3[32];
|
|
+ struct delayed_work dev3_register_work;
|
|
+ const struct alps_nibble_commands *nibble_commands;
|
|
+ int addr_command;
|
|
+ u16 proto_version;
|
|
+ u8 byte0;
|
|
+ u8 mask0;
|
|
+ u8 dev_id[3];
|
|
+ u8 fw_ver[3];
|
|
+ int flags;
|
|
+ int x_max;
|
|
+ int y_max;
|
|
+ int x_bits;
|
|
+ int y_bits;
|
|
+ unsigned int x_res;
|
|
+ unsigned int y_res;
|
|
+ int (*hw_init)(struct psmouse *);
|
|
+ void (*process_packet)(struct psmouse *);
|
|
+ int (*decode_fields)(struct alps_fields *, unsigned char *, struct psmouse *);
|
|
+ void (*set_abs_params)(struct alps_data *, struct input_dev *);
|
|
+ int prev_fin;
|
|
+ int multi_packet;
|
|
+ int second_touch;
|
|
+ unsigned char multi_data[6];
|
|
+ struct alps_fields f;
|
|
+ u8 quirks;
|
|
+ struct timer_list timer;
|
|
+};
|
|
+
|
|
+struct byd_data {
|
|
+ struct timer_list timer;
|
|
+ struct psmouse *psmouse;
|
|
+ s32 abs_x;
|
|
+ s32 abs_y;
|
|
+ volatile long unsigned int last_touch_time;
|
|
+ bool btn_left;
|
|
+ bool btn_right;
|
|
+ bool touch;
|
|
+};
|
|
+
|
|
+struct finger_pos {
|
|
+ unsigned int x;
|
|
+ unsigned int y;
|
|
+};
|
|
+
|
|
+struct elantech_device_info {
|
|
+ unsigned char capabilities[3];
|
|
+ unsigned char samples[3];
|
|
+ unsigned char debug;
|
|
+ unsigned char hw_version;
|
|
+ unsigned int fw_version;
|
|
+ unsigned int x_res;
|
|
+ unsigned int y_res;
|
|
+ unsigned int bus;
|
|
+ bool paritycheck;
|
|
+ bool jumpy_cursor;
|
|
+ bool reports_pressure;
|
|
+ bool crc_enabled;
|
|
+ bool set_hw_resolution;
|
|
+ bool has_trackpoint;
|
|
+ int (*send_cmd)(struct psmouse *, unsigned char, unsigned char *);
|
|
+};
|
|
+
|
|
+struct elantech_data {
|
|
+ struct input_dev *tp_dev;
|
|
+ char tp_phys[32];
|
|
+ unsigned char reg_07;
|
|
+ unsigned char reg_10;
|
|
+ unsigned char reg_11;
|
|
+ unsigned char reg_20;
|
|
+ unsigned char reg_21;
|
|
+ unsigned char reg_22;
|
|
+ unsigned char reg_23;
|
|
+ unsigned char reg_24;
|
|
+ unsigned char reg_25;
|
|
+ unsigned char reg_26;
|
|
+ unsigned int single_finger_reports;
|
|
+ unsigned int y_max;
|
|
+ unsigned int width;
|
|
+ struct finger_pos mt[5];
|
|
+ unsigned char parity[256];
|
|
+ struct elantech_device_info info;
|
|
+ void (*original_set_rate)(struct psmouse *, unsigned int);
|
|
+};
|
|
+
|
|
+enum tp_mode {
|
|
+ IAP_MODE = 1,
|
|
+ MAIN_MODE = 2,
|
|
+};
|
|
+
|
|
+struct elan_transport_ops {
|
|
+ int (*initialize)(struct i2c_client *);
|
|
+ int (*sleep_control)(struct i2c_client *, bool);
|
|
+ int (*power_control)(struct i2c_client *, bool);
|
|
+ int (*set_mode)(struct i2c_client *, u8);
|
|
+ int (*calibrate)(struct i2c_client *);
|
|
+ int (*calibrate_result)(struct i2c_client *, u8 *);
|
|
+ int (*get_baseline_data)(struct i2c_client *, bool, u8 *);
|
|
+ int (*get_version)(struct i2c_client *, bool, u8 *);
|
|
+ int (*get_sm_version)(struct i2c_client *, u16 *, u8 *, u8 *);
|
|
+ int (*get_checksum)(struct i2c_client *, bool, u16 *);
|
|
+ int (*get_product_id)(struct i2c_client *, u16 *);
|
|
+ int (*get_max)(struct i2c_client *, unsigned int *, unsigned int *);
|
|
+ int (*get_resolution)(struct i2c_client *, u8 *, u8 *);
|
|
+ int (*get_num_traces)(struct i2c_client *, unsigned int *, unsigned int *);
|
|
+ int (*iap_get_mode)(struct i2c_client *, enum tp_mode *);
|
|
+ int (*iap_reset)(struct i2c_client *);
|
|
+ int (*prepare_fw_update)(struct i2c_client *);
|
|
+ int (*write_fw_block)(struct i2c_client *, const u8 *, u16, int);
|
|
+ int (*finish_fw_update)(struct i2c_client *, struct completion *);
|
|
+ int (*get_report)(struct i2c_client *, u8 *);
|
|
+ int (*get_pressure_adjustment)(struct i2c_client *, int *);
|
|
+ int (*get_pattern)(struct i2c_client *, u8 *);
|
|
+};
|
|
+
|
|
+struct elantech_attr_data {
|
|
+ size_t field_offset;
|
|
+ unsigned char reg;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ ELANTECH_SMBUS_NOT_SET = -1,
|
|
+ ELANTECH_SMBUS_OFF = 0,
|
|
+ ELANTECH_SMBUS_ON = 1,
|
|
+};
|
|
+
|
|
+struct ps2pp_info {
|
|
+ u8 model;
|
|
+ u8 kind;
|
|
+ u16 features;
|
|
+};
|
|
+
|
|
+struct lifebook_data {
|
|
+ struct input_dev *dev2;
|
|
+ char phys[32];
|
|
+};
|
|
+
|
|
+struct fsp_data {
|
|
+ unsigned char ver;
|
|
+ unsigned char rev;
|
|
+ unsigned int buttons;
|
|
+ unsigned int flags;
|
|
+ bool vscroll;
|
|
+ bool hscroll;
|
|
+ unsigned char last_reg;
|
|
+ unsigned char last_val;
|
|
+ unsigned int last_mt_fgr;
|
|
+};
|
|
+
|
|
+struct trackpoint_data {
|
|
+ u8 variant_id;
|
|
+ u8 firmware_id;
|
|
+ u8 sensitivity;
|
|
+ u8 speed;
|
|
+ u8 inertia;
|
|
+ u8 reach;
|
|
+ u8 draghys;
|
|
+ u8 mindrag;
|
|
+ u8 thresh;
|
|
+ u8 upthresh;
|
|
+ u8 ztime;
|
|
+ u8 jenks;
|
|
+ u8 drift_time;
|
|
+ bool press_to_select;
|
|
+ bool skipback;
|
|
+ bool ext_dev;
|
|
+};
|
|
+
|
|
+struct trackpoint_attr_data {
|
|
+ size_t field_offset;
|
|
+ u8 command;
|
|
+ u8 mask;
|
|
+ bool inverted;
|
|
+ u8 power_on_default;
|
|
+};
|
|
+
|
|
+struct cytp_contact {
|
|
+ int x;
|
|
+ int y;
|
|
+ int z;
|
|
+};
|
|
+
|
|
+struct cytp_report_data {
|
|
+ int contact_cnt;
|
|
+ struct cytp_contact contacts[2];
|
|
+ unsigned int left: 1;
|
|
+ unsigned int right: 1;
|
|
+ unsigned int middle: 1;
|
|
+ unsigned int tap: 1;
|
|
+};
|
|
+
|
|
+struct cytp_data {
|
|
+ int fw_version;
|
|
+ int pkt_size;
|
|
+ int mode;
|
|
+ int tp_min_pressure;
|
|
+ int tp_max_pressure;
|
|
+ int tp_width;
|
|
+ int tp_high;
|
|
+ int tp_max_abs_x;
|
|
+ int tp_max_abs_y;
|
|
+ int tp_res_x;
|
|
+ int tp_res_y;
|
|
+ int tp_metrics_supported;
|
|
+};
|
|
+
|
|
+struct vmmouse_data {
|
|
+ struct input_dev *abs_dev;
|
|
+ char phys[32];
|
|
+ char dev_name[128];
|
|
+};
|
|
+
|
|
+struct psmouse_smbus_dev {
|
|
+ struct i2c_board_info board;
|
|
+ struct psmouse *psmouse;
|
|
+ struct i2c_client *client;
|
|
+ struct list_head node;
|
|
+ bool dead;
|
|
+ bool need_deactivate;
|
|
+};
|
|
+
|
|
+struct psmouse_smbus_removal_work {
|
|
+ struct work_struct work;
|
|
+ struct i2c_client *client;
|
|
+};
|
|
+
|
|
+struct touchscreen_properties {
|
|
+ unsigned int max_x;
|
|
+ unsigned int max_y;
|
|
+ bool invert_x;
|
|
+ bool invert_y;
|
|
+ bool swap_x_y;
|
|
+};
|
|
+
|
|
+struct rtc;
|
|
+
|
|
+struct trace_event_raw_rtc_time_alarm_class {
|
|
+ struct trace_entry ent;
|
|
+ time64_t secs;
|
|
+ int err;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_rtc_irq_set_freq {
|
|
+ struct trace_entry ent;
|
|
+ int freq;
|
|
+ int err;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_rtc_irq_set_state {
|
|
+ struct trace_entry ent;
|
|
+ int enabled;
|
|
+ int err;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_rtc_alarm_irq_enable {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int enabled;
|
|
+ int err;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_rtc_offset_class {
|
|
+ struct trace_entry ent;
|
|
+ long int offset;
|
|
+ int err;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_rtc_timer_class {
|
|
+ struct trace_entry ent;
|
|
+ struct rtc_timer *timer;
|
|
+ ktime_t expires;
|
|
+ ktime_t period;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_rtc_time_alarm_class {};
|
|
+
|
|
+struct trace_event_data_offsets_rtc_irq_set_freq {};
|
|
+
|
|
+struct trace_event_data_offsets_rtc_irq_set_state {};
|
|
+
|
|
+struct trace_event_data_offsets_rtc_alarm_irq_enable {};
|
|
+
|
|
+struct trace_event_data_offsets_rtc_offset_class {};
|
|
+
|
|
+struct trace_event_data_offsets_rtc_timer_class {};
|
|
+
|
|
+enum {
|
|
+ none = 0,
|
|
+ day = 1,
|
|
+ month = 2,
|
|
+ year = 3,
|
|
+};
|
|
+
|
|
+struct nvmem_cell_info {
|
|
+ const char *name;
|
|
+ unsigned int offset;
|
|
+ unsigned int bytes;
|
|
+ unsigned int bit_offset;
|
|
+ unsigned int nbits;
|
|
+};
|
|
+
|
|
+typedef int (*nvmem_reg_read_t)(void *, unsigned int, void *, size_t);
|
|
+
|
|
+typedef int (*nvmem_reg_write_t)(void *, unsigned int, void *, size_t);
|
|
+
|
|
+struct nvmem_config {
|
|
+ struct device *dev;
|
|
+ const char *name;
|
|
+ int id;
|
|
+ struct module *owner;
|
|
+ const struct nvmem_cell_info *cells;
|
|
+ int ncells;
|
|
+ bool read_only;
|
|
+ bool root_only;
|
|
+ nvmem_reg_read_t reg_read;
|
|
+ nvmem_reg_write_t reg_write;
|
|
+ int size;
|
|
+ int word_size;
|
|
+ int stride;
|
|
+ void *priv;
|
|
+ bool compat;
|
|
+ struct device *base_dev;
|
|
+};
|
|
+
|
|
+struct cmos_rtc_board_info {
|
|
+ void (*wake_on)(struct device *);
|
|
+ void (*wake_off)(struct device *);
|
|
+ u32 flags;
|
|
+ int address_space;
|
|
+ u8 rtc_day_alarm;
|
|
+ u8 rtc_mon_alarm;
|
|
+ u8 rtc_century;
|
|
+};
|
|
+
|
|
+struct cmos_rtc {
|
|
+ struct rtc_device *rtc;
|
|
+ struct device *dev;
|
|
+ int irq;
|
|
+ struct resource *iomem;
|
|
+ time64_t alarm_expires;
|
|
+ void (*wake_on)(struct device *);
|
|
+ void (*wake_off)(struct device *);
|
|
+ u8 enabled_wake;
|
|
+ u8 suspend_ctrl;
|
|
+ u8 day_alrm;
|
|
+ u8 mon_alrm;
|
|
+ u8 century;
|
|
+ struct rtc_wkalrm saved_wkalrm;
|
|
+};
|
|
+
|
|
+struct i2c_devinfo {
|
|
+ struct list_head list;
|
|
+ int busnum;
|
|
+ struct i2c_board_info board_info;
|
|
+};
|
|
+
|
|
+struct i2c_device_identity {
|
|
+ u16 manufacturer_id;
|
|
+ u16 part_id;
|
|
+ u8 die_revision;
|
|
+};
|
|
+
|
|
+struct i2c_timings {
|
|
+ u32 bus_freq_hz;
|
|
+ u32 scl_rise_ns;
|
|
+ u32 scl_fall_ns;
|
|
+ u32 scl_int_delay_ns;
|
|
+ u32 sda_fall_ns;
|
|
+ u32 sda_hold_ns;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_i2c_write {
|
|
+ struct trace_entry ent;
|
|
+ int adapter_nr;
|
|
+ __u16 msg_nr;
|
|
+ __u16 addr;
|
|
+ __u16 flags;
|
|
+ __u16 len;
|
|
+ u32 __data_loc_buf;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_i2c_read {
|
|
+ struct trace_entry ent;
|
|
+ int adapter_nr;
|
|
+ __u16 msg_nr;
|
|
+ __u16 addr;
|
|
+ __u16 flags;
|
|
+ __u16 len;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_i2c_reply {
|
|
+ struct trace_entry ent;
|
|
+ int adapter_nr;
|
|
+ __u16 msg_nr;
|
|
+ __u16 addr;
|
|
+ __u16 flags;
|
|
+ __u16 len;
|
|
+ u32 __data_loc_buf;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_i2c_result {
|
|
+ struct trace_entry ent;
|
|
+ int adapter_nr;
|
|
+ __u16 nr_msgs;
|
|
+ __s16 ret;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_i2c_write {
|
|
+ u32 buf;
|
|
+};
+
+struct trace_event_data_offsets_i2c_read {};
+
+struct trace_event_data_offsets_i2c_reply {
+ u32 buf;
+};
+
+struct trace_event_data_offsets_i2c_result {};
+
+struct class_compat___2;
+
+struct i2c_cmd_arg {
+ unsigned int cmd;
+ void *arg;
+};
+
+struct i2c_smbus_alert_setup {
+ int irq;
+};
+
+struct trace_event_raw_smbus_write {
+ struct trace_entry ent;
+ int adapter_nr;
+ __u16 addr;
+ __u16 flags;
+ __u8 command;
+ __u8 len;
+ __u32 protocol;
+ __u8 buf[34];
+ char __data[0];
+};
+
+struct trace_event_raw_smbus_read {
+ struct trace_entry ent;
+ int adapter_nr;
+ __u16 flags;
+ __u16 addr;
+ __u8 command;
+ __u32 protocol;
+ __u8 buf[34];
+ char __data[0];
+};
+
+struct trace_event_raw_smbus_reply {
+ struct trace_entry ent;
+ int adapter_nr;
+ __u16 addr;
+ __u16 flags;
+ __u8 command;
+ __u8 len;
+ __u32 protocol;
+ __u8 buf[34];
+ char __data[0];
+};
+
+struct trace_event_raw_smbus_result {
+ struct trace_entry ent;
+ int adapter_nr;
+ __u16 addr;
+ __u16 flags;
+ __u8 read_write;
+ __u8 command;
+ __s16 res;
+ __u32 protocol;
+ char __data[0];
+};
+
+struct trace_event_data_offsets_smbus_write {};
+
+struct trace_event_data_offsets_smbus_read {};
+
+struct trace_event_data_offsets_smbus_reply {};
+
+struct trace_event_data_offsets_smbus_result {};
+
+struct i2c_acpi_handler_data {
+ struct acpi_connection_info info;
+ struct i2c_adapter *adapter;
+};
+
+struct gsb_buffer {
+ u8 status;
+ u8 len;
+ union {
+ u16 wdata;
+ u8 bdata;
+ u8 data[0];
+ };
+};
+
+struct i2c_acpi_lookup {
+ struct i2c_board_info *info;
+ acpi_handle adapter_handle;
+ acpi_handle device_handle;
+ acpi_handle search_handle;
+ int n;
+ int index;
+ u32 speed;
+ u32 min_speed;
+ u32 force_speed;
+};
+
+struct cec_msg {
+ __u64 tx_ts;
+ __u64 rx_ts;
+ __u32 len;
+ __u32 timeout;
+ __u32 sequence;
+ __u32 flags;
+ __u8 msg[16];
+ __u8 reply;
+ __u8 rx_status;
+ __u8 tx_status;
+ __u8 tx_arb_lost_cnt;
+ __u8 tx_nack_cnt;
+ __u8 tx_low_drive_cnt;
+ __u8 tx_error_cnt;
+};
+
+struct cec_log_addrs {
+ __u8 log_addr[4];
+ __u16 log_addr_mask;
+ __u8 cec_version;
+ __u8 num_log_addrs;
+ __u32 vendor_id;
+ __u32 flags;
+ char osd_name[15];
+ __u8 primary_device_type[4];
+ __u8 log_addr_type[4];
+ __u8 all_device_types[4];
+ __u8 features[48];
+};
+
+struct cec_event_state_change {
+ __u16 phys_addr;
+ __u16 log_addr_mask;
+};
+
+struct cec_event_lost_msgs {
+ __u32 lost_msgs;
+};
+
+struct cec_event {
+ __u64 ts;
+ __u32 event;
+ __u32 flags;
+ union {
+ struct cec_event_state_change state_change;
+ struct cec_event_lost_msgs lost_msgs;
+ __u32 raw[16];
+ };
+};
+
+enum rc_proto {
+ RC_PROTO_UNKNOWN = 0,
+ RC_PROTO_OTHER = 1,
+ RC_PROTO_RC5 = 2,
+ RC_PROTO_RC5X_20 = 3,
+ RC_PROTO_RC5_SZ = 4,
+ RC_PROTO_JVC = 5,
+ RC_PROTO_SONY12 = 6,
+ RC_PROTO_SONY15 = 7,
+ RC_PROTO_SONY20 = 8,
+ RC_PROTO_NEC = 9,
+ RC_PROTO_NECX = 10,
+ RC_PROTO_NEC32 = 11,
+ RC_PROTO_SANYO = 12,
+ RC_PROTO_MCIR2_KBD = 13,
+ RC_PROTO_MCIR2_MSE = 14,
+ RC_PROTO_RC6_0 = 15,
+ RC_PROTO_RC6_6A_20 = 16,
+ RC_PROTO_RC6_6A_24 = 17,
+ RC_PROTO_RC6_6A_32 = 18,
+ RC_PROTO_RC6_MCE = 19,
+ RC_PROTO_SHARP = 20,
+ RC_PROTO_XMP = 21,
+ RC_PROTO_CEC = 22,
+ RC_PROTO_IMON = 23,
+};
+
+struct rc_map_table {
+ u32 scancode;
+ u32 keycode;
+};
+
+struct rc_map {
+ struct rc_map_table *scan;
+ unsigned int size;
+ unsigned int len;
+ unsigned int alloc;
+ enum rc_proto rc_proto;
+ const char *name;
+ spinlock_t lock;
+};
+
+enum rc_driver_type {
|
|
+ RC_DRIVER_SCANCODE = 0,
|
|
+ RC_DRIVER_IR_RAW = 1,
|
|
+ RC_DRIVER_IR_RAW_TX = 2,
|
|
+};
|
|
+
|
|
+struct rc_scancode_filter {
|
|
+ u32 data;
|
|
+ u32 mask;
|
|
+};
|
|
+
|
|
+struct ir_raw_event_ctrl;
|
|
+
|
|
+struct rc_dev {
|
|
+ struct device dev;
|
|
+ bool managed_alloc;
|
|
+ const struct attribute_group *sysfs_groups[5];
|
|
+ const char *device_name;
|
|
+ const char *input_phys;
|
|
+ struct input_id input_id;
|
|
+ const char *driver_name;
|
|
+ const char *map_name;
|
|
+ struct rc_map rc_map;
|
|
+ struct mutex lock;
|
|
+ unsigned int minor;
|
|
+ struct ir_raw_event_ctrl *raw;
|
|
+ struct input_dev *input_dev;
|
|
+ enum rc_driver_type driver_type;
|
|
+ bool idle;
|
|
+ bool encode_wakeup;
|
|
+ u64 allowed_protocols;
|
|
+ u64 enabled_protocols;
|
|
+ u64 allowed_wakeup_protocols;
|
|
+ enum rc_proto wakeup_protocol;
|
|
+ struct rc_scancode_filter scancode_filter;
|
|
+ struct rc_scancode_filter scancode_wakeup_filter;
|
|
+ u32 scancode_mask;
|
|
+ u32 users;
|
|
+ void *priv;
|
|
+ spinlock_t keylock;
|
|
+ bool keypressed;
|
|
+ long unsigned int keyup_jiffies;
|
|
+ struct timer_list timer_keyup;
|
|
+ struct timer_list timer_repeat;
|
|
+ u32 last_keycode;
|
|
+ enum rc_proto last_protocol;
|
|
+ u32 last_scancode;
|
|
+ u8 last_toggle;
|
|
+ u32 timeout;
|
|
+ u32 min_timeout;
|
|
+ u32 max_timeout;
|
|
+ u32 rx_resolution;
|
|
+ u32 tx_resolution;
|
|
+ struct device lirc_dev;
|
|
+ struct cdev lirc_cdev;
|
|
+ ktime_t gap_start;
|
|
+ u64 gap_duration;
|
|
+ bool gap;
|
|
+ spinlock_t lirc_fh_lock;
|
|
+ struct list_head lirc_fh;
|
|
+ bool registered;
|
|
+ int (*change_protocol)(struct rc_dev *, u64 *);
|
|
+ int (*open)(struct rc_dev *);
|
|
+ void (*close)(struct rc_dev *);
|
|
+ int (*s_tx_mask)(struct rc_dev *, u32);
|
|
+ int (*s_tx_carrier)(struct rc_dev *, u32);
|
|
+ int (*s_tx_duty_cycle)(struct rc_dev *, u32);
|
|
+ int (*s_rx_carrier_range)(struct rc_dev *, u32, u32);
|
|
+ int (*tx_ir)(struct rc_dev *, unsigned int *, unsigned int);
|
|
+ void (*s_idle)(struct rc_dev *, bool);
|
|
+ int (*s_learning_mode)(struct rc_dev *, int);
|
|
+ int (*s_carrier_report)(struct rc_dev *, int);
|
|
+ int (*s_filter)(struct rc_dev *, struct rc_scancode_filter *);
|
|
+ int (*s_wakeup_filter)(struct rc_dev *, struct rc_scancode_filter *);
|
|
+ int (*s_timeout)(struct rc_dev *, unsigned int);
|
|
+};
|
|
+
|
|
+struct cec_devnode {
|
|
+ struct device dev;
|
|
+ struct cdev cdev;
|
|
+ int minor;
|
|
+ bool registered;
|
|
+ bool unregistered;
|
|
+ struct list_head fhs;
|
|
+ struct mutex lock;
|
|
+};
|
|
+
|
|
+struct cec_adapter;
|
|
+
|
|
+struct cec_fh;
|
|
+
|
|
+struct cec_data {
|
|
+ struct list_head list;
|
|
+ struct list_head xfer_list;
|
|
+ struct cec_adapter *adap;
|
|
+ struct cec_msg msg;
|
|
+ struct cec_fh *fh;
|
|
+ struct delayed_work work;
|
|
+ struct completion c;
|
|
+ u8 attempts;
|
|
+ bool blocking;
|
|
+ bool completed;
|
|
+};
|
|
+
|
|
+struct cec_adap_ops;
|
|
+
|
|
+struct cec_adapter {
|
|
+ struct module *owner;
|
|
+ char name[32];
|
|
+ struct cec_devnode devnode;
|
|
+ struct mutex lock;
|
|
+ struct rc_dev *rc;
|
|
+ struct list_head transmit_queue;
|
|
+ unsigned int transmit_queue_sz;
|
|
+ struct list_head wait_queue;
|
|
+ struct cec_data *transmitting;
|
|
+ bool transmit_in_progress;
|
|
+ struct task_struct *kthread_config;
|
|
+ struct completion config_completion;
|
|
+ struct task_struct *kthread;
|
|
+ wait_queue_head_t kthread_waitq;
|
|
+ wait_queue_head_t waitq;
|
|
+ const struct cec_adap_ops *ops;
|
|
+ void *priv;
|
|
+ u32 capabilities;
|
|
+ u8 available_log_addrs;
|
|
+ u16 phys_addr;
|
|
+ bool needs_hpd;
|
|
+ bool is_configuring;
|
|
+ bool is_configured;
|
|
+ bool cec_pin_is_high;
|
|
+ u8 last_initiator;
|
|
+ u32 monitor_all_cnt;
|
|
+ u32 monitor_pin_cnt;
|
|
+ u32 follower_cnt;
|
|
+ struct cec_fh *cec_follower;
|
|
+ struct cec_fh *cec_initiator;
|
|
+ bool passthrough;
|
|
+ struct cec_log_addrs log_addrs;
|
|
+ u32 tx_timeouts;
|
|
+ struct dentry *cec_dir;
|
|
+ struct dentry *status_file;
|
|
+ struct dentry *error_inj_file;
|
|
+ u16 phys_addrs[15];
|
|
+ u32 sequence;
|
|
+ char device_name[32];
|
|
+ char input_phys[32];
|
|
+ char input_drv[32];
|
|
+};
|
|
+
|
|
+struct cec_event_entry {
|
|
+ struct list_head list;
|
|
+ struct cec_event ev;
|
|
+};
|
|
+
|
|
+struct cec_fh {
|
|
+ struct list_head list;
|
|
+ struct list_head xfer_list;
|
|
+ struct cec_adapter *adap;
|
|
+ u8 mode_initiator;
|
|
+ u8 mode_follower;
|
|
+ wait_queue_head_t wait;
|
|
+ struct mutex lock;
|
|
+ struct list_head events[8];
|
|
+ u16 queued_events[8];
|
|
+ unsigned int total_queued_events;
|
|
+ struct cec_event_entry core_events[2];
|
|
+ struct list_head msgs;
|
|
+ unsigned int queued_msgs;
|
|
+};
|
|
+
|
|
+struct cec_adap_ops {
|
|
+ int (*adap_enable)(struct cec_adapter *, bool);
|
|
+ int (*adap_monitor_all_enable)(struct cec_adapter *, bool);
|
|
+ int (*adap_monitor_pin_enable)(struct cec_adapter *, bool);
|
|
+ int (*adap_log_addr)(struct cec_adapter *, u8);
|
|
+ int (*adap_transmit)(struct cec_adapter *, u8, u32, struct cec_msg *);
|
|
+ void (*adap_status)(struct cec_adapter *, struct seq_file *);
|
|
+ void (*adap_free)(struct cec_adapter *);
|
|
+ int (*error_inj_show)(struct cec_adapter *, struct seq_file *);
|
|
+ bool (*error_inj_parse_line)(struct cec_adapter *, char *);
|
|
+ int (*received)(struct cec_adapter *, struct cec_msg *);
|
|
+};
|
|
+
|
|
+struct est_timings {
|
|
+ u8 t1;
|
|
+ u8 t2;
|
|
+ u8 mfg_rsvd;
|
|
+};
|
|
+
|
|
+struct std_timing {
|
|
+ u8 hsize;
|
|
+ u8 vfreq_aspect;
|
|
+};
|
|
+
|
|
+struct detailed_pixel_timing {
|
|
+ u8 hactive_lo;
|
|
+ u8 hblank_lo;
|
|
+ u8 hactive_hblank_hi;
|
|
+ u8 vactive_lo;
|
|
+ u8 vblank_lo;
|
|
+ u8 vactive_vblank_hi;
|
|
+ u8 hsync_offset_lo;
|
|
+ u8 hsync_pulse_width_lo;
|
|
+ u8 vsync_offset_pulse_width_lo;
|
|
+ u8 hsync_vsync_offset_pulse_width_hi;
|
|
+ u8 width_mm_lo;
|
|
+ u8 height_mm_lo;
|
|
+ u8 width_height_mm_hi;
|
|
+ u8 hborder;
|
|
+ u8 vborder;
|
|
+ u8 misc;
|
|
+};
|
|
+
|
|
+struct detailed_data_string {
|
|
+ u8 str[13];
|
|
+};
|
|
+
|
|
+struct detailed_data_monitor_range {
|
|
+ u8 min_vfreq;
|
|
+ u8 max_vfreq;
|
|
+ u8 min_hfreq_khz;
|
|
+ u8 max_hfreq_khz;
|
|
+ u8 pixel_clock_mhz;
|
|
+ u8 flags;
|
|
+ union {
|
|
+ struct {
|
|
+ u8 reserved;
|
|
+ u8 hfreq_start_khz;
|
|
+ u8 c;
|
|
+ __le16 m;
|
|
+ u8 k;
|
|
+ u8 j;
|
|
+ } __attribute__((packed)) gtf2;
|
|
+ struct {
|
|
+ u8 version;
|
|
+ u8 data1;
|
|
+ u8 data2;
|
|
+ u8 supported_aspects;
|
|
+ u8 flags;
|
|
+ u8 supported_scalings;
|
|
+ u8 preferred_refresh;
|
|
+ } cvt;
|
|
+ } formula;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct detailed_data_wpindex {
|
|
+ u8 white_yx_lo;
|
|
+ u8 white_x_hi;
|
|
+ u8 white_y_hi;
|
|
+ u8 gamma;
|
|
+};
|
|
+
|
|
+struct cvt_timing {
|
|
+ u8 code[3];
|
|
+};
|
|
+
|
|
+struct detailed_non_pixel {
|
|
+ u8 pad1;
|
|
+ u8 type;
|
|
+ u8 pad2;
|
|
+ union {
|
|
+ struct detailed_data_string str;
|
|
+ struct detailed_data_monitor_range range;
|
|
+ struct detailed_data_wpindex color;
|
|
+ struct std_timing timings[6];
|
|
+ struct cvt_timing cvt[4];
|
|
+ } data;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct detailed_timing {
|
|
+ __le16 pixel_clock;
|
|
+ union {
|
|
+ struct detailed_pixel_timing pixel_data;
|
|
+ struct detailed_non_pixel other_data;
|
|
+ } data;
|
|
+};
|
|
+
|
|
+struct edid {
|
|
+ u8 header[8];
|
|
+ u8 mfg_id[2];
|
|
+ u8 prod_code[2];
|
|
+ u32 serial;
|
|
+ u8 mfg_week;
|
|
+ u8 mfg_year;
|
|
+ u8 version;
|
|
+ u8 revision;
|
|
+ u8 input;
|
|
+ u8 width_cm;
|
|
+ u8 height_cm;
|
|
+ u8 gamma;
|
|
+ u8 features;
|
|
+ u8 red_green_lo;
|
|
+ u8 black_white_lo;
|
|
+ u8 red_x;
|
|
+ u8 red_y;
|
|
+ u8 green_x;
|
|
+ u8 green_y;
|
|
+ u8 blue_x;
|
|
+ u8 blue_y;
|
|
+ u8 white_x;
|
|
+ u8 white_y;
|
|
+ struct est_timings established_timings;
|
|
+ struct std_timing standard_timings[8];
|
|
+ struct detailed_timing detailed_timings[4];
|
|
+ u8 extensions;
|
|
+ u8 checksum;
|
|
+};
|
|
+
|
|
+struct cec_msg_entry {
|
|
+ struct list_head list;
|
|
+ struct cec_msg msg;
|
|
+};
|
|
+
|
|
+struct cec_caps {
|
|
+ char driver[32];
|
|
+ char name[32];
|
|
+ __u32 available_log_addrs;
|
|
+ __u32 capabilities;
|
|
+ __u32 version;
|
|
+};
|
|
+
|
|
+struct pps_ktime {
|
|
+ __s64 sec;
|
|
+ __s32 nsec;
|
|
+ __u32 flags;
|
|
+};
|
|
+
|
|
+struct pps_ktime_compat {
|
|
+ __s64 sec;
|
|
+ __s32 nsec;
|
|
+ __u32 flags;
|
|
+};
|
|
+
|
|
+struct pps_kinfo {
|
|
+ __u32 assert_sequence;
|
|
+ __u32 clear_sequence;
|
|
+ struct pps_ktime assert_tu;
|
|
+ struct pps_ktime clear_tu;
|
|
+ int current_mode;
|
|
+};
|
|
+
|
|
+struct pps_kinfo_compat {
|
|
+ __u32 assert_sequence;
|
|
+ __u32 clear_sequence;
|
|
+ struct pps_ktime_compat assert_tu;
|
|
+ struct pps_ktime_compat clear_tu;
|
|
+ int current_mode;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct pps_kparams {
|
|
+ int api_version;
|
|
+ int mode;
|
|
+ struct pps_ktime assert_off_tu;
|
|
+ struct pps_ktime clear_off_tu;
|
|
+};
|
|
+
|
|
+struct pps_fdata {
|
|
+ struct pps_kinfo info;
|
|
+ struct pps_ktime timeout;
|
|
+};
|
|
+
|
|
+struct pps_fdata_compat {
|
|
+ struct pps_kinfo_compat info;
|
|
+ struct pps_ktime_compat timeout;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct pps_bind_args {
|
|
+ int tsformat;
|
|
+ int edge;
|
|
+ int consumer;
|
|
+};
|
|
+
|
|
+struct pps_device;
|
|
+
|
|
+struct pps_source_info {
|
|
+ char name[32];
|
|
+ char path[32];
|
|
+ int mode;
|
|
+ void (*echo)(struct pps_device *, int, void *);
|
|
+ struct module *owner;
|
|
+ struct device *dev;
|
|
+};
|
|
+
|
|
+struct pps_device {
|
|
+ struct pps_source_info info;
|
|
+ struct pps_kparams params;
|
|
+ __u32 assert_sequence;
|
|
+ __u32 clear_sequence;
|
|
+ struct pps_ktime assert_tu;
|
|
+ struct pps_ktime clear_tu;
|
|
+ int current_mode;
|
|
+ unsigned int last_ev;
|
|
+ wait_queue_head_t queue;
|
|
+ unsigned int id;
|
|
+ const void *lookup_cookie;
|
|
+ struct cdev cdev;
|
|
+ struct device *dev;
|
|
+ struct fasync_struct *async_queue;
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct pps_event_time {
|
|
+ struct timespec64 ts_real;
|
|
+};
|
|
+
|
|
+struct ptp_extts_event {
|
|
+ struct ptp_clock_time t;
|
|
+ unsigned int index;
|
|
+ unsigned int flags;
|
|
+ unsigned int rsv[2];
|
|
+};
|
|
+
|
|
+enum ptp_clock_events {
|
|
+ PTP_CLOCK_ALARM = 0,
|
|
+ PTP_CLOCK_EXTTS = 1,
|
|
+ PTP_CLOCK_PPS = 2,
|
|
+ PTP_CLOCK_PPSUSR = 3,
|
|
+};
|
|
+
|
|
+struct ptp_clock_event {
|
|
+ int type;
|
|
+ int index;
|
|
+ union {
|
|
+ u64 timestamp;
|
|
+ struct pps_event_time pps_times;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct timestamp_event_queue {
|
|
+ struct ptp_extts_event buf[128];
|
|
+ int head;
|
|
+ int tail;
|
|
+ spinlock_t lock;
|
|
+};
|
|
+
|
|
+struct ptp_clock___2 {
|
|
+ struct posix_clock clock;
|
|
+ struct device dev;
|
|
+ struct ptp_clock_info *info;
|
|
+ dev_t devid;
|
|
+ int index;
|
|
+ struct pps_device *pps_source;
|
|
+ long int dialed_frequency;
|
|
+ struct timestamp_event_queue tsevq;
|
|
+ struct mutex tsevq_mux;
|
|
+ struct mutex pincfg_mux;
|
|
+ wait_queue_head_t tsev_wq;
|
|
+ int defunct;
|
|
+ struct device_attribute *pin_dev_attr;
|
|
+ struct attribute **pin_attr;
|
|
+ struct attribute_group pin_attr_group;
|
|
+ const struct attribute_group *pin_attr_groups[2];
|
|
+ struct kthread_worker *kworker;
|
|
+ struct kthread_delayed_work aux_work;
|
|
+};
|
|
+
|
|
+struct ptp_clock_caps {
|
|
+ int max_adj;
|
|
+ int n_alarm;
|
|
+ int n_ext_ts;
|
|
+ int n_per_out;
|
|
+ int pps;
|
|
+ int n_pins;
|
|
+ int cross_timestamping;
|
|
+ int rsv[13];
|
|
+};
|
|
+
|
|
+struct ptp_sys_offset {
|
|
+ unsigned int n_samples;
|
|
+ unsigned int rsv[3];
|
|
+ struct ptp_clock_time ts[51];
|
|
+};
|
|
+
|
|
+struct ptp_sys_offset_precise {
|
|
+ struct ptp_clock_time device;
|
|
+ struct ptp_clock_time sys_realtime;
|
|
+ struct ptp_clock_time sys_monoraw;
|
|
+ unsigned int rsv[4];
|
|
+};
|
|
+
|
|
+enum power_supply_notifier_events {
|
|
+ PSY_EVENT_PROP_CHANGED = 0,
|
|
+};
|
|
+
|
|
+struct power_supply_battery_info {
|
|
+ int energy_full_design_uwh;
|
|
+ int charge_full_design_uah;
|
|
+ int voltage_min_design_uv;
|
|
+ int precharge_current_ua;
|
|
+ int charge_term_current_ua;
|
|
+ int constant_charge_current_max_ua;
|
|
+ int constant_charge_voltage_max_uv;
|
|
+};
|
|
+
|
|
+struct psy_am_i_supplied_data {
|
|
+ struct power_supply *psy;
|
|
+ unsigned int count;
|
|
+};
|
|
+
|
|
+enum hwmon_sensor_types {
|
|
+ hwmon_chip = 0,
|
|
+ hwmon_temp = 1,
|
|
+ hwmon_in = 2,
|
|
+ hwmon_curr = 3,
|
|
+ hwmon_power = 4,
|
|
+ hwmon_energy = 5,
|
|
+ hwmon_humidity = 6,
|
|
+ hwmon_fan = 7,
|
|
+ hwmon_pwm = 8,
|
|
+ hwmon_max = 9,
|
|
+};
|
|
+
|
|
+enum hwmon_chip_attributes {
|
|
+ hwmon_chip_temp_reset_history = 0,
|
|
+ hwmon_chip_in_reset_history = 1,
|
|
+ hwmon_chip_curr_reset_history = 2,
|
|
+ hwmon_chip_power_reset_history = 3,
|
|
+ hwmon_chip_register_tz = 4,
|
|
+ hwmon_chip_update_interval = 5,
|
|
+ hwmon_chip_alarms = 6,
|
|
+};
|
|
+
|
|
+enum hwmon_temp_attributes {
|
|
+ hwmon_temp_input = 0,
|
|
+ hwmon_temp_type = 1,
|
|
+ hwmon_temp_lcrit = 2,
|
|
+ hwmon_temp_lcrit_hyst = 3,
|
|
+ hwmon_temp_min = 4,
|
|
+ hwmon_temp_min_hyst = 5,
|
|
+ hwmon_temp_max = 6,
|
|
+ hwmon_temp_max_hyst = 7,
|
|
+ hwmon_temp_crit = 8,
|
|
+ hwmon_temp_crit_hyst = 9,
|
|
+ hwmon_temp_emergency = 10,
|
|
+ hwmon_temp_emergency_hyst = 11,
|
|
+ hwmon_temp_alarm = 12,
|
|
+ hwmon_temp_lcrit_alarm = 13,
|
|
+ hwmon_temp_min_alarm = 14,
|
|
+ hwmon_temp_max_alarm = 15,
|
|
+ hwmon_temp_crit_alarm = 16,
|
|
+ hwmon_temp_emergency_alarm = 17,
|
|
+ hwmon_temp_fault = 18,
|
|
+ hwmon_temp_offset = 19,
|
|
+ hwmon_temp_label = 20,
|
|
+ hwmon_temp_lowest = 21,
|
|
+ hwmon_temp_highest = 22,
|
|
+ hwmon_temp_reset_history = 23,
|
|
+};
|
|
+
|
|
+enum hwmon_in_attributes {
|
|
+ hwmon_in_input = 0,
|
|
+ hwmon_in_min = 1,
|
|
+ hwmon_in_max = 2,
|
|
+ hwmon_in_lcrit = 3,
|
|
+ hwmon_in_crit = 4,
|
|
+ hwmon_in_average = 5,
|
|
+ hwmon_in_lowest = 6,
|
|
+ hwmon_in_highest = 7,
|
|
+ hwmon_in_reset_history = 8,
|
|
+ hwmon_in_label = 9,
|
|
+ hwmon_in_alarm = 10,
|
|
+ hwmon_in_min_alarm = 11,
|
|
+ hwmon_in_max_alarm = 12,
|
|
+ hwmon_in_lcrit_alarm = 13,
|
|
+ hwmon_in_crit_alarm = 14,
|
|
+};
|
|
+
|
|
+enum hwmon_curr_attributes {
|
|
+ hwmon_curr_input = 0,
|
|
+ hwmon_curr_min = 1,
|
|
+ hwmon_curr_max = 2,
|
|
+ hwmon_curr_lcrit = 3,
|
|
+ hwmon_curr_crit = 4,
|
|
+ hwmon_curr_average = 5,
|
|
+ hwmon_curr_lowest = 6,
|
|
+ hwmon_curr_highest = 7,
|
|
+ hwmon_curr_reset_history = 8,
|
|
+ hwmon_curr_label = 9,
|
|
+ hwmon_curr_alarm = 10,
|
|
+ hwmon_curr_min_alarm = 11,
|
|
+ hwmon_curr_max_alarm = 12,
|
|
+ hwmon_curr_lcrit_alarm = 13,
|
|
+ hwmon_curr_crit_alarm = 14,
|
|
+};
|
|
+
|
|
+enum hwmon_power_attributes {
|
|
+ hwmon_power_average = 0,
|
|
+ hwmon_power_average_interval = 1,
|
|
+ hwmon_power_average_interval_max = 2,
|
|
+ hwmon_power_average_interval_min = 3,
|
|
+ hwmon_power_average_highest = 4,
|
|
+ hwmon_power_average_lowest = 5,
|
|
+ hwmon_power_average_max = 6,
|
|
+ hwmon_power_average_min = 7,
|
|
+ hwmon_power_input = 8,
|
|
+ hwmon_power_input_highest = 9,
|
|
+ hwmon_power_input_lowest = 10,
|
|
+ hwmon_power_reset_history = 11,
|
|
+ hwmon_power_accuracy = 12,
|
|
+ hwmon_power_cap = 13,
|
|
+ hwmon_power_cap_hyst = 14,
|
|
+ hwmon_power_cap_max = 15,
|
|
+ hwmon_power_cap_min = 16,
|
|
+ hwmon_power_min = 17,
|
|
+ hwmon_power_max = 18,
|
|
+ hwmon_power_crit = 19,
|
|
+ hwmon_power_lcrit = 20,
|
|
+ hwmon_power_label = 21,
|
|
+ hwmon_power_alarm = 22,
|
|
+ hwmon_power_cap_alarm = 23,
|
|
+ hwmon_power_min_alarm = 24,
|
|
+ hwmon_power_max_alarm = 25,
|
|
+ hwmon_power_lcrit_alarm = 26,
|
|
+ hwmon_power_crit_alarm = 27,
|
|
+};
|
|
+
|
|
+enum hwmon_energy_attributes {
|
|
+ hwmon_energy_input = 0,
|
|
+ hwmon_energy_label = 1,
|
|
+};
|
|
+
|
|
+enum hwmon_humidity_attributes {
|
|
+ hwmon_humidity_input = 0,
|
|
+ hwmon_humidity_label = 1,
|
|
+ hwmon_humidity_min = 2,
|
|
+ hwmon_humidity_min_hyst = 3,
|
|
+ hwmon_humidity_max = 4,
|
|
+ hwmon_humidity_max_hyst = 5,
|
|
+ hwmon_humidity_alarm = 6,
|
|
+ hwmon_humidity_fault = 7,
|
|
+};
|
|
+
|
|
+enum hwmon_fan_attributes {
|
|
+ hwmon_fan_input = 0,
|
|
+ hwmon_fan_label = 1,
|
|
+ hwmon_fan_min = 2,
|
|
+ hwmon_fan_max = 3,
|
|
+ hwmon_fan_div = 4,
|
|
+ hwmon_fan_pulses = 5,
|
|
+ hwmon_fan_target = 6,
|
|
+ hwmon_fan_alarm = 7,
|
|
+ hwmon_fan_min_alarm = 8,
|
|
+ hwmon_fan_max_alarm = 9,
|
|
+ hwmon_fan_fault = 10,
|
|
+};
|
|
+
|
|
+enum hwmon_pwm_attributes {
|
|
+ hwmon_pwm_input = 0,
|
|
+ hwmon_pwm_enable = 1,
|
|
+ hwmon_pwm_mode = 2,
|
|
+ hwmon_pwm_freq = 3,
|
|
+};
|
|
+
|
|
+struct hwmon_ops {
|
|
+ umode_t (*is_visible)(const void *, enum hwmon_sensor_types, u32, int);
|
|
+ int (*read)(struct device *, enum hwmon_sensor_types, u32, int, long int *);
|
|
+ int (*read_string)(struct device *, enum hwmon_sensor_types, u32, int, const char **);
|
|
+ int (*write)(struct device *, enum hwmon_sensor_types, u32, int, long int);
|
|
+};
|
|
+
|
|
+struct hwmon_channel_info {
|
|
+ enum hwmon_sensor_types type;
|
|
+ const u32 *config;
|
|
+};
|
|
+
|
|
+struct hwmon_chip_info {
|
|
+ const struct hwmon_ops *ops;
|
|
+ const struct hwmon_channel_info **info;
|
|
+};
|
|
+
|
|
+struct hwmon_device {
|
|
+ const char *name;
|
|
+ struct device dev;
|
|
+ const struct hwmon_chip_info *chip;
|
|
+ struct attribute_group group;
|
|
+ const struct attribute_group **groups;
|
|
+};
|
|
+
|
|
+struct hwmon_device_attribute {
|
|
+ struct device_attribute dev_attr;
|
|
+ const struct hwmon_ops *ops;
|
|
+ enum hwmon_sensor_types type;
|
|
+ u32 attr;
|
|
+ int index;
|
|
+ char name[32];
|
|
+};
|
|
+
|
|
+enum events {
|
|
+ THERMAL_AUX0 = 0,
|
|
+ THERMAL_AUX1 = 1,
|
|
+ THERMAL_CRITICAL = 2,
|
|
+ THERMAL_DEV_FAULT = 3,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ THERMAL_GENL_ATTR_UNSPEC = 0,
|
|
+ THERMAL_GENL_ATTR_EVENT = 1,
|
|
+ __THERMAL_GENL_ATTR_MAX = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ THERMAL_GENL_CMD_UNSPEC = 0,
|
|
+ THERMAL_GENL_CMD_EVENT = 1,
|
|
+ __THERMAL_GENL_CMD_MAX = 2,
|
|
+};
|
|
+
|
|
+struct thermal_genl_event {
|
|
+ u32 orig;
|
|
+ enum events event;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_thermal_temperature {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_thermal_zone;
|
|
+ int id;
|
|
+ int temp_prev;
|
|
+ int temp;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_cdev_update {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_type;
|
|
+ long unsigned int target;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_thermal_zone_trip {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_thermal_zone;
|
|
+ int id;
|
|
+ int trip;
|
|
+ enum thermal_trip_type trip_type;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_thermal_temperature {
|
|
+ u32 thermal_zone;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_cdev_update {
|
|
+ u32 type;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_thermal_zone_trip {
|
|
+ u32 thermal_zone;
|
|
+};
|
|
+
|
|
+struct thermal_instance {
|
|
+ int id;
|
|
+ char name[20];
|
|
+ struct thermal_zone_device *tz;
|
|
+ struct thermal_cooling_device *cdev;
|
|
+ int trip;
|
|
+ bool initialized;
|
|
+ long unsigned int upper;
|
|
+ long unsigned int lower;
|
|
+ long unsigned int target;
|
|
+ char attr_name[20];
|
|
+ struct device_attribute attr;
|
|
+ char weight_attr_name[20];
|
|
+ struct device_attribute weight_attr;
|
|
+ struct list_head tz_node;
|
|
+ struct list_head cdev_node;
|
|
+ unsigned int weight;
|
|
+};
|
|
+
|
|
+struct thermal_hwmon_device {
|
|
+ char type[20];
|
|
+ struct device *device;
|
|
+ int count;
|
|
+ struct list_head tz_list;
|
|
+ struct list_head node;
|
|
+};
|
|
+
|
|
+struct thermal_hwmon_attr {
|
|
+ struct device_attribute attr;
|
|
+ char name[16];
|
|
+};
|
|
+
|
|
+struct thermal_hwmon_temp {
|
|
+ struct list_head hwmon_node;
|
|
+ struct thermal_zone_device *tz;
|
|
+ struct thermal_hwmon_attr temp_input;
|
|
+ struct thermal_hwmon_attr temp_crit;
|
|
+};
|
|
+
|
|
+struct watchdog_info {
|
|
+ __u32 options;
|
|
+ __u32 firmware_version;
|
|
+ __u8 identity[32];
|
|
+};
|
|
+
|
|
+struct watchdog_device;
|
|
+
|
|
+struct watchdog_ops {
|
|
+ struct module *owner;
|
|
+ int (*start)(struct watchdog_device *);
|
|
+ int (*stop)(struct watchdog_device *);
|
|
+ int (*ping)(struct watchdog_device *);
|
|
+ unsigned int (*status)(struct watchdog_device *);
|
|
+ int (*set_timeout)(struct watchdog_device *, unsigned int);
|
|
+ int (*set_pretimeout)(struct watchdog_device *, unsigned int);
|
|
+ unsigned int (*get_timeleft)(struct watchdog_device *);
|
|
+ int (*restart)(struct watchdog_device *, long unsigned int, void *);
|
|
+ long int (*ioctl)(struct watchdog_device *, unsigned int, long unsigned int);
|
|
+};
|
|
+
|
|
+struct watchdog_governor;
|
|
+
|
|
+struct watchdog_core_data;
|
|
+
|
|
+struct watchdog_device {
|
|
+ int id;
|
|
+ struct device *parent;
|
|
+ const struct attribute_group **groups;
|
|
+ const struct watchdog_info *info;
|
|
+ const struct watchdog_ops *ops;
|
|
+ const struct watchdog_governor *gov;
|
|
+ unsigned int bootstatus;
|
|
+ unsigned int timeout;
|
|
+ unsigned int pretimeout;
|
|
+ unsigned int min_timeout;
|
|
+ unsigned int max_timeout;
|
|
+ unsigned int min_hw_heartbeat_ms;
|
|
+ unsigned int max_hw_heartbeat_ms;
|
|
+ struct notifier_block reboot_nb;
|
|
+ struct notifier_block restart_nb;
|
|
+ void *driver_data;
|
|
+ struct watchdog_core_data *wd_data;
|
|
+ long unsigned int status;
|
|
+ struct list_head deferred;
|
|
+};
|
|
+
|
|
+struct watchdog_governor {
|
|
+ const char name[20];
|
|
+ void (*pretimeout)(struct watchdog_device *);
|
|
+};
|
|
+
|
|
+struct watchdog_core_data {
|
|
+ struct device dev;
|
|
+ struct cdev cdev;
|
|
+ struct watchdog_device *wdd;
|
|
+ struct mutex lock;
|
|
+ ktime_t last_keepalive;
|
|
+ ktime_t last_hw_keepalive;
|
|
+ struct hrtimer timer;
|
|
+ struct kthread_work work;
|
|
+ long unsigned int status;
|
|
+};
|
|
+
|
|
+struct mdp_device_descriptor_s {
|
|
+ __u32 number;
|
|
+ __u32 major;
|
|
+ __u32 minor;
|
|
+ __u32 raid_disk;
|
|
+ __u32 state;
|
|
+ __u32 reserved[27];
|
|
+};
|
|
+
|
|
+typedef struct mdp_device_descriptor_s mdp_disk_t;
|
|
+
|
|
+struct mdp_superblock_s {
|
|
+ __u32 md_magic;
|
|
+ __u32 major_version;
|
|
+ __u32 minor_version;
|
|
+ __u32 patch_version;
|
|
+ __u32 gvalid_words;
|
|
+ __u32 set_uuid0;
|
|
+ __u32 ctime;
|
|
+ __u32 level;
|
|
+ __u32 size;
|
|
+ __u32 nr_disks;
|
|
+ __u32 raid_disks;
|
|
+ __u32 md_minor;
|
|
+ __u32 not_persistent;
|
|
+ __u32 set_uuid1;
|
|
+ __u32 set_uuid2;
|
|
+ __u32 set_uuid3;
|
|
+ __u32 gstate_creserved[16];
|
|
+ __u32 utime;
|
|
+ __u32 state;
|
|
+ __u32 active_disks;
|
|
+ __u32 working_disks;
|
|
+ __u32 failed_disks;
|
|
+ __u32 spare_disks;
|
|
+ __u32 sb_csum;
|
|
+ __u32 events_lo;
|
|
+ __u32 events_hi;
|
|
+ __u32 cp_events_lo;
|
|
+ __u32 cp_events_hi;
|
|
+ __u32 recovery_cp;
|
|
+ __u64 reshape_position;
|
|
+ __u32 new_level;
|
|
+ __u32 delta_disks;
|
|
+ __u32 new_layout;
|
|
+ __u32 new_chunk;
|
|
+ __u32 gstate_sreserved[14];
|
|
+ __u32 layout;
|
|
+ __u32 chunk_size;
|
|
+ __u32 root_pv;
|
|
+ __u32 root_block;
|
|
+ __u32 pstate_reserved[60];
|
|
+ mdp_disk_t disks[27];
|
|
+ __u32 reserved[0];
|
|
+ mdp_disk_t this_disk;
|
|
+};
|
|
+
|
|
+typedef struct mdp_superblock_s mdp_super_t;
|
|
+
|
|
+struct mdp_superblock_1 {
|
|
+ __le32 magic;
|
|
+ __le32 major_version;
|
|
+ __le32 feature_map;
|
|
+ __le32 pad0;
|
|
+ __u8 set_uuid[16];
|
|
+ char set_name[32];
|
|
+ __le64 ctime;
|
|
+ __le32 level;
|
|
+ __le32 layout;
|
|
+ __le64 size;
|
|
+ __le32 chunksize;
|
|
+ __le32 raid_disks;
|
|
+ union {
|
|
+ __le32 bitmap_offset;
|
|
+ struct {
|
|
+ __le16 offset;
|
|
+ __le16 size;
|
|
+ } ppl;
|
|
+ };
|
|
+ __le32 new_level;
|
|
+ __le64 reshape_position;
|
|
+ __le32 delta_disks;
|
|
+ __le32 new_layout;
|
|
+ __le32 new_chunk;
|
|
+ __le32 new_offset;
|
|
+ __le64 data_offset;
|
|
+ __le64 data_size;
|
|
+ __le64 super_offset;
|
|
+ union {
|
|
+ __le64 recovery_offset;
|
|
+ __le64 journal_tail;
|
|
+ };
|
|
+ __le32 dev_number;
|
|
+ __le32 cnt_corrected_read;
|
|
+ __u8 device_uuid[16];
|
|
+ __u8 devflags;
|
|
+ __u8 bblog_shift;
|
|
+ __le16 bblog_size;
|
|
+ __le32 bblog_offset;
|
|
+ __le64 utime;
|
|
+ __le64 events;
|
|
+ __le64 resync_offset;
|
|
+ __le32 sb_csum;
|
|
+ __le32 max_dev;
|
|
+ __u8 pad3[32];
|
|
+ __le16 dev_roles[0];
|
|
+};
|
|
+
|
|
+struct mdu_version_s {
|
|
+ int major;
|
|
+ int minor;
|
|
+ int patchlevel;
|
|
+};
|
|
+
|
|
+typedef struct mdu_version_s mdu_version_t;
|
|
+
|
|
+struct mdu_bitmap_file_s {
|
|
+ char pathname[4096];
|
|
+};
|
|
+
|
|
+typedef struct mdu_bitmap_file_s mdu_bitmap_file_t;
|
|
+
|
|
+struct mddev;
|
|
+
|
|
+struct md_rdev;
|
|
+
|
|
+struct md_cluster_operations {
|
|
+ int (*join)(struct mddev *, int);
|
|
+ int (*leave)(struct mddev *);
|
|
+ int (*slot_number)(struct mddev *);
|
|
+ int (*resync_info_update)(struct mddev *, sector_t, sector_t);
|
|
+ int (*metadata_update_start)(struct mddev *);
|
|
+ int (*metadata_update_finish)(struct mddev *);
|
|
+ void (*metadata_update_cancel)(struct mddev *);
|
|
+ int (*resync_start)(struct mddev *);
|
|
+ int (*resync_finish)(struct mddev *);
|
|
+ int (*area_resyncing)(struct mddev *, int, sector_t, sector_t);
|
|
+ int (*add_new_disk)(struct mddev *, struct md_rdev *);
|
|
+ void (*add_new_disk_cancel)(struct mddev *);
|
|
+ int (*new_disk_ack)(struct mddev *, bool);
|
|
+ int (*remove_disk)(struct mddev *, struct md_rdev *);
|
|
+ void (*load_bitmaps)(struct mddev *, int);
|
|
+ int (*gather_bitmaps)(struct md_rdev *);
|
|
+ int (*lock_all_bitmaps)(struct mddev *);
|
|
+ void (*unlock_all_bitmaps)(struct mddev *);
|
|
+ void (*update_size)(struct mddev *, sector_t);
|
|
+};
|
|
+
|
|
+struct md_cluster_info;
|
|
+
|
|
+struct md_personality;
|
|
+
|
|
+struct md_thread;
|
|
+
|
|
+struct bitmap;
|
|
+
|
|
+struct mddev {
|
|
+ void *private;
|
|
+ struct md_personality *pers;
|
|
+ dev_t unit;
|
|
+ int md_minor;
|
|
+ struct list_head disks;
|
|
+ long unsigned int flags;
|
|
+ long unsigned int sb_flags;
|
|
+ int suspended;
|
|
+ atomic_t active_io;
|
|
+ int ro;
|
|
+ int sysfs_active;
|
|
+ struct gendisk *gendisk;
|
|
+ struct kobject kobj;
|
|
+ int hold_active;
|
|
+ int major_version;
|
|
+ int minor_version;
|
|
+ int patch_version;
|
|
+ int persistent;
|
|
+ int external;
|
|
+ char metadata_type[17];
|
|
+ int chunk_sectors;
|
|
+ time64_t ctime;
|
|
+ time64_t utime;
|
|
+ int level;
|
|
+ int layout;
|
|
+ char clevel[16];
|
|
+ int raid_disks;
|
|
+ int max_disks;
|
|
+ sector_t dev_sectors;
|
|
+ sector_t array_sectors;
|
|
+ int external_size;
|
|
+ __u64 events;
|
|
+ int can_decrease_events;
|
|
+ char uuid[16];
|
|
+ sector_t reshape_position;
|
|
+ int delta_disks;
|
|
+ int new_level;
|
|
+ int new_layout;
|
|
+ int new_chunk_sectors;
|
|
+ int reshape_backwards;
|
|
+ struct md_thread *thread;
|
|
+ struct md_thread *sync_thread;
|
|
+ char *last_sync_action;
|
|
+ sector_t curr_resync;
|
|
+ sector_t curr_resync_completed;
|
|
+ long unsigned int resync_mark;
|
|
+ sector_t resync_mark_cnt;
|
|
+ sector_t curr_mark_cnt;
|
|
+ sector_t resync_max_sectors;
|
|
+ atomic64_t resync_mismatches;
|
|
+ sector_t suspend_lo;
|
|
+ sector_t suspend_hi;
|
|
+ int sync_speed_min;
|
|
+ int sync_speed_max;
|
|
+ int parallel_resync;
|
|
+ int ok_start_degraded;
|
|
+ long unsigned int recovery;
|
|
+ int recovery_disabled;
|
|
+ int in_sync;
|
|
+ struct mutex open_mutex;
|
|
+ struct mutex reconfig_mutex;
|
|
+ atomic_t active;
|
|
+ atomic_t openers;
|
|
+ int changed;
|
|
+ int degraded;
|
|
+ atomic_t recovery_active;
|
|
+ wait_queue_head_t recovery_wait;
|
|
+ sector_t recovery_cp;
|
|
+ sector_t resync_min;
|
|
+ sector_t resync_max;
|
|
+ struct kernfs_node *sysfs_state;
|
|
+ struct kernfs_node *sysfs_action;
|
|
+ struct kernfs_node *sysfs_completed;
|
|
+ struct kernfs_node *sysfs_degraded;
|
|
+ struct kernfs_node *sysfs_level;
|
|
+ struct work_struct del_work;
|
|
+ spinlock_t lock;
|
|
+ wait_queue_head_t sb_wait;
|
|
+ atomic_t pending_writes;
|
|
+ unsigned int safemode;
|
|
+ unsigned int safemode_delay;
|
|
+ struct timer_list safemode_timer;
|
|
+ struct percpu_ref writes_pending;
|
|
+ int sync_checkers;
|
|
+ struct request_queue *queue;
|
|
+ struct bitmap *bitmap;
|
|
+ struct {
|
|
+ struct file *file;
|
|
+ loff_t offset;
|
|
+ long unsigned int space;
|
|
+ loff_t default_offset;
|
|
+ long unsigned int default_space;
|
|
+ struct mutex mutex;
|
|
+ long unsigned int chunksize;
|
|
+ long unsigned int daemon_sleep;
|
|
+ long unsigned int max_write_behind;
|
|
+ int external;
|
|
+ int nodes;
|
|
+ char cluster_name[64];
|
|
+ } bitmap_info;
|
|
+ atomic_t max_corr_read_errors;
|
|
+ struct list_head all_mddevs;
|
|
+ struct attribute_group *to_remove;
|
|
+ struct bio_set bio_set;
|
|
+ struct bio_set sync_set;
|
|
+ struct bio *flush_bio;
|
|
+ atomic_t flush_pending;
|
|
+ ktime_t start_flush;
|
|
+ ktime_t last_flush;
|
|
+ struct work_struct flush_work;
|
|
+ struct work_struct event_work;
|
|
+ void (*sync_super)(struct mddev *, struct md_rdev *);
|
|
+ struct md_cluster_info *cluster_info;
|
|
+ unsigned int good_device_nr;
|
|
+ bool has_superblocks: 1;
|
|
+};
|
|
+
|
|
+struct md_rdev {
|
|
+ struct list_head same_set;
|
|
+ sector_t sectors;
|
|
+ struct mddev *mddev;
|
|
+ int last_events;
|
|
+ struct block_device *meta_bdev;
|
|
+ struct block_device *bdev;
|
|
+ struct page *sb_page;
|
|
+ struct page *bb_page;
|
|
+ int sb_loaded;
|
|
+ __u64 sb_events;
|
|
+ sector_t data_offset;
|
|
+ sector_t new_data_offset;
|
|
+ sector_t sb_start;
|
|
+ int sb_size;
|
|
+ int preferred_minor;
|
|
+ struct kobject kobj;
|
|
+ long unsigned int flags;
|
|
+ wait_queue_head_t blocked_wait;
|
|
+ int desc_nr;
|
|
+ int raid_disk;
|
|
+ int new_raid_disk;
|
|
+ int saved_raid_disk;
|
|
+ union {
|
|
+ sector_t recovery_offset;
|
|
+ sector_t journal_tail;
|
|
+ };
|
|
+ atomic_t nr_pending;
|
|
+ atomic_t read_errors;
|
|
+ time64_t last_read_error;
|
|
+ atomic_t corrected_errors;
|
|
+ struct work_struct del_work;
|
|
+ struct kernfs_node *sysfs_state;
|
|
+ struct kernfs_node *sysfs_unack_badblocks;
|
|
+ struct kernfs_node *sysfs_badblocks;
|
|
+ struct badblocks badblocks;
|
|
+ struct {
|
|
+ short int offset;
|
|
+ unsigned int size;
|
|
+ sector_t sector;
|
|
+ } ppl;
|
|
+};
|
|
+
|
|
+enum flag_bits {
|
|
+ Faulty = 0,
|
|
+ In_sync = 1,
|
|
+ Bitmap_sync = 2,
|
|
+ WriteMostly = 3,
|
|
+ AutoDetected = 4,
|
|
+ Blocked = 5,
|
|
+ WriteErrorSeen = 6,
|
|
+ FaultRecorded = 7,
|
|
+ BlockedBadBlocks = 8,
|
|
+ WantReplacement = 9,
|
|
+ Replacement = 10,
|
|
+ Candidate = 11,
|
|
+ Journal = 12,
|
|
+ ClusterRemove = 13,
|
|
+ RemoveSynchronized = 14,
|
|
+ ExternalBbl = 15,
|
|
+ FailFast = 16,
|
|
+ LastDev = 17,
|
|
+ WantRemove = 18,
|
|
+};
|
|
+
|
|
+enum mddev_flags {
|
|
+ MD_ARRAY_FIRST_USE = 0,
|
|
+ MD_CLOSING = 1,
|
|
+ MD_JOURNAL_CLEAN = 2,
|
|
+ MD_HAS_JOURNAL = 3,
|
|
+ MD_CLUSTER_RESYNC_LOCKED = 4,
|
|
+ MD_FAILFAST_SUPPORTED = 5,
|
|
+ MD_HAS_PPL = 6,
|
|
+ MD_HAS_MULTIPLE_PPLS = 7,
|
|
+ MD_ALLOW_SB_UPDATE = 8,
|
|
+ MD_UPDATING_SB = 9,
|
|
+ MD_NOT_READY = 10,
|
|
+};
|
|
+
|
|
+enum mddev_sb_flags {
|
|
+ MD_SB_CHANGE_DEVS = 0,
|
|
+ MD_SB_CHANGE_CLEAN = 1,
|
|
+ MD_SB_CHANGE_PENDING = 2,
|
|
+ MD_SB_NEED_REWRITE = 3,
|
|
+};
|
|
+
|
|
+struct md_personality {
|
|
+ char *name;
|
|
+ int level;
|
|
+ struct list_head list;
|
|
+ struct module *owner;
|
|
+ bool (*make_request)(struct mddev *, struct bio *);
|
|
+ int (*run)(struct mddev *);
|
|
+ int (*start)(struct mddev *);
|
|
+ void (*free)(struct mddev *, void *);
|
|
+ void (*status)(struct seq_file *, struct mddev *);
|
|
+ void (*error_handler)(struct mddev *, struct md_rdev *);
|
|
+ int (*hot_add_disk)(struct mddev *, struct md_rdev *);
|
|
+ int (*hot_remove_disk)(struct mddev *, struct md_rdev *);
|
|
+ int (*spare_active)(struct mddev *);
|
|
+ sector_t (*sync_request)(struct mddev *, sector_t, int *);
|
|
+ int (*resize)(struct mddev *, sector_t);
|
|
+ sector_t (*size)(struct mddev *, sector_t, int);
|
|
+ int (*check_reshape)(struct mddev *);
|
|
+ int (*start_reshape)(struct mddev *);
|
|
+ void (*finish_reshape)(struct mddev *);
|
|
+ void (*quiesce)(struct mddev *, int);
|
|
+ void * (*takeover)(struct mddev *);
|
|
+ int (*congested)(struct mddev *, int);
|
|
+ int (*change_consistency_policy)(struct mddev *, const char *);
|
|
+};
|
|
+
|
|
+struct md_thread {
|
|
+ void (*run)(struct md_thread *);
|
|
+ struct mddev *mddev;
|
|
+ wait_queue_head_t wqueue;
|
|
+ long unsigned int flags;
|
|
+ struct task_struct *tsk;
|
|
+ long unsigned int timeout;
|
|
+ void *private;
|
|
+};
|
|
+
|
|
+struct bitmap_page;
|
|
+
|
|
+struct bitmap_counts {
|
|
+ spinlock_t lock;
|
|
+ struct bitmap_page *bp;
|
|
+ long unsigned int pages;
|
|
+ long unsigned int missing_pages;
|
|
+ long unsigned int chunkshift;
|
|
+ long unsigned int chunks;
|
|
+};
|
|
+
|
|
+struct bitmap_storage {
|
|
+ struct file *file;
|
|
+ struct page *sb_page;
|
|
+ struct page **filemap;
|
|
+ long unsigned int *filemap_attr;
|
|
+ long unsigned int file_pages;
|
|
+ long unsigned int bytes;
|
|
+};
|
|
+
|
|
+struct bitmap {
|
|
+ struct bitmap_counts counts;
|
|
+ struct mddev *mddev;
|
|
+ __u64 events_cleared;
|
|
+ int need_sync;
|
|
+ struct bitmap_storage storage;
|
|
+ long unsigned int flags;
|
|
+ int allclean;
|
|
+ atomic_t behind_writes;
|
|
+ long unsigned int behind_writes_used;
|
|
+ long unsigned int daemon_lastrun;
|
|
+ long unsigned int last_end_sync;
|
|
+ atomic_t pending_writes;
|
|
+ wait_queue_head_t write_wait;
|
|
+ wait_queue_head_t overflow_wait;
|
|
+ wait_queue_head_t behind_wait;
|
|
+ struct kernfs_node *sysfs_can_clear;
|
|
+ int cluster_slot;
|
|
+};
|
|
+
|
|
+enum recovery_flags {
|
|
+ MD_RECOVERY_RUNNING = 0,
|
|
+ MD_RECOVERY_SYNC = 1,
|
|
+ MD_RECOVERY_RECOVER = 2,
|
|
+ MD_RECOVERY_INTR = 3,
|
|
+ MD_RECOVERY_DONE = 4,
|
|
+ MD_RECOVERY_NEEDED = 5,
|
|
+ MD_RECOVERY_REQUESTED = 6,
|
|
+ MD_RECOVERY_CHECK = 7,
|
|
+ MD_RECOVERY_RESHAPE = 8,
|
|
+ MD_RECOVERY_FROZEN = 9,
|
|
+ MD_RECOVERY_ERROR = 10,
|
|
+ MD_RECOVERY_WAIT = 11,
|
|
+ MD_RESYNCING_REMOTE = 12,
|
|
+};
|
|
+
|
|
+struct md_sysfs_entry {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct mddev *, char *);
|
|
+ ssize_t (*store)(struct mddev *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct bitmap_page {
|
|
+ char *map;
|
|
+ unsigned int hijacked: 1;
|
|
+ unsigned int pending: 1;
|
|
+ unsigned int count: 30;
|
|
+};
|
|
+
|
|
+struct super_type {
|
|
+ char *name;
|
|
+ struct module *owner;
|
|
+ int (*load_super)(struct md_rdev *, struct md_rdev *, int);
|
|
+ int (*validate_super)(struct mddev *, struct md_rdev *);
|
|
+ void (*sync_super)(struct mddev *, struct md_rdev *);
|
|
+ long long unsigned int (*rdev_size_change)(struct md_rdev *, sector_t);
|
|
+ int (*allow_new_offset)(struct md_rdev *, long long unsigned int);
|
|
+};
|
|
+
|
|
+struct rdev_sysfs_entry {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct md_rdev *, char *);
|
|
+ ssize_t (*store)(struct md_rdev *, const char *, size_t);
|
|
+};
|
|
+
|
|
+enum array_state {
|
|
+ clear = 0,
|
|
+ inactive = 1,
|
|
+ suspended = 2,
|
|
+ readonly = 3,
|
|
+ read_auto = 4,
|
|
+ clean = 5,
|
|
+ active = 6,
|
|
+ write_pending = 7,
|
|
+ active_idle = 8,
|
|
+ bad_word = 9,
|
|
+};
|
|
+
|
|
+struct detected_devices_node {
|
|
+ struct list_head list;
|
|
+ dev_t dev;
|
|
+};
|
|
+
|
|
+typedef __u16 bitmap_counter_t;
|
|
+
|
|
+enum bitmap_state {
|
|
+ BITMAP_STALE = 1,
|
|
+ BITMAP_WRITE_ERROR = 2,
|
|
+ BITMAP_HOSTENDIAN = 15,
|
|
+};
|
|
+
|
|
+struct bitmap_super_s {
|
|
+ __le32 magic;
|
|
+ __le32 version;
|
|
+ __u8 uuid[16];
|
|
+ __le64 events;
|
|
+ __le64 events_cleared;
|
|
+ __le64 sync_size;
|
|
+ __le32 state;
|
|
+ __le32 chunksize;
|
|
+ __le32 daemon_sleep;
|
|
+ __le32 write_behind;
|
|
+ __le32 sectors_reserved;
|
|
+ __le32 nodes;
|
|
+ __u8 cluster_name[64];
|
|
+ __u8 pad[120];
|
|
+};
|
|
+
|
|
+typedef struct bitmap_super_s bitmap_super_t;
|
|
+
|
|
+enum bitmap_page_attr {
|
|
+ BITMAP_PAGE_DIRTY = 0,
|
|
+ BITMAP_PAGE_PENDING = 1,
|
|
+ BITMAP_PAGE_NEEDWRITE = 2,
|
|
+};
|
|
+
|
|
+struct dm_kobject_holder {
|
|
+ struct kobject kobj;
|
|
+ struct completion completion;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ EDAC_REPORTING_ENABLED = 0,
|
|
+ EDAC_REPORTING_DISABLED = 1,
|
|
+ EDAC_REPORTING_FORCE = 2,
|
|
+};
|
|
+
|
|
+enum dev_type {
|
|
+ DEV_UNKNOWN = 0,
|
|
+ DEV_X1 = 1,
|
|
+ DEV_X2 = 2,
|
|
+ DEV_X4 = 3,
|
|
+ DEV_X8 = 4,
|
|
+ DEV_X16 = 5,
|
|
+ DEV_X32 = 6,
|
|
+ DEV_X64 = 7,
|
|
+};
|
|
+
|
|
+enum hw_event_mc_err_type {
|
|
+ HW_EVENT_ERR_CORRECTED = 0,
|
|
+ HW_EVENT_ERR_UNCORRECTED = 1,
|
|
+ HW_EVENT_ERR_DEFERRED = 2,
|
|
+ HW_EVENT_ERR_FATAL = 3,
|
|
+ HW_EVENT_ERR_INFO = 4,
|
|
+};
|
|
+
|
|
+enum mem_type {
|
|
+ MEM_EMPTY = 0,
|
|
+ MEM_RESERVED = 1,
|
|
+ MEM_UNKNOWN = 2,
|
|
+ MEM_FPM = 3,
|
|
+ MEM_EDO = 4,
|
|
+ MEM_BEDO = 5,
|
|
+ MEM_SDR = 6,
|
|
+ MEM_RDR = 7,
|
|
+ MEM_DDR = 8,
|
|
+ MEM_RDDR = 9,
|
|
+ MEM_RMBS = 10,
|
|
+ MEM_DDR2 = 11,
|
|
+ MEM_FB_DDR2 = 12,
|
|
+ MEM_RDDR2 = 13,
|
|
+ MEM_XDR = 14,
|
|
+ MEM_DDR3 = 15,
|
|
+ MEM_RDDR3 = 16,
|
|
+ MEM_LRDDR3 = 17,
|
|
+ MEM_DDR4 = 18,
|
|
+ MEM_RDDR4 = 19,
|
|
+ MEM_LRDDR4 = 20,
|
|
+ MEM_NVDIMM = 21,
|
|
+};
|
|
+
|
|
+enum edac_type {
|
|
+ EDAC_UNKNOWN = 0,
|
|
+ EDAC_NONE = 1,
|
|
+ EDAC_RESERVED = 2,
|
|
+ EDAC_PARITY = 3,
|
|
+ EDAC_EC = 4,
|
|
+ EDAC_SECDED = 5,
|
|
+ EDAC_S2ECD2ED = 6,
|
|
+ EDAC_S4ECD4ED = 7,
|
|
+ EDAC_S8ECD8ED = 8,
|
|
+ EDAC_S16ECD16ED = 9,
|
|
+};
|
|
+
|
|
+enum scrub_type {
|
|
+ SCRUB_UNKNOWN = 0,
|
|
+ SCRUB_NONE = 1,
|
|
+ SCRUB_SW_PROG = 2,
|
|
+ SCRUB_SW_SRC = 3,
|
|
+ SCRUB_SW_PROG_SRC = 4,
|
|
+ SCRUB_SW_TUNABLE = 5,
|
|
+ SCRUB_HW_PROG = 6,
|
|
+ SCRUB_HW_SRC = 7,
|
|
+ SCRUB_HW_PROG_SRC = 8,
|
|
+ SCRUB_HW_TUNABLE = 9,
|
|
+};
|
|
+
|
|
+enum edac_mc_layer_type {
|
|
+ EDAC_MC_LAYER_BRANCH = 0,
|
|
+ EDAC_MC_LAYER_CHANNEL = 1,
|
|
+ EDAC_MC_LAYER_SLOT = 2,
|
|
+ EDAC_MC_LAYER_CHIP_SELECT = 3,
|
|
+ EDAC_MC_LAYER_ALL_MEM = 4,
|
|
+};
|
|
+
|
|
+struct edac_mc_layer {
|
|
+ enum edac_mc_layer_type type;
|
|
+ unsigned int size;
|
|
+ bool is_virt_csrow;
|
|
+};
|
|
+
|
|
+struct mem_ctl_info;
|
|
+
|
|
+struct dimm_info {
|
|
+ struct device dev;
|
|
+ char label[32];
|
|
+ unsigned int location[3];
|
|
+ struct mem_ctl_info *mci;
|
|
+ u32 grain;
|
|
+ enum dev_type dtype;
|
|
+ enum mem_type mtype;
|
|
+ enum edac_type edac_mode;
|
|
+ u32 nr_pages;
|
|
+ unsigned int csrow;
|
|
+ unsigned int cschannel;
|
|
+};
|
|
+
|
|
+struct mcidev_sysfs_attribute;
|
|
+
|
|
+struct edac_raw_error_desc {
|
|
+ char location[256];
|
|
+ char label[296];
|
|
+ long int grain;
|
|
+ u16 error_count;
|
|
+ int top_layer;
|
|
+ int mid_layer;
|
|
+ int low_layer;
|
|
+ long unsigned int page_frame_number;
|
|
+ long unsigned int offset_in_page;
|
|
+ long unsigned int syndrome;
|
|
+ const char *msg;
|
|
+ const char *other_detail;
|
|
+ bool enable_per_layer_report;
|
|
+};
|
|
+
|
|
+struct csrow_info;
|
|
+
|
|
+struct mem_ctl_info {
|
|
+ struct device dev;
|
|
+ struct bus_type *bus;
|
|
+ struct list_head link;
|
|
+ struct module *owner;
|
|
+ long unsigned int mtype_cap;
|
|
+ long unsigned int edac_ctl_cap;
|
|
+ long unsigned int edac_cap;
|
|
+ long unsigned int scrub_cap;
|
|
+ enum scrub_type scrub_mode;
|
|
+ int (*set_sdram_scrub_rate)(struct mem_ctl_info *, u32);
|
|
+ int (*get_sdram_scrub_rate)(struct mem_ctl_info *);
|
|
+ void (*edac_check)(struct mem_ctl_info *);
|
|
+ long unsigned int (*ctl_page_to_phys)(struct mem_ctl_info *, long unsigned int);
|
|
+ int mc_idx;
|
|
+ struct csrow_info **csrows;
|
|
+ unsigned int nr_csrows;
|
|
+ unsigned int num_cschannel;
|
|
+ unsigned int n_layers;
|
|
+ struct edac_mc_layer *layers;
|
|
+ bool csbased;
|
|
+ unsigned int tot_dimms;
|
|
+ struct dimm_info **dimms;
|
|
+ struct device *pdev;
|
|
+ const char *mod_name;
|
|
+ const char *ctl_name;
|
|
+ const char *dev_name;
|
|
+ void *pvt_info;
|
|
+ long unsigned int start_time;
|
|
+ u32 ce_noinfo_count;
|
|
+ u32 ue_noinfo_count;
|
|
+ u32 ue_mc;
|
|
+ u32 ce_mc;
|
|
+ u32 *ce_per_layer[3];
|
|
+ u32 *ue_per_layer[3];
|
|
+ struct completion complete;
|
|
+ const struct mcidev_sysfs_attribute *mc_driver_sysfs_attributes;
|
|
+ struct delayed_work work;
|
|
+ struct edac_raw_error_desc error_desc;
|
|
+ int op_state;
|
|
+ struct dentry *debugfs;
|
|
+ u8 fake_inject_layer[3];
|
|
+ bool fake_inject_ue;
|
|
+ u16 fake_inject_count;
|
|
+};
|
|
+
|
|
+struct rank_info {
|
|
+ int chan_idx;
|
|
+ struct csrow_info *csrow;
|
|
+ struct dimm_info *dimm;
|
|
+ u32 ce_count;
|
|
+};
|
|
+
|
|
+struct csrow_info {
|
|
+ struct device dev;
|
|
+ long unsigned int first_page;
|
|
+ long unsigned int last_page;
|
|
+ long unsigned int page_mask;
|
|
+ int csrow_idx;
|
|
+ u32 ue_count;
|
|
+ u32 ce_count;
|
|
+ struct mem_ctl_info *mci;
|
|
+ u32 nr_channels;
|
|
+ struct rank_info **channels;
|
|
+};
|
|
+
|
|
+struct edac_device_counter {
|
|
+ u32 ue_count;
|
|
+ u32 ce_count;
|
|
+};
|
|
+
|
|
+struct edac_device_ctl_info;
|
|
+
|
|
+struct edac_dev_sysfs_attribute {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct edac_device_ctl_info *, char *);
|
|
+ ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct edac_device_instance;
|
|
+
|
|
+struct edac_device_ctl_info {
|
|
+ struct list_head link;
|
|
+ struct module *owner;
|
|
+ int dev_idx;
|
|
+ int log_ue;
|
|
+ int log_ce;
|
|
+ int panic_on_ue;
|
|
+ unsigned int poll_msec;
|
|
+ long unsigned int delay;
|
|
+ struct edac_dev_sysfs_attribute *sysfs_attributes;
|
|
+ struct bus_type *edac_subsys;
|
|
+ int op_state;
|
|
+ struct delayed_work work;
|
|
+ void (*edac_check)(struct edac_device_ctl_info *);
|
|
+ struct device *dev;
|
|
+ const char *mod_name;
|
|
+ const char *ctl_name;
|
|
+ const char *dev_name;
|
|
+ void *pvt_info;
|
|
+ long unsigned int start_time;
|
|
+ struct completion removal_complete;
|
|
+ char name[32];
|
|
+ u32 nr_instances;
|
|
+ struct edac_device_instance *instances;
|
|
+ struct edac_device_counter counters;
|
|
+ struct kobject kobj;
|
|
+};
|
|
+
|
|
+struct edac_device_block;
|
|
+
|
|
+struct edac_dev_sysfs_block_attribute {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct kobject *, struct attribute *, char *);
|
|
+ ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
|
|
+ struct edac_device_block *block;
|
|
+ unsigned int value;
|
|
+};
|
|
+
|
|
+struct edac_device_block {
|
|
+ struct edac_device_instance *instance;
|
|
+ char name[32];
|
|
+ struct edac_device_counter counters;
|
|
+ int nr_attribs;
|
|
+ struct edac_dev_sysfs_block_attribute *block_attributes;
|
|
+ struct kobject kobj;
|
|
+};
|
|
+
|
|
+struct edac_device_instance {
|
|
+ struct edac_device_ctl_info *ctl;
|
|
+ char name[35];
|
|
+ struct edac_device_counter counters;
|
|
+ u32 nr_blocks;
|
|
+ struct edac_device_block *blocks;
|
|
+ struct kobject kobj;
|
|
+};
|
|
+
|
|
+struct dev_ch_attribute {
|
|
+ struct device_attribute attr;
|
|
+ int channel;
|
|
+};
|
|
+
|
|
+struct ctl_info_attribute {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct edac_device_ctl_info *, char *);
|
|
+ ssize_t (*store)(struct edac_device_ctl_info *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct instance_attribute {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct edac_device_instance *, char *);
|
|
+ ssize_t (*store)(struct edac_device_instance *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct edac_pci_counter {
|
|
+ atomic_t pe_count;
|
|
+ atomic_t npe_count;
|
|
+};
|
|
+
|
|
+struct edac_pci_ctl_info {
|
|
+ struct list_head link;
|
|
+ int pci_idx;
|
|
+ struct bus_type *edac_subsys;
|
|
+ int op_state;
|
|
+ struct delayed_work work;
|
|
+ void (*edac_check)(struct edac_pci_ctl_info *);
|
|
+ struct device *dev;
|
|
+ const char *mod_name;
|
|
+ const char *ctl_name;
|
|
+ const char *dev_name;
|
|
+ void *pvt_info;
|
|
+ long unsigned int start_time;
|
|
+ struct completion complete;
|
|
+ char name[32];
|
|
+ struct edac_pci_counter counters;
|
|
+ struct kobject kobj;
|
|
+};
|
|
+
|
|
+struct edac_pci_gen_data {
|
|
+ int edac_idx;
|
|
+};
|
|
+
|
|
+struct instance_attribute___2 {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct edac_pci_ctl_info *, char *);
|
|
+ ssize_t (*store)(struct edac_pci_ctl_info *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct edac_pci_dev_attribute {
|
|
+ struct attribute attr;
|
|
+ void *value;
|
|
+ ssize_t (*show)(void *, char *);
|
|
+ ssize_t (*store)(void *, const char *, size_t);
|
|
+};
|
|
+
|
|
+typedef void (*pci_parity_check_fn_t)(struct pci_dev *);
|
|
+
|
|
+struct ghes_edac_pvt {
|
|
+ struct list_head list;
|
|
+ struct ghes *ghes;
|
|
+ struct mem_ctl_info *mci;
|
|
+ char other_detail[400];
|
|
+ char msg[80];
|
|
+};
|
|
+
|
|
+struct memdev_dmi_entry {
|
|
+ u8 type;
|
|
+ u8 length;
|
|
+ u16 handle;
|
|
+ u16 phys_mem_array_handle;
|
|
+ u16 mem_err_info_handle;
|
|
+ u16 total_width;
|
|
+ u16 data_width;
|
|
+ u16 size;
|
|
+ u8 form_factor;
|
|
+ u8 device_set;
|
|
+ u8 device_locator;
|
|
+ u8 bank_locator;
|
|
+ u8 memory_type;
|
|
+ u16 type_detail;
|
|
+ u16 speed;
|
|
+ u8 manufacturer;
|
|
+ u8 serial_number;
|
|
+ u8 asset_tag;
|
|
+ u8 part_number;
|
|
+ u8 attributes;
|
|
+ u32 extended_size;
|
|
+ u16 conf_mem_clk_speed;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct ghes_edac_dimm_fill {
|
|
+ struct mem_ctl_info *mci;
|
|
+ unsigned int count;
|
|
+};
|
|
+
|
|
+struct cpufreq_driver {
|
|
+ char name[16];
|
|
+ u8 flags;
|
|
+ void *driver_data;
|
|
+ int (*init)(struct cpufreq_policy *);
|
|
+ int (*verify)(struct cpufreq_policy *);
|
|
+ int (*setpolicy)(struct cpufreq_policy *);
|
|
+ int (*target)(struct cpufreq_policy *, unsigned int, unsigned int);
|
|
+ int (*target_index)(struct cpufreq_policy *, unsigned int);
|
|
+ unsigned int (*fast_switch)(struct cpufreq_policy *, unsigned int);
|
|
+ unsigned int (*resolve_freq)(struct cpufreq_policy *, unsigned int);
|
|
+ unsigned int (*get_intermediate)(struct cpufreq_policy *, unsigned int);
|
|
+ int (*target_intermediate)(struct cpufreq_policy *, unsigned int);
|
|
+ unsigned int (*get)(unsigned int);
|
|
+ int (*bios_limit)(int, unsigned int *);
|
|
+ int (*exit)(struct cpufreq_policy *);
|
|
+ void (*stop_cpu)(struct cpufreq_policy *);
|
|
+ int (*suspend)(struct cpufreq_policy *);
|
|
+ int (*resume)(struct cpufreq_policy *);
|
|
+ void (*ready)(struct cpufreq_policy *);
|
|
+ struct freq_attr **attr;
|
|
+ bool boost_enabled;
|
|
+ int (*set_boost)(int);
|
|
+};
|
|
+
|
|
+struct cpufreq_stats {
|
|
+ unsigned int total_trans;
|
|
+ long long unsigned int last_time;
|
|
+ unsigned int max_state;
|
|
+ unsigned int state_num;
|
|
+ unsigned int last_index;
|
|
+ u64 *time_in_state;
|
|
+ unsigned int *freq_table;
|
|
+ unsigned int *trans_table;
|
|
+};
|
|
+
|
|
+struct gov_attr_set {
|
|
+ struct kobject kobj;
|
|
+ struct list_head policy_list;
|
|
+ struct mutex update_lock;
|
|
+ int usage_count;
|
|
+};
|
|
+
|
|
+struct governor_attr {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct gov_attr_set *, char *);
|
|
+ ssize_t (*store)(struct gov_attr_set *, const char *, size_t);
|
|
+};
|
|
+
|
|
+enum {
|
|
+ OD_NORMAL_SAMPLE = 0,
|
|
+ OD_SUB_SAMPLE = 1,
|
|
+};
|
|
+
|
|
+struct dbs_data {
|
|
+ struct gov_attr_set attr_set;
|
|
+ void *tuners;
|
|
+ unsigned int ignore_nice_load;
|
|
+ unsigned int sampling_rate;
|
|
+ unsigned int sampling_down_factor;
|
|
+ unsigned int up_threshold;
|
|
+ unsigned int io_is_busy;
|
|
+};
|
|
+
|
|
+struct policy_dbs_info {
|
|
+ struct cpufreq_policy *policy;
|
|
+ struct mutex update_mutex;
|
|
+ u64 last_sample_time;
|
|
+ s64 sample_delay_ns;
|
|
+ atomic_t work_count;
|
|
+ struct irq_work irq_work;
|
|
+ struct work_struct work;
|
|
+ struct dbs_data *dbs_data;
|
|
+ struct list_head list;
|
|
+ unsigned int rate_mult;
|
|
+ unsigned int idle_periods;
|
|
+ bool is_shared;
|
|
+ bool work_in_progress;
|
|
+};
|
|
+
|
|
+struct dbs_governor {
|
|
+ struct cpufreq_governor gov;
|
|
+ struct kobj_type kobj_type;
|
|
+ struct dbs_data *gdbs_data;
|
|
+ unsigned int (*gov_dbs_update)(struct cpufreq_policy *);
|
|
+ struct policy_dbs_info * (*alloc)();
|
|
+ void (*free)(struct policy_dbs_info *);
|
|
+ int (*init)(struct dbs_data *);
|
|
+ void (*exit)(struct dbs_data *);
|
|
+ void (*start)(struct cpufreq_policy *);
|
|
+};
|
|
+
|
|
+struct od_ops {
|
|
+ unsigned int (*powersave_bias_target)(struct cpufreq_policy *, unsigned int, unsigned int);
|
|
+};
|
|
+
|
|
+struct od_policy_dbs_info {
|
|
+ struct policy_dbs_info policy_dbs;
|
|
+ unsigned int freq_lo;
|
|
+ unsigned int freq_lo_delay_us;
|
|
+ unsigned int freq_hi_delay_us;
|
|
+ unsigned int sample_type: 1;
|
|
+};
|
|
+
|
|
+struct od_dbs_tuners {
|
|
+ unsigned int powersave_bias;
|
|
+};
|
|
+
|
|
+struct cs_policy_dbs_info {
|
|
+ struct policy_dbs_info policy_dbs;
|
|
+ unsigned int down_skip;
|
|
+ unsigned int requested_freq;
|
|
+};
|
|
+
|
|
+struct cs_dbs_tuners {
|
|
+ unsigned int down_threshold;
|
|
+ unsigned int freq_step;
|
|
+};
|
|
+
|
|
+struct cpu_dbs_info {
|
|
+ u64 prev_cpu_idle;
|
|
+ u64 prev_update_time;
|
|
+ u64 prev_cpu_nice;
|
|
+ unsigned int prev_load;
|
|
+ struct update_util_data update_util;
|
|
+ struct policy_dbs_info *policy_dbs;
|
|
+};
|
|
+
|
|
+enum acpi_preferred_pm_profiles {
|
|
+ PM_UNSPECIFIED = 0,
|
|
+ PM_DESKTOP = 1,
|
|
+ PM_MOBILE = 2,
|
|
+ PM_WORKSTATION = 3,
|
|
+ PM_ENTERPRISE_SERVER = 4,
|
|
+ PM_SOHO_SERVER = 5,
|
|
+ PM_APPLIANCE_PC = 6,
|
|
+ PM_PERFORMANCE_SERVER = 7,
|
|
+ PM_TABLET = 8,
|
|
+};
|
|
+
|
|
+struct sample {
|
|
+ int32_t core_avg_perf;
|
|
+ int32_t busy_scaled;
|
|
+ u64 aperf;
|
|
+ u64 mperf;
|
|
+ u64 tsc;
|
|
+ u64 time;
|
|
+};
|
|
+
|
|
+struct pstate_data {
|
|
+ int current_pstate;
|
|
+ int min_pstate;
|
|
+ int max_pstate;
|
|
+ int max_pstate_physical;
|
|
+ int scaling;
|
|
+ int turbo_pstate;
|
|
+ unsigned int max_freq;
|
|
+ unsigned int turbo_freq;
|
|
+};
|
|
+
|
|
+struct vid_data {
|
|
+ int min;
|
|
+ int max;
|
|
+ int turbo;
|
|
+ int32_t ratio;
|
|
+};
|
|
+
|
|
+struct global_params {
|
|
+ bool no_turbo;
|
|
+ bool turbo_disabled;
|
|
+ int max_perf_pct;
|
|
+ int min_perf_pct;
|
|
+};
|
|
+
|
|
+struct cpudata {
|
|
+ int cpu;
|
|
+ unsigned int policy;
|
|
+ struct update_util_data update_util;
|
|
+ bool update_util_set;
|
|
+ struct pstate_data pstate;
|
|
+ struct vid_data vid;
|
|
+ u64 last_update;
|
|
+ u64 last_sample_time;
|
|
+ u64 aperf_mperf_shift;
|
|
+ u64 prev_aperf;
|
|
+ u64 prev_mperf;
|
|
+ u64 prev_tsc;
|
|
+ u64 prev_cummulative_iowait;
|
|
+ struct sample sample;
|
|
+ int32_t min_perf_ratio;
|
|
+ int32_t max_perf_ratio;
|
|
+ struct acpi_processor_performance acpi_perf_data;
|
|
+ bool valid_pss_table;
|
|
+ unsigned int iowait_boost;
|
|
+ s16 epp_powersave;
|
|
+ s16 epp_policy;
|
|
+ s16 epp_default;
|
|
+ s16 epp_saved;
|
|
+ u64 hwp_req_cached;
|
|
+ u64 hwp_cap_cached;
|
|
+ u64 last_io_update;
|
|
+ unsigned int sched_flags;
|
|
+ u32 hwp_boost_min;
|
|
+};
|
|
+
|
|
+struct pstate_funcs {
|
|
+ int (*get_max)();
|
|
+ int (*get_max_physical)();
|
|
+ int (*get_min)();
|
|
+ int (*get_turbo)();
|
|
+ int (*get_scaling)();
|
|
+ int (*get_aperf_mperf_shift)();
|
|
+ u64 (*get_val)(struct cpudata *, int);
|
|
+ void (*get_vid)(struct cpudata *);
|
|
+};
|
|
+
|
|
+enum {
|
|
+ PSS = 0,
|
|
+ PPC = 1,
|
|
+};
|
|
+
|
|
+struct cpuidle_device_wrapper {
|
|
+ struct cpuidle_device dev;
|
|
+ int last_state_idx;
|
|
+ u64 poll_limit_ns;
|
|
+};
|
|
+
|
|
+struct cpuidle_governor {
|
|
+ char name[16];
|
|
+ struct list_head governor_list;
|
|
+ unsigned int rating;
|
|
+ int (*enable)(struct cpuidle_driver___2 *, struct cpuidle_device *);
|
|
+ void (*disable)(struct cpuidle_driver___2 *, struct cpuidle_device *);
|
|
+ int (*select)(struct cpuidle_driver___2 *, struct cpuidle_device *, bool *);
|
|
+ void (*reflect)(struct cpuidle_device *, int);
|
|
+};
|
|
+
|
|
+struct cpuidle_driver_wrapper {
|
|
+ struct cpuidle_driver___2 drv;
|
|
+ const char *governor;
|
|
+};
|
|
+
+struct cpuidle_state_kobj {
+ struct cpuidle_state *state;
+ struct cpuidle_state_usage *state_usage;
+ struct completion kobj_unregister;
+ struct kobject kobj;
+ struct cpuidle_device *device;
+};
+
+struct cpuidle_device_kobj {
+ struct cpuidle_device *dev;
+ struct completion kobj_unregister;
+ struct kobject kobj;
+};
+
+struct cpuidle_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct cpuidle_device *, char *);
+ ssize_t (*store)(struct cpuidle_device *, const char *, size_t);
+};
+
+struct cpuidle_state_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct cpuidle_state *, struct cpuidle_state_usage *, char *);
+ ssize_t (*store)(struct cpuidle_state *, struct cpuidle_state_usage *, const char *, size_t);
+};
+
+struct menu_device {
+ int last_state_idx;
+ int needs_update;
+ int tick_wakeup;
+ unsigned int next_timer_us;
+ unsigned int predicted_us;
+ unsigned int bucket;
+ unsigned int correction_factor[12];
+ unsigned int intervals[8];
+ int interval_ptr;
+};
+
+struct sdhci_pci_data {
+ struct pci_dev *pdev;
+ int slotno;
+ int rst_n_gpio;
+ int cd_gpio;
+ int (*setup)(struct sdhci_pci_data *);
+ void (*cleanup)(struct sdhci_pci_data *);
+};
+
+struct led_cdev;
+
+struct dmi_memdev_info {
+ const char *device;
+ const char *bank;
+ u64 size;
+ u16 handle;
+};
+
+struct dmi_sysfs_entry {
+ struct dmi_header dh;
+ struct kobject kobj;
+ int instance;
+ int position;
+ struct list_head list;
+ struct kobject *child;
+};
+
+struct dmi_sysfs_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct dmi_sysfs_entry *, char *);
+};
+
+struct dmi_sysfs_mapped_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct dmi_sysfs_entry *, const struct dmi_header *, char *);
+};
+
+typedef ssize_t (*dmi_callback)(struct dmi_sysfs_entry *, const struct dmi_header *, void *);
+
+struct find_dmi_data {
+ struct dmi_sysfs_entry *entry;
+ dmi_callback callback;
+ void *private;
+ int instance_countdown;
+ ssize_t ret;
+};
+
+struct dmi_read_state {
+ char *buf;
+ loff_t pos;
+ size_t count;
+};
+
+struct dmi_entry_attr_show_data {
+ struct attribute *attr;
+ char *buf;
+};
+
|
|
+struct dmi_system_event_log {
|
|
+ struct dmi_header header;
|
|
+ u16 area_length;
|
|
+ u16 header_start_offset;
|
|
+ u16 data_start_offset;
|
|
+ u8 access_method;
|
|
+ u8 status;
|
|
+ u32 change_token;
|
|
+ union {
|
|
+ struct {
|
|
+ u16 index_addr;
|
|
+ u16 data_addr;
|
|
+ } io;
|
|
+ u32 phys_addr32;
|
|
+ u16 gpnv_handle;
|
|
+ u32 access_method_address;
|
|
+ };
|
|
+ u8 header_format;
|
|
+ u8 type_descriptors_supported_count;
|
|
+ u8 per_log_type_descriptor_length;
|
|
+ u8 supported_log_type_descriptos[0];
|
|
+} __attribute__((packed));
|
|
+
|
|
+typedef u8 (*sel_io_reader)(const struct dmi_system_event_log *, loff_t);
|
|
+
|
|
+struct dmi_device_attribute {
|
|
+ struct device_attribute dev_attr;
|
|
+ int field;
|
|
+};
|
|
+
|
|
+struct mafield {
|
|
+ const char *prefix;
|
|
+ int field;
|
|
+};
|
|
+
|
|
+struct firmware_map_entry {
|
|
+ u64 start;
|
|
+ u64 end;
|
|
+ const char *type;
|
|
+ struct list_head list;
|
|
+ struct kobject kobj;
|
|
+};
|
|
+
|
|
+struct memmap_attribute {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct firmware_map_entry *, char *);
|
|
+};
|
|
+
|
|
+struct fw_cfg_file {
|
|
+ __be32 size;
|
|
+ __be16 select;
|
|
+ __u16 reserved;
|
|
+ char name[56];
|
|
+};
|
|
+
|
|
+struct fw_cfg_dma_access {
|
|
+ __be32 control;
|
|
+ __be32 length;
|
|
+ __be64 address;
|
|
+};
|
|
+
|
|
+struct fw_cfg_vmcoreinfo {
|
|
+ __le16 host_format;
|
|
+ __le16 guest_format;
|
|
+ __le32 size;
|
|
+ __le64 paddr;
|
|
+};
|
|
+
|
|
+struct fw_cfg_sysfs_entry {
|
|
+ struct kobject kobj;
|
|
+ u32 size;
|
|
+ u16 select;
|
|
+ char name[56];
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct fw_cfg_sysfs_attribute {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct fw_cfg_sysfs_entry *, char *);
|
|
+};
|
|
+
|
|
+struct bmp_header {
|
|
+ u16 id;
|
|
+ u32 size;
|
|
+} __attribute__((packed));
|
|
+
|
|
+typedef efi_status_t efi_query_variable_store_t(u32, long unsigned int, bool);
|
|
+
|
|
+typedef struct {
|
|
+ efi_guid_t guid;
|
|
+ u32 table;
|
|
+} efi_config_table_32_t;
|
|
+
|
|
+typedef struct {
|
|
+ u32 version;
|
|
+ u32 length;
|
|
+ u64 memory_protection_attribute;
|
|
+} efi_properties_table_t;
|
|
+
|
|
+struct efivar_operations {
|
|
+ efi_get_variable_t *get_variable;
|
|
+ efi_get_next_variable_t *get_next_variable;
|
|
+ efi_set_variable_t *set_variable;
|
|
+ efi_set_variable_t *set_variable_nonblocking;
|
|
+ efi_query_variable_store_t *query_variable_store;
|
|
+};
|
|
+
|
|
+struct efivars {
|
|
+ struct kset *kset;
|
|
+ struct kobject *kobject;
|
|
+ const struct efivar_operations *ops;
|
|
+};
|
|
+
|
|
+struct linux_efi_random_seed {
|
|
+ u32 size;
|
|
+ u8 bits[0];
|
|
+};
|
|
+
|
|
+struct linux_efi_memreserve {
|
|
+ int size;
|
|
+ atomic_t count;
|
|
+ phys_addr_t next;
|
|
+ struct {
|
|
+ phys_addr_t base;
|
|
+ phys_addr_t size;
|
|
+ } entry[0];
|
|
+};
|
|
+
|
|
+struct efi_generic_dev_path {
|
|
+ u8 type;
|
|
+ u8 sub_type;
|
|
+ u16 length;
|
|
+};
|
|
+
|
|
+struct variable_validate {
|
|
+ efi_guid_t vendor;
|
|
+ char *name;
|
|
+ bool (*validate)(efi_char16_t *, int, u8 *, long unsigned int);
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ u32 version;
|
|
+ u32 num_entries;
|
|
+ u32 desc_size;
|
|
+ u32 reserved;
|
|
+ efi_memory_desc_t entry[0];
|
|
+} efi_memory_attributes_table_t;
|
|
+
|
|
+typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *);
|
|
+
|
|
+typedef struct {
|
|
+ u64 length;
|
|
+ u64 data;
|
|
+} efi_capsule_block_desc_t;
|
|
+
|
|
+struct efi_system_resource_entry_v1 {
|
|
+ efi_guid_t fw_class;
|
|
+ u32 fw_type;
|
|
+ u32 fw_version;
|
|
+ u32 lowest_supported_fw_version;
|
|
+ u32 capsule_flags;
|
|
+ u32 last_attempt_version;
|
|
+ u32 last_attempt_status;
|
|
+};
|
|
+
|
|
+struct efi_system_resource_table {
|
|
+ u32 fw_resource_count;
|
|
+ u32 fw_resource_count_max;
|
|
+ u64 fw_resource_version;
|
|
+ u8 entries[0];
|
|
+};
|
|
+
|
|
+struct esre_entry {
|
|
+ union {
|
|
+ struct efi_system_resource_entry_v1 *esre1;
|
|
+ } esre;
|
|
+ struct kobject kobj;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct esre_attribute {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct esre_entry *, char *);
|
|
+ ssize_t (*store)(struct esre_entry *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct cper_sec_proc_generic {
|
|
+ __u64 validation_bits;
|
|
+ __u8 proc_type;
|
|
+ __u8 proc_isa;
|
|
+ __u8 proc_error_type;
|
|
+ __u8 operation;
|
|
+ __u8 flags;
|
|
+ __u8 level;
|
|
+ __u16 reserved;
|
|
+ __u64 cpu_version;
|
|
+ char cpu_brand[128];
|
|
+ __u64 proc_id;
|
|
+ __u64 target_addr;
|
|
+ __u64 requestor_id;
|
|
+ __u64 responder_id;
|
|
+ __u64 ip;
|
|
+};
|
|
+
|
|
+struct cper_sec_proc_ia {
|
|
+ __u64 validation_bits;
|
|
+ __u64 lapic_id;
|
|
+ __u8 cpuid[48];
|
|
+};
|
|
+
|
|
+struct cper_mem_err_compact {
|
|
+ __u64 validation_bits;
|
|
+ __u16 node;
|
|
+ __u16 card;
|
|
+ __u16 module;
|
|
+ __u16 bank;
|
|
+ __u16 device;
|
|
+ __u16 row;
|
|
+ __u16 column;
|
|
+ __u16 bit_pos;
|
|
+ __u64 requestor_id;
|
|
+ __u64 responder_id;
|
|
+ __u64 target_id;
|
|
+ __u16 rank;
|
|
+ __u16 mem_array_handle;
|
|
+ __u16 mem_dev_handle;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct efi_runtime_map_entry {
|
|
+ efi_memory_desc_t md;
|
|
+ struct kobject kobj;
|
|
+};
|
|
+
|
|
+struct map_attribute {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct efi_runtime_map_entry *, char *);
|
|
+};
|
|
+
|
|
+enum efi_rts_ids {
|
|
+ GET_TIME = 0,
|
|
+ SET_TIME = 1,
|
|
+ GET_WAKEUP_TIME = 2,
|
|
+ SET_WAKEUP_TIME = 3,
|
|
+ GET_VARIABLE = 4,
|
|
+ GET_NEXT_VARIABLE = 5,
|
|
+ SET_VARIABLE = 6,
|
|
+ QUERY_VARIABLE_INFO = 7,
|
|
+ GET_NEXT_HIGH_MONO_COUNT = 8,
|
|
+ UPDATE_CAPSULE = 9,
|
|
+ QUERY_CAPSULE_CAPS = 10,
|
|
+};
|
|
+
|
|
+struct efi_runtime_work {
|
|
+ void *arg1;
|
|
+ void *arg2;
|
|
+ void *arg3;
|
|
+ void *arg4;
|
|
+ void *arg5;
|
|
+ efi_status_t status;
|
|
+ struct work_struct work;
|
|
+ enum efi_rts_ids efi_rts_id;
|
|
+ struct completion efi_rts_comp;
|
|
+};
|
|
+
|
|
+struct efi_dev_path {
|
|
+ u8 type;
|
|
+ u8 sub_type;
|
|
+ u16 length;
|
|
+ union {
|
|
+ struct {
|
|
+ u32 hid;
|
|
+ u32 uid;
|
|
+ } acpi;
|
|
+ struct {
|
|
+ u8 fn;
|
|
+ u8 dev;
|
|
+ } pci;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct acpi_hid_uid {
|
|
+ struct acpi_device_id hid[2];
|
|
+ char uid[11];
|
|
+};
|
|
+
|
|
+struct dev_header {
|
|
+ u32 len;
|
|
+ u32 prop_count;
|
|
+ struct efi_dev_path path[0];
|
|
+};
|
|
+
|
|
+struct properties_header {
|
|
+ u32 len;
|
|
+ u32 version;
|
|
+ u32 dev_count;
|
|
+ struct dev_header dev_header[0];
|
|
+};
|
|
+
|
|
+struct cper_ia_err_info {
|
|
+ guid_t err_type;
|
|
+ __u64 validation_bits;
|
|
+ __u64 check_info;
|
|
+ __u64 target_id;
|
|
+ __u64 requestor_id;
|
|
+ __u64 responder_id;
|
|
+ __u64 ip;
|
|
+};
|
|
+
|
|
+struct cper_ia_proc_ctx {
|
|
+ __u16 reg_ctx_type;
|
|
+ __u16 reg_arr_size;
|
|
+ __u32 msr_addr;
|
|
+ __u64 mm_reg_addr;
|
|
+};
|
|
+
|
|
+enum err_types {
|
|
+ ERR_TYPE_CACHE = 0,
|
|
+ ERR_TYPE_TLB = 1,
|
|
+ ERR_TYPE_BUS = 2,
|
|
+ ERR_TYPE_MS = 3,
|
|
+ N_ERR_TYPES = 4,
|
|
+};
|
|
+
|
|
+struct hid_device_id {
|
|
+ __u16 bus;
|
|
+ __u16 group;
|
|
+ __u32 vendor;
|
|
+ __u32 product;
|
|
+ kernel_ulong_t driver_data;
|
|
+};
|
|
+
|
|
+struct hid_item {
|
|
+ unsigned int format;
|
|
+ __u8 size;
|
|
+ __u8 type;
|
|
+ __u8 tag;
|
|
+ union {
|
|
+ __u8 u8;
|
|
+ __s8 s8;
|
|
+ __u16 u16;
|
|
+ __s16 s16;
|
|
+ __u32 u32;
|
|
+ __s32 s32;
|
|
+ __u8 *longdata;
|
|
+ } data;
|
|
+};
|
|
+
|
|
+struct hid_global {
|
|
+ unsigned int usage_page;
|
|
+ __s32 logical_minimum;
|
|
+ __s32 logical_maximum;
|
|
+ __s32 physical_minimum;
|
|
+ __s32 physical_maximum;
|
|
+ __s32 unit_exponent;
|
|
+ unsigned int unit;
|
|
+ unsigned int report_id;
|
|
+ unsigned int report_size;
|
|
+ unsigned int report_count;
|
|
+};
|
|
+
|
|
+struct hid_local {
|
|
+ unsigned int usage[12288];
|
|
+ u8 usage_size[12288];
|
|
+ unsigned int collection_index[12288];
|
|
+ unsigned int usage_index;
|
|
+ unsigned int usage_minimum;
|
|
+ unsigned int delimiter_depth;
|
|
+ unsigned int delimiter_branch;
|
|
+};
|
|
+
|
|
+struct hid_collection {
|
|
+ unsigned int type;
|
|
+ unsigned int usage;
|
|
+ unsigned int level;
|
|
+};
|
|
+
|
|
+struct hid_usage {
|
|
+ unsigned int hid;
|
|
+ unsigned int collection_index;
|
|
+ unsigned int usage_index;
|
|
+ __u16 code;
|
|
+ __u8 type;
|
|
+ __s8 hat_min;
|
|
+ __s8 hat_max;
|
|
+ __s8 hat_dir;
|
|
+};
|
|
+
|
|
+struct hid_report;
|
|
+
|
|
+struct hid_input;
|
|
+
|
|
+struct hid_field {
|
|
+ unsigned int physical;
|
|
+ unsigned int logical;
|
|
+ unsigned int application;
|
|
+ struct hid_usage *usage;
|
|
+ unsigned int maxusage;
|
|
+ unsigned int flags;
|
|
+ unsigned int report_offset;
|
|
+ unsigned int report_size;
|
|
+ unsigned int report_count;
|
|
+ unsigned int report_type;
|
|
+ __s32 *value;
|
|
+ __s32 logical_minimum;
|
|
+ __s32 logical_maximum;
|
|
+ __s32 physical_minimum;
|
|
+ __s32 physical_maximum;
|
|
+ __s32 unit_exponent;
|
|
+ unsigned int unit;
|
|
+ struct hid_report *report;
|
|
+ unsigned int index;
|
|
+ struct hid_input *hidinput;
|
|
+ __u16 dpad;
|
|
+};
|
|
+
|
|
+struct hid_device;
|
|
+
|
|
+struct hid_report {
|
|
+ struct list_head list;
|
|
+ struct list_head hidinput_list;
|
|
+ unsigned int id;
|
|
+ unsigned int type;
|
|
+ unsigned int application;
|
|
+ struct hid_field *field[256];
|
|
+ unsigned int maxfield;
|
|
+ unsigned int size;
|
|
+ struct hid_device *device;
|
|
+};
|
|
+
|
|
+struct hid_input {
|
|
+ struct list_head list;
|
|
+ struct hid_report *report;
|
|
+ struct input_dev *input;
|
|
+ const char *name;
|
|
+ bool registered;
|
|
+ struct list_head reports;
|
|
+ unsigned int application;
|
|
+};
|
|
+
|
|
+enum hid_type {
|
|
+ HID_TYPE_OTHER = 0,
|
|
+ HID_TYPE_USBMOUSE = 1,
|
|
+ HID_TYPE_USBNONE = 2,
|
|
+};
|
|
+
|
|
+struct hid_report_enum {
|
|
+ unsigned int numbered;
|
|
+ struct list_head report_list;
|
|
+ struct hid_report *report_id_hash[256];
|
|
+};
|
|
+
|
|
+enum hid_battery_status {
|
|
+ HID_BATTERY_UNKNOWN = 0,
|
|
+ HID_BATTERY_QUERIED = 1,
|
|
+ HID_BATTERY_REPORTED = 2,
|
|
+};
|
|
+
|
|
+struct hid_driver;
|
|
+
|
|
+struct hid_ll_driver;
|
|
+
|
|
+struct hid_device {
|
|
+ __u8 *dev_rdesc;
|
|
+ unsigned int dev_rsize;
|
|
+ __u8 *rdesc;
|
|
+ unsigned int rsize;
|
|
+ struct hid_collection *collection;
|
|
+ unsigned int collection_size;
|
|
+ unsigned int maxcollection;
|
|
+ unsigned int maxapplication;
|
|
+ __u16 bus;
|
|
+ __u16 group;
|
|
+ __u32 vendor;
|
|
+ __u32 product;
|
|
+ __u32 version;
|
|
+ enum hid_type type;
|
|
+ unsigned int country;
|
|
+ struct hid_report_enum report_enum[3];
|
|
+ struct work_struct led_work;
|
|
+ struct semaphore driver_input_lock;
|
|
+ struct device dev;
|
|
+ struct hid_driver *driver;
|
|
+ struct hid_ll_driver *ll_driver;
|
|
+ struct mutex ll_open_lock;
|
|
+ unsigned int ll_open_count;
|
|
+ struct power_supply *battery;
|
|
+ __s32 battery_capacity;
|
|
+ __s32 battery_min;
|
|
+ __s32 battery_max;
|
|
+ __s32 battery_report_type;
|
|
+ __s32 battery_report_id;
|
|
+ enum hid_battery_status battery_status;
|
|
+ bool battery_avoid_query;
|
|
+ long unsigned int status;
|
|
+ unsigned int claimed;
|
|
+ unsigned int quirks;
|
|
+ bool io_started;
|
|
+ struct list_head inputs;
|
|
+ void *hiddev;
|
|
+ void *hidraw;
|
|
+ char name[128];
|
|
+ char phys[64];
|
|
+ char uniq[64];
|
|
+ void *driver_data;
|
|
+ int (*ff_init)(struct hid_device *);
|
|
+ int (*hiddev_connect)(struct hid_device *, unsigned int);
|
|
+ void (*hiddev_disconnect)(struct hid_device *);
|
|
+ void (*hiddev_hid_event)(struct hid_device *, struct hid_field *, struct hid_usage *, __s32);
|
|
+ void (*hiddev_report_event)(struct hid_device *, struct hid_report *);
|
|
+ short unsigned int debug;
|
|
+ struct dentry *debug_dir;
|
|
+ struct dentry *debug_rdesc;
|
|
+ struct dentry *debug_events;
|
|
+ struct list_head debug_list;
|
|
+ spinlock_t debug_list_lock;
|
|
+ wait_queue_head_t debug_wait;
|
|
+};
|
|
+
|
|
+struct hid_report_id;
|
|
+
|
|
+struct hid_usage_id;
|
|
+
|
|
+struct hid_driver {
|
|
+ char *name;
|
|
+ const struct hid_device_id *id_table;
|
|
+ struct list_head dyn_list;
|
|
+ spinlock_t dyn_lock;
|
|
+ bool (*match)(struct hid_device *, bool);
|
|
+ int (*probe)(struct hid_device *, const struct hid_device_id *);
|
|
+ void (*remove)(struct hid_device *);
|
|
+ const struct hid_report_id *report_table;
|
|
+ int (*raw_event)(struct hid_device *, struct hid_report *, u8 *, int);
|
|
+ const struct hid_usage_id *usage_table;
|
|
+ int (*event)(struct hid_device *, struct hid_field *, struct hid_usage *, __s32);
|
|
+ void (*report)(struct hid_device *, struct hid_report *);
|
|
+ __u8 * (*report_fixup)(struct hid_device *, __u8 *, unsigned int *);
|
|
+ int (*input_mapping)(struct hid_device *, struct hid_input *, struct hid_field *, struct hid_usage *, long unsigned int **, int *);
|
|
+ int (*input_mapped)(struct hid_device *, struct hid_input *, struct hid_field *, struct hid_usage *, long unsigned int **, int *);
|
|
+ int (*input_configured)(struct hid_device *, struct hid_input *);
|
|
+ void (*feature_mapping)(struct hid_device *, struct hid_field *, struct hid_usage *);
|
|
+ int (*suspend)(struct hid_device *, pm_message_t);
|
|
+ int (*resume)(struct hid_device *);
|
|
+ int (*reset_resume)(struct hid_device *);
|
|
+ struct device_driver driver;
|
|
+};
|
|
+
|
|
+struct hid_ll_driver {
|
|
+ int (*start)(struct hid_device *);
|
|
+ void (*stop)(struct hid_device *);
|
|
+ int (*open)(struct hid_device *);
|
|
+ void (*close)(struct hid_device *);
|
|
+ int (*power)(struct hid_device *, int);
|
|
+ int (*parse)(struct hid_device *);
|
|
+ void (*request)(struct hid_device *, struct hid_report *, int);
|
|
+ int (*wait)(struct hid_device *);
|
|
+ int (*raw_request)(struct hid_device *, unsigned char, __u8 *, size_t, unsigned char, int);
|
|
+ int (*output_report)(struct hid_device *, __u8 *, size_t);
|
|
+ int (*idle)(struct hid_device *, int, int, int);
|
|
+};
|
|
+
|
|
+struct hid_parser {
|
|
+ struct hid_global global;
|
|
+ struct hid_global global_stack[4];
|
|
+ unsigned int global_stack_ptr;
|
|
+ struct hid_local local;
|
|
+ unsigned int *collection_stack;
|
|
+ unsigned int collection_stack_ptr;
|
|
+ unsigned int collection_stack_size;
|
|
+ struct hid_device *device;
|
|
+ unsigned int scan_flags;
|
|
+};
|
|
+
|
|
+struct hid_report_id {
|
|
+ __u32 report_type;
|
|
+};
|
|
+
|
|
+struct hid_usage_id {
|
|
+ __u32 usage_hid;
|
|
+ __u32 usage_type;
|
|
+ __u32 usage_code;
|
|
+};
|
|
+
|
|
+struct hiddev {
|
|
+ int minor;
|
|
+ int exist;
|
|
+ int open;
|
|
+ struct mutex existancelock;
|
|
+ wait_queue_head_t wait;
|
|
+ struct hid_device *hid;
|
|
+ struct list_head list;
|
|
+ spinlock_t list_lock;
|
|
+ bool initialized;
|
|
+};
|
|
+
|
|
+struct hidraw {
|
|
+ unsigned int minor;
|
|
+ int exist;
|
|
+ int open;
|
|
+ wait_queue_head_t wait;
|
|
+ struct hid_device *hid;
|
|
+ struct device *dev;
|
|
+ spinlock_t list_lock;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct hid_dynid {
|
|
+ struct list_head list;
|
|
+ struct hid_device_id id;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ POWER_SUPPLY_SCOPE_UNKNOWN = 0,
|
|
+ POWER_SUPPLY_SCOPE_SYSTEM = 1,
|
|
+ POWER_SUPPLY_SCOPE_DEVICE = 2,
|
|
+};
|
|
+
|
|
+typedef bool (*hid_usage_cmp_t)(struct hid_usage *, unsigned int, unsigned int);
|
|
+
|
|
+struct quirks_list_struct {
|
|
+ struct hid_device_id hid_bl_item;
|
|
+ struct list_head node;
|
|
+};
|
|
+
|
|
+struct hid_debug_list {
|
|
+ struct {
|
|
+ union {
|
|
+ struct __kfifo kfifo;
|
|
+ char *type;
|
|
+ const char *const_type;
|
|
+ char (*rectype)[0];
|
|
+ char *ptr;
|
|
+ const char *ptr_const;
|
|
+ };
|
|
+ char buf[0];
|
|
+ } hid_debug_fifo;
|
|
+ struct fasync_struct *fasync;
|
|
+ struct hid_device *hdev;
|
|
+ struct list_head node;
|
|
+ struct mutex read_mutex;
|
|
+};
|
|
+
|
|
+struct hid_usage_entry {
|
|
+ unsigned int page;
|
|
+ unsigned int usage;
|
|
+ const char *description;
|
|
+};
|
|
+
|
|
+struct hidraw_devinfo {
|
|
+ __u32 bustype;
|
|
+ __s16 vendor;
|
|
+ __s16 product;
|
|
+};
|
|
+
|
|
+struct hidraw_report {
|
|
+ __u8 *value;
|
|
+ int len;
|
|
+};
|
|
+
|
|
+struct hidraw_list {
|
|
+ struct hidraw_report buffer[64];
|
|
+ int head;
|
|
+ int tail;
|
|
+ struct fasync_struct *fasync;
|
|
+ struct hidraw *hidraw;
|
|
+ struct list_head node;
|
|
+ struct mutex read_mutex;
|
|
+};
|
|
+
|
|
+struct magicmouse_sc {
|
|
+ struct input_dev *input;
|
|
+ long unsigned int quirks;
|
|
+ int ntouches;
|
|
+ int scroll_accel;
|
|
+ long unsigned int scroll_jiffies;
|
|
+ struct {
|
|
+ short int x;
|
|
+ short int y;
|
|
+ short int scroll_x;
|
|
+ short int scroll_y;
|
|
+ u8 size;
|
|
+ } touches[16];
|
|
+ int tracking_ids[16];
|
|
+};
|
|
+
|
|
+struct ntrig_data {
|
|
+ __u16 x;
|
|
+ __u16 y;
|
|
+ __u16 w;
|
|
+ __u16 h;
|
|
+ __u16 id;
|
|
+ bool tipswitch;
|
|
+ bool confidence;
|
|
+ bool first_contact_touch;
|
|
+ bool reading_mt;
|
|
+ __u8 mt_footer[4];
|
|
+ __u8 mt_foot_count;
|
|
+ __s8 act_state;
|
|
+ __s8 deactivate_slack;
|
|
+ __s8 activate_slack;
|
|
+ __u16 min_width;
|
|
+ __u16 min_height;
|
|
+ __u16 activation_width;
|
|
+ __u16 activation_height;
|
|
+ __u16 sensor_logical_width;
|
|
+ __u16 sensor_logical_height;
|
|
+ __u16 sensor_physical_width;
|
|
+ __u16 sensor_physical_height;
|
|
+};
|
|
+
|
|
+struct hid_sensor_hub_attribute_info {
|
|
+ u32 usage_id;
|
|
+ u32 attrib_id;
|
|
+ s32 report_id;
|
|
+ s32 index;
|
|
+ s32 units;
|
|
+ s32 unit_expo;
|
|
+ s32 size;
|
|
+ s32 logical_minimum;
|
|
+ s32 logical_maximum;
|
|
+};
|
|
+
|
|
+struct sensor_hub_pending {
|
|
+ bool status;
|
|
+ struct completion ready;
|
|
+ u32 usage_id;
|
|
+ u32 attr_usage_id;
|
|
+ int raw_size;
|
|
+ u8 *raw_data;
|
|
+};
|
|
+
|
|
+struct hid_sensor_hub_device {
|
|
+ struct hid_device *hdev;
|
|
+ u32 vendor_id;
|
|
+ u32 product_id;
|
|
+ u32 usage;
|
|
+ int start_collection_index;
|
|
+ int end_collection_index;
|
|
+ struct mutex *mutex_ptr;
|
|
+ struct sensor_hub_pending pending;
|
|
+};
|
|
+
|
|
+struct hid_sensor_hub_callbacks {
|
|
+ struct platform_device *pdev;
|
|
+ int (*suspend)(struct hid_sensor_hub_device *, void *);
|
|
+ int (*resume)(struct hid_sensor_hub_device *, void *);
|
|
+ int (*capture_sample)(struct hid_sensor_hub_device *, u32, size_t, char *, void *);
|
|
+ int (*send_event)(struct hid_sensor_hub_device *, u32, void *);
|
|
+};
|
|
+
|
|
+enum sensor_hub_read_flags {
|
|
+ SENSOR_HUB_SYNC = 0,
|
|
+ SENSOR_HUB_ASYNC = 1,
|
|
+};
|
|
+
|
|
+struct sensor_hub_data {
|
|
+ struct mutex mutex;
|
|
+ spinlock_t lock;
|
|
+ struct list_head dyn_callback_list;
|
|
+ spinlock_t dyn_callback_lock;
|
|
+ struct mfd_cell *hid_sensor_hub_client_devs;
|
|
+ int hid_sensor_client_cnt;
|
|
+ long unsigned int quirks;
|
|
+ int ref_cnt;
|
|
+};
|
|
+
|
|
+struct hid_sensor_hub_callbacks_list {
|
|
+ struct list_head list;
|
|
+ u32 usage_id;
|
|
+ struct hid_sensor_hub_device *hsdev;
|
|
+ struct hid_sensor_hub_callbacks *usage_callback;
|
|
+ void *priv;
|
|
+};
|
|
+
|
|
+struct hid_control_fifo {
|
|
+ unsigned char dir;
|
|
+ struct hid_report *report;
|
|
+ char *raw_report;
|
|
+};
|
|
+
|
|
+struct hid_output_fifo {
|
|
+ struct hid_report *report;
|
|
+ char *raw_report;
|
|
+};
|
|
+
|
|
+struct hid_class_descriptor {
|
|
+ __u8 bDescriptorType;
|
|
+ __le16 wDescriptorLength;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct hid_descriptor {
|
|
+ __u8 bLength;
|
|
+ __u8 bDescriptorType;
|
|
+ __le16 bcdHID;
|
|
+ __u8 bCountryCode;
|
|
+ __u8 bNumDescriptors;
|
|
+ struct hid_class_descriptor desc[1];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct usbhid_device {
|
|
+ struct hid_device *hid;
|
|
+ struct usb_interface *intf;
|
|
+ int ifnum;
|
|
+ unsigned int bufsize;
|
|
+ struct urb *urbin;
|
|
+ char *inbuf;
|
|
+ dma_addr_t inbuf_dma;
|
|
+ struct urb *urbctrl;
|
|
+ struct usb_ctrlrequest *cr;
|
|
+ struct hid_control_fifo ctrl[256];
|
|
+ unsigned char ctrlhead;
|
|
+ unsigned char ctrltail;
|
|
+ char *ctrlbuf;
|
|
+ dma_addr_t ctrlbuf_dma;
|
|
+ long unsigned int last_ctrl;
|
|
+ struct urb *urbout;
|
|
+ struct hid_output_fifo out[256];
|
|
+ unsigned char outhead;
|
|
+ unsigned char outtail;
|
|
+ char *outbuf;
|
|
+ dma_addr_t outbuf_dma;
|
|
+ long unsigned int last_out;
|
|
+ spinlock_t lock;
|
|
+ long unsigned int iofl;
|
|
+ struct timer_list io_retry;
|
|
+ long unsigned int stop_retry;
|
|
+ unsigned int retry_delay;
|
|
+ struct work_struct reset_work;
|
|
+ wait_queue_head_t wait;
|
|
+};
|
|
+
|
|
+struct hiddev_event {
|
|
+ unsigned int hid;
|
|
+ int value;
|
|
+};
|
|
+
|
|
+struct hiddev_devinfo {
|
|
+ __u32 bustype;
|
|
+ __u32 busnum;
|
|
+ __u32 devnum;
|
|
+ __u32 ifnum;
|
|
+ __s16 vendor;
|
|
+ __s16 product;
|
|
+ __s16 version;
|
|
+ __u32 num_applications;
|
|
+};
|
|
+
|
|
+struct hiddev_collection_info {
|
|
+ __u32 index;
|
|
+ __u32 type;
|
|
+ __u32 usage;
|
|
+ __u32 level;
|
|
+};
|
|
+
|
|
+struct hiddev_report_info {
|
|
+ __u32 report_type;
|
|
+ __u32 report_id;
|
|
+ __u32 num_fields;
|
|
+};
|
|
+
|
|
+struct hiddev_field_info {
|
|
+ __u32 report_type;
|
|
+ __u32 report_id;
|
|
+ __u32 field_index;
|
|
+ __u32 maxusage;
|
|
+ __u32 flags;
|
|
+ __u32 physical;
|
|
+ __u32 logical;
|
|
+ __u32 application;
|
|
+ __s32 logical_minimum;
|
|
+ __s32 logical_maximum;
|
|
+ __s32 physical_minimum;
|
|
+ __s32 physical_maximum;
|
|
+ __u32 unit_exponent;
|
|
+ __u32 unit;
|
|
+};
|
|
+
|
|
+struct hiddev_usage_ref {
|
|
+ __u32 report_type;
|
|
+ __u32 report_id;
|
|
+ __u32 field_index;
|
|
+ __u32 usage_index;
|
|
+ __u32 usage_code;
|
|
+ __s32 value;
|
|
+};
|
|
+
|
|
+struct hiddev_usage_ref_multi {
|
|
+ struct hiddev_usage_ref uref;
|
|
+ __u32 num_values;
|
|
+ __s32 values[1024];
|
|
+};
|
|
+
|
|
+struct hiddev_list {
|
|
+ struct hiddev_usage_ref buffer[2048];
|
|
+ int head;
|
|
+ int tail;
|
|
+ unsigned int flags;
|
|
+ struct fasync_struct *fasync;
|
|
+ struct hiddev *hiddev;
|
|
+ struct list_head node;
|
|
+ struct mutex thread_lock;
|
|
+};
|
|
+
|
|
+struct pidff_usage {
|
|
+ struct hid_field *field;
|
|
+ s32 *value;
|
|
+};
|
|
+
|
|
+struct pidff_device {
|
|
+ struct hid_device *hid;
|
|
+ struct hid_report *reports[13];
|
|
+ struct pidff_usage set_effect[7];
|
|
+ struct pidff_usage set_envelope[5];
|
|
+ struct pidff_usage set_condition[8];
|
|
+ struct pidff_usage set_periodic[5];
|
|
+ struct pidff_usage set_constant[2];
|
|
+ struct pidff_usage set_ramp[3];
|
|
+ struct pidff_usage device_gain[1];
|
|
+ struct pidff_usage block_load[2];
|
|
+ struct pidff_usage pool[3];
|
|
+ struct pidff_usage effect_operation[2];
|
|
+ struct pidff_usage block_free[1];
|
|
+ struct hid_field *create_new_effect_type;
|
|
+ struct hid_field *set_effect_type;
|
|
+ struct hid_field *effect_direction;
|
|
+ struct hid_field *device_control;
|
|
+ struct hid_field *block_load_status;
|
|
+ struct hid_field *effect_operation_status;
|
|
+ int control_id[2];
|
|
+ int type_id[11];
|
|
+ int status_id[2];
|
|
+ int operation_id[2];
|
|
+ int pid_id[64];
|
|
+};
|
|
+
|
|
+struct pmc_bit_map {
|
|
+ const char *name;
|
|
+ u32 bit_mask;
|
|
+};
|
|
+
|
|
+struct pmc_reg_map {
|
|
+ const struct pmc_bit_map *d3_sts_0;
|
|
+ const struct pmc_bit_map *d3_sts_1;
|
|
+ const struct pmc_bit_map *func_dis;
|
|
+ const struct pmc_bit_map *func_dis_2;
|
|
+ const struct pmc_bit_map *pss;
|
|
+};
|
|
+
|
|
+struct pmc_data {
|
|
+ const struct pmc_reg_map *map;
|
|
+ const struct pmc_clk *clks;
|
|
+};
|
|
+
|
|
+struct pmc_dev {
|
|
+ u32 base_addr;
|
|
+ void *regmap;
|
|
+ const struct pmc_reg_map *map;
|
|
+ struct dentry *dbgfs_dir;
|
|
+ bool init;
|
|
+};
|
|
+
|
|
+struct acpi_table_pcct {
|
|
+ struct acpi_table_header header;
|
|
+ u32 flags;
|
|
+ u64 reserved;
|
|
+};
|
|
+
|
|
+enum acpi_pcct_type {
|
|
+ ACPI_PCCT_TYPE_GENERIC_SUBSPACE = 0,
|
|
+ ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE = 1,
|
|
+ ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2 = 2,
|
|
+ ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE = 3,
|
|
+ ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE = 4,
|
|
+ ACPI_PCCT_TYPE_RESERVED = 5,
|
|
+};
|
|
+
|
|
+struct acpi_pcct_subspace {
|
|
+ struct acpi_subtable_header header;
|
|
+ u8 reserved[6];
|
|
+ u64 base_address;
|
|
+ u64 length;
|
|
+ struct acpi_generic_address doorbell_register;
|
|
+ u64 preserve_mask;
|
|
+ u64 write_mask;
|
|
+ u32 latency;
|
|
+ u32 max_access_rate;
|
|
+ u16 min_turnaround_time;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct acpi_pcct_hw_reduced_type2 {
|
|
+ struct acpi_subtable_header header;
|
|
+ u32 platform_interrupt;
|
|
+ u8 flags;
|
|
+ u8 reserved;
|
|
+ u64 base_address;
|
|
+ u64 length;
|
|
+ struct acpi_generic_address doorbell_register;
|
|
+ u64 preserve_mask;
|
|
+ u64 write_mask;
|
|
+ u32 latency;
|
|
+ u32 max_access_rate;
|
|
+ u16 min_turnaround_time;
|
|
+ struct acpi_generic_address platform_ack_register;
|
|
+ u64 ack_preserve_mask;
|
|
+ u64 ack_write_mask;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct hwspinlock___2;
|
|
+
|
|
+struct hwspinlock_ops {
|
|
+ int (*trylock)(struct hwspinlock___2 *);
|
|
+ void (*unlock)(struct hwspinlock___2 *);
|
|
+ void (*relax)(struct hwspinlock___2 *);
|
|
+};
|
|
+
|
|
+struct hwspinlock_device;
|
|
+
|
|
+struct hwspinlock___2 {
|
|
+ struct hwspinlock_device *bank;
|
|
+ spinlock_t lock;
|
|
+ void *priv;
|
|
+};
|
|
+
|
|
+struct hwspinlock_device {
|
|
+ struct device *dev;
|
|
+ const struct hwspinlock_ops *ops;
|
|
+ int base_id;
|
|
+ int num_locks;
|
|
+ struct hwspinlock___2 lock[0];
|
|
+};
|
|
+
|
|
+struct powercap_control_type;
|
|
+
|
|
+struct powercap_control_type_ops {
|
|
+ int (*set_enable)(struct powercap_control_type *, bool);
|
|
+ int (*get_enable)(struct powercap_control_type *, bool *);
|
|
+ int (*release)(struct powercap_control_type *);
|
|
+};
|
|
+
|
|
+struct powercap_control_type {
|
|
+ struct device dev;
|
|
+ struct idr idr;
|
|
+ int nr_zones;
|
|
+ const struct powercap_control_type_ops *ops;
|
|
+ struct mutex lock;
|
|
+ bool allocated;
|
|
+ struct list_head node;
|
|
+};
|
|
+
|
|
+struct powercap_zone;
|
|
+
|
|
+struct powercap_zone_ops {
|
|
+ int (*get_max_energy_range_uj)(struct powercap_zone *, u64 *);
|
|
+ int (*get_energy_uj)(struct powercap_zone *, u64 *);
|
|
+ int (*reset_energy_uj)(struct powercap_zone *);
|
|
+ int (*get_max_power_range_uw)(struct powercap_zone *, u64 *);
|
|
+ int (*get_power_uw)(struct powercap_zone *, u64 *);
|
|
+ int (*set_enable)(struct powercap_zone *, bool);
|
|
+ int (*get_enable)(struct powercap_zone *, bool *);
|
|
+ int (*release)(struct powercap_zone *);
|
|
+};
|
|
+
|
|
+struct powercap_zone_constraint;
|
|
+
|
|
+struct powercap_zone {
|
|
+ int id;
|
|
+ char *name;
|
|
+ void *control_type_inst;
|
|
+ const struct powercap_zone_ops *ops;
|
|
+ struct device dev;
|
|
+ int const_id_cnt;
|
|
+ struct idr idr;
|
|
+ struct idr *parent_idr;
|
|
+ void *private_data;
|
|
+ struct attribute **zone_dev_attrs;
|
|
+ int zone_attr_count;
|
|
+ struct attribute_group dev_zone_attr_group;
|
|
+ const struct attribute_group *dev_attr_groups[2];
|
|
+ bool allocated;
|
|
+ struct powercap_zone_constraint *constraints;
|
|
+};
|
|
+
|
|
+struct powercap_zone_constraint_ops;
|
|
+
|
|
+struct powercap_zone_constraint {
|
|
+ int id;
|
|
+ struct powercap_zone *power_zone;
|
|
+ const struct powercap_zone_constraint_ops *ops;
|
|
+};
|
|
+
|
|
+struct powercap_zone_constraint_ops {
|
|
+ int (*set_power_limit_uw)(struct powercap_zone *, int, u64);
|
|
+ int (*get_power_limit_uw)(struct powercap_zone *, int, u64 *);
|
|
+ int (*set_time_window_us)(struct powercap_zone *, int, u64);
|
|
+ int (*get_time_window_us)(struct powercap_zone *, int, u64 *);
|
|
+ int (*get_max_power_uw)(struct powercap_zone *, int, u64 *);
|
|
+ int (*get_min_power_uw)(struct powercap_zone *, int, u64 *);
|
|
+ int (*get_max_time_window_us)(struct powercap_zone *, int, u64 *);
|
|
+ int (*get_min_time_window_us)(struct powercap_zone *, int, u64 *);
|
|
+ const char * (*get_name)(struct powercap_zone *, int);
|
|
+};
|
|
+
|
|
+struct powercap_constraint_attr {
|
|
+ struct device_attribute power_limit_attr;
|
|
+ struct device_attribute time_window_attr;
|
|
+ struct device_attribute max_power_attr;
|
|
+ struct device_attribute min_power_attr;
|
|
+ struct device_attribute max_time_window_attr;
|
|
+ struct device_attribute min_time_window_attr;
|
|
+ struct device_attribute name_attr;
|
|
+};
|
|
+
|
|
+typedef guid_t uuid_le;
|
|
+
|
|
+struct cper_arm_err_info {
|
|
+ __u8 version;
|
|
+ __u8 length;
|
|
+ __u16 validation_bits;
|
|
+ __u8 type;
|
|
+ __u16 multiple_error;
|
|
+ __u8 flags;
|
|
+ __u64 error_info;
|
|
+ __u64 virt_fault_addr;
|
|
+ __u64 physical_fault_addr;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct cper_arm_ctx_info {
|
|
+ __u16 version;
|
|
+ __u16 type;
|
|
+ __u32 size;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_extlog_mem_event {
|
|
+ struct trace_entry ent;
|
|
+ u32 err_seq;
|
|
+ u8 etype;
|
|
+ u8 sev;
|
|
+ u64 pa;
|
|
+ u8 pa_mask_lsb;
|
|
+ uuid_le fru_id;
|
|
+ u32 __data_loc_fru_text;
|
|
+ struct cper_mem_err_compact data;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_mc_event {
|
|
+ struct trace_entry ent;
|
|
+ unsigned int error_type;
|
|
+ u32 __data_loc_msg;
|
|
+ u32 __data_loc_label;
|
|
+ u16 error_count;
|
|
+ u8 mc_index;
|
|
+ s8 top_layer;
|
|
+ s8 middle_layer;
|
|
+ s8 lower_layer;
|
|
+ long int address;
|
|
+ u8 grain_bits;
|
|
+ long int syndrome;
|
|
+ u32 __data_loc_driver_detail;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_arm_event {
|
|
+ struct trace_entry ent;
|
|
+ u64 mpidr;
|
|
+ u64 midr;
|
|
+ u32 running_state;
|
|
+ u32 psci_state;
|
|
+ u8 affinity;
|
|
+ u32 pei_len;
|
|
+ u32 __data_loc_buf;
|
|
+ u32 ctx_len;
|
|
+ u32 __data_loc_buf1;
|
|
+ u32 oem_len;
|
|
+ u32 __data_loc_buf2;
|
|
+ u8 sev;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_non_standard_event {
|
|
+ struct trace_entry ent;
|
|
+ char sec_type[16];
|
|
+ char fru_id[16];
|
|
+ u32 __data_loc_fru_text;
|
|
+ u8 sev;
|
|
+ u32 len;
|
|
+ u32 __data_loc_buf;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_aer_event {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_dev_name;
|
|
+ u32 status;
|
|
+ u8 severity;
|
|
+ u8 tlp_header_valid;
|
|
+ u32 tlp_header[4];
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_memory_failure_event {
|
|
+ struct trace_entry ent;
|
|
+ long unsigned int pfn;
|
|
+ int type;
|
|
+ int result;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_extlog_mem_event {
|
|
+ u32 fru_text;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_mc_event {
|
|
+ u32 msg;
|
|
+ u32 label;
|
|
+ u32 driver_detail;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_arm_event {
|
|
+ u32 buf;
|
|
+ u32 buf1;
|
|
+ u32 buf2;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_non_standard_event {
|
|
+ u32 fru_text;
|
|
+ u32 buf;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_aer_event {
|
|
+ u32 dev_name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_memory_failure_event {};
|
|
+
|
|
+enum tb_cfg_pkg_type {
|
|
+ TB_CFG_PKG_READ = 1,
|
|
+ TB_CFG_PKG_WRITE = 2,
|
|
+ TB_CFG_PKG_ERROR = 3,
|
|
+ TB_CFG_PKG_NOTIFY_ACK = 4,
|
|
+ TB_CFG_PKG_EVENT = 5,
|
|
+ TB_CFG_PKG_XDOMAIN_REQ = 6,
|
|
+ TB_CFG_PKG_XDOMAIN_RESP = 7,
|
|
+ TB_CFG_PKG_OVERRIDE = 8,
|
|
+ TB_CFG_PKG_RESET = 9,
|
|
+ TB_CFG_PKG_ICM_EVENT = 10,
|
|
+ TB_CFG_PKG_ICM_CMD = 11,
|
|
+ TB_CFG_PKG_ICM_RESP = 12,
|
|
+ TB_CFG_PKG_PREPARE_TO_SLEEP = 13,
|
|
+};
|
|
+
|
|
+enum tb_security_level {
|
|
+ TB_SECURITY_NONE = 0,
|
|
+ TB_SECURITY_USER = 1,
|
|
+ TB_SECURITY_SECURE = 2,
|
|
+ TB_SECURITY_DPONLY = 3,
|
|
+ TB_SECURITY_USBONLY = 4,
|
|
+};
|
|
+
|
|
+struct tb_nhi;
|
|
+
|
|
+struct tb_ctl;
|
|
+
|
|
+struct tb_switch;
|
|
+
|
|
+struct tb_cm_ops;
|
|
+
|
|
+struct tb {
|
|
+ struct device dev;
|
|
+ struct mutex lock;
|
|
+ struct tb_nhi *nhi;
|
|
+ struct tb_ctl *ctl;
|
|
+ struct workqueue_struct *wq;
|
|
+ struct tb_switch *root_switch;
|
|
+ const struct tb_cm_ops *cm_ops;
|
|
+ int index;
|
|
+ enum tb_security_level security_level;
|
|
+ size_t nboot_acl;
|
|
+ long unsigned int privdata[0];
|
|
+};
|
|
+
|
|
+struct tb_ring;
|
|
+
|
|
+struct tb_nhi {
|
|
+ spinlock_t lock;
|
|
+ struct pci_dev *pdev;
|
|
+ void *iobase;
|
|
+ struct tb_ring **tx_rings;
|
|
+ struct tb_ring **rx_rings;
|
|
+ struct ida msix_ida;
|
|
+ bool going_away;
|
|
+ struct work_struct interrupt_work;
|
|
+ u32 hop_count;
|
|
+};
|
|
+
|
|
+struct tb_regs_switch_header {
|
|
+ u16 vendor_id;
|
|
+ u16 device_id;
|
|
+ u32 first_cap_offset: 8;
|
|
+ u32 upstream_port_number: 6;
|
|
+ u32 max_port_number: 6;
|
|
+ u32 depth: 3;
|
|
+ u32 __unknown1: 1;
|
|
+ u32 revision: 8;
|
|
+ u32 route_lo;
|
|
+ u32 route_hi: 31;
|
|
+ bool enabled: 1;
|
|
+ u32 plug_events_delay: 8;
|
|
+ u32 __unknown4: 16;
|
|
+ u32 thunderbolt_version: 8;
|
|
+};
|
|
+
|
|
+struct tb_port;
|
|
+
|
|
+struct tb_dma_port;
|
|
+
|
|
+struct tb_switch_nvm;
|
|
+
|
|
+struct tb_switch {
|
|
+ struct device dev;
|
|
+ struct tb_regs_switch_header config;
|
|
+ struct tb_port *ports;
|
|
+ struct tb_dma_port *dma_port;
|
|
+ struct tb *tb;
|
|
+ u64 uid;
|
|
+ uuid_t *uuid;
|
|
+ u16 vendor;
|
|
+ u16 device;
|
|
+ const char *vendor_name;
|
|
+ const char *device_name;
|
|
+ unsigned int generation;
|
|
+ int cap_plug_events;
|
|
+ bool is_unplugged;
|
|
+ u8 *drom;
|
|
+ struct tb_switch_nvm *nvm;
|
|
+ bool no_nvm_upgrade;
|
|
+ bool safe_mode;
|
|
+ bool boot;
|
|
+ bool rpm;
|
|
+ unsigned int authorized;
|
|
+ struct work_struct work;
|
|
+ enum tb_security_level security_level;
|
|
+ u8 *key;
|
|
+ u8 connection_id;
|
|
+ u8 connection_key;
|
|
+ u8 link;
|
|
+ u8 depth;
|
|
+};
|
|
+
|
|
+struct tb_xdomain;
|
|
+
|
|
+struct tb_cm_ops {
|
|
+ int (*driver_ready)(struct tb *);
|
|
+ int (*start)(struct tb *);
|
|
+ void (*stop)(struct tb *);
|
|
+ int (*suspend_noirq)(struct tb *);
|
|
+ int (*resume_noirq)(struct tb *);
|
|
+ int (*suspend)(struct tb *);
|
|
+ void (*complete)(struct tb *);
|
|
+ int (*runtime_suspend)(struct tb *);
|
|
+ int (*runtime_resume)(struct tb *);
|
|
+ void (*handle_event)(struct tb *, enum tb_cfg_pkg_type, const void *, size_t);
|
|
+ int (*get_boot_acl)(struct tb *, uuid_t *, size_t);
|
|
+ int (*set_boot_acl)(struct tb *, const uuid_t *, size_t);
|
|
+ int (*approve_switch)(struct tb *, struct tb_switch *);
|
|
+ int (*add_switch_key)(struct tb *, struct tb_switch *);
|
|
+ int (*challenge_switch_key)(struct tb *, struct tb_switch *, const u8 *, u8 *);
|
|
+ int (*disconnect_pcie_paths)(struct tb *);
|
|
+ int (*approve_xdomain_paths)(struct tb *, struct tb_xdomain *);
|
|
+ int (*disconnect_xdomain_paths)(struct tb *, struct tb_xdomain *);
|
|
+};
|
|
+
|
|
+struct tb_property_dir {
|
|
+ const uuid_t *uuid;
|
|
+ struct list_head properties;
|
|
+};
|
|
+
|
|
+struct tb_xdomain {
|
|
+ struct device dev;
|
|
+ struct tb *tb;
|
|
+ uuid_t *remote_uuid;
|
|
+ const uuid_t *local_uuid;
|
|
+ u64 route;
|
|
+ u16 vendor;
|
|
+ u16 device;
|
|
+ struct mutex lock;
|
|
+ const char *vendor_name;
|
|
+ const char *device_name;
|
|
+ bool is_unplugged;
|
|
+ bool resume;
|
|
+ u16 transmit_path;
|
|
+ u16 transmit_ring;
|
|
+ u16 receive_path;
|
|
+ u16 receive_ring;
|
|
+ struct ida service_ids;
|
|
+ struct tb_property_dir *properties;
|
|
+ u32 property_block_gen;
|
|
+ struct delayed_work get_properties_work;
|
|
+ int properties_retries;
|
|
+ struct delayed_work properties_changed_work;
|
|
+ int properties_changed_retries;
|
|
+ u8 link;
|
|
+ u8 depth;
|
|
+};
|
|
+
|
|
+struct ring_desc;
|
|
+
|
|
+struct tb_ring {
|
|
+ spinlock_t lock;
|
|
+ struct tb_nhi *nhi;
|
|
+ int size;
|
|
+ int hop;
|
|
+ int head;
|
|
+ int tail;
|
|
+ struct ring_desc *descriptors;
|
|
+ dma_addr_t descriptors_dma;
|
|
+ struct list_head queue;
|
|
+ struct list_head in_flight;
|
|
+ struct work_struct work;
|
|
+ bool is_tx: 1;
|
|
+ bool running: 1;
|
|
+ int irq;
|
|
+ u8 vector;
|
|
+ unsigned int flags;
|
|
+ u16 sof_mask;
|
|
+ u16 eof_mask;
|
|
+ void (*start_poll)(void *);
|
|
+ void *poll_data;
|
|
+};
|
|
+
|
|
+enum ring_desc_flags {
|
|
+ RING_DESC_ISOCH = 1,
|
|
+ RING_DESC_CRC_ERROR = 1,
|
|
+ RING_DESC_COMPLETED = 2,
|
|
+ RING_DESC_POSTED = 4,
|
|
+ RING_DESC_BUFFER_OVERRUN = 4,
|
|
+ RING_DESC_INTERRUPT = 8,
|
|
+};
|
|
+
|
|
+struct ring_desc {
|
|
+ u64 phys;
|
|
+ u32 length: 12;
|
|
+ u32 eof: 4;
|
|
+ u32 sof: 4;
|
|
+ enum ring_desc_flags flags: 12;
|
|
+ u32 time;
|
|
+};
|
|
+
|
|
+struct ring_frame;
|
|
+
|
|
+typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool);
|
|
+
|
|
+struct ring_frame {
|
|
+ dma_addr_t buffer_phy;
|
|
+ ring_cb callback;
|
|
+ struct list_head list;
|
|
+ u32 size: 12;
|
|
+ u32 flags: 12;
|
|
+ u32 eof: 4;
|
|
+ u32 sof: 4;
|
|
+};
|
|
+
|
|
+enum nhi_fw_mode {
|
|
+ NHI_FW_SAFE_MODE = 0,
|
|
+ NHI_FW_AUTH_MODE = 1,
|
|
+ NHI_FW_EP_MODE = 2,
|
|
+ NHI_FW_CM_MODE = 3,
|
|
+};
|
|
+
|
|
+enum nhi_mailbox_cmd {
|
|
+ NHI_MAILBOX_SAVE_DEVS = 5,
|
|
+ NHI_MAILBOX_DISCONNECT_PCIE_PATHS = 6,
|
|
+ NHI_MAILBOX_DRV_UNLOADS = 7,
|
|
+ NHI_MAILBOX_DISCONNECT_PA = 16,
|
|
+ NHI_MAILBOX_DISCONNECT_PB = 17,
|
|
+ NHI_MAILBOX_ALLOW_ALL_DEVS = 35,
|
|
+};
|
|
+
|
|
+enum ring_flags {
|
|
+ RING_FLAG_ISOCH_ENABLE = 134217728,
|
|
+ RING_FLAG_E2E_FLOW_CONTROL = 268435456,
|
|
+ RING_FLAG_PCI_NO_SNOOP = 536870912,
|
|
+ RING_FLAG_RAW = 1073741824,
|
|
+ RING_FLAG_ENABLE = -2147483648,
|
|
+};
|
|
+
|
|
+enum tb_port_type {
|
|
+ TB_TYPE_INACTIVE = 0,
|
|
+ TB_TYPE_PORT = 1,
|
|
+ TB_TYPE_NHI = 2,
|
|
+ TB_TYPE_DP_HDMI_IN = 917761,
|
|
+ TB_TYPE_DP_HDMI_OUT = 917762,
|
|
+ TB_TYPE_PCIE_DOWN = 1048833,
|
|
+ TB_TYPE_PCIE_UP = 1048834,
|
|
+};
|
|
+
|
|
+struct tb_regs_port_header {
|
|
+ u16 vendor_id;
|
|
+ u16 device_id;
|
|
+ u32 first_cap_offset: 8;
|
|
+ u32 max_counters: 11;
|
|
+ u32 __unknown1: 5;
|
|
+ u32 revision: 8;
|
|
+ enum tb_port_type type: 24;
|
|
+ u32 thunderbolt_version: 8;
|
|
+ u32 __unknown2: 20;
|
|
+ u32 port_number: 6;
|
|
+ u32 __unknown3: 6;
|
|
+ u32 nfc_credits;
|
|
+ u32 max_in_hop_id: 11;
|
|
+ u32 max_out_hop_id: 11;
|
|
+ u32 __unknown4: 10;
|
|
+ u32 __unknown5;
|
|
+ u32 __unknown6;
|
|
+};
|
|
+
|
|
+struct tb_switch_nvm {
|
|
+ u8 major;
|
|
+ u8 minor;
|
|
+ int id;
|
|
+ struct nvmem_device *active;
|
|
+ struct nvmem_device *non_active;
|
|
+ void *buf;
|
|
+ size_t buf_data_size;
|
|
+ bool authenticating;
|
|
+};
|
|
+
|
|
+struct tb_port {
|
|
+ struct tb_regs_port_header config;
|
|
+ struct tb_switch *sw;
|
|
+ struct tb_port *remote;
|
|
+ struct tb_xdomain *xdomain;
|
|
+ int cap_phy;
|
|
+ u8 port;
|
|
+ bool disabled;
|
|
+ struct tb_port *dual_link_port;
|
|
+ u8 link_nr: 1;
|
|
+};
|
|
+
|
|
+typedef bool (*event_cb)(void *, enum tb_cfg_pkg_type, const void *, size_t);
|
|
+
|
|
+struct ctl_pkg;
|
|
+
|
|
+struct tb_ctl {
|
|
+ struct tb_nhi *nhi;
|
|
+ struct tb_ring *tx;
|
|
+ struct tb_ring *rx;
|
|
+ struct dma_pool___2 *frame_pool;
|
|
+ struct ctl_pkg *rx_packets[10];
|
|
+ struct mutex request_queue_lock;
|
|
+ struct list_head request_queue;
|
|
+ bool running;
|
|
+ event_cb callback;
|
|
+ void *callback_data;
|
|
+};
|
|
+
|
|
+enum tb_cfg_space {
|
|
+ TB_CFG_HOPS = 0,
|
|
+ TB_CFG_PORT = 1,
|
|
+ TB_CFG_SWITCH = 2,
|
|
+ TB_CFG_COUNTERS = 3,
|
|
+};
|
|
+
|
|
+enum tb_cfg_error {
|
|
+ TB_CFG_ERROR_PORT_NOT_CONNECTED = 0,
|
|
+ TB_CFG_ERROR_LINK_ERROR = 1,
|
|
+ TB_CFG_ERROR_INVALID_CONFIG_SPACE = 2,
|
|
+ TB_CFG_ERROR_NO_SUCH_PORT = 4,
|
|
+ TB_CFG_ERROR_ACK_PLUG_EVENT = 7,
|
|
+ TB_CFG_ERROR_LOOP = 8,
|
|
+ TB_CFG_ERROR_HEC_ERROR_DETECTED = 12,
|
|
+ TB_CFG_ERROR_FLOW_CONTROL_ERROR = 13,
|
|
+};
|
|
+
|
|
+struct tb_cfg_header {
|
|
+ u32 route_hi: 22;
|
|
+ u32 unknown: 10;
|
|
+ u32 route_lo;
|
|
+};
|
|
+
|
|
+struct tb_cfg_address {
|
|
+ u32 offset: 13;
|
|
+ u32 length: 6;
|
|
+ u32 port: 6;
|
|
+ enum tb_cfg_space space: 2;
|
|
+ u32 seq: 2;
|
|
+ u32 zero: 3;
|
|
+};
|
|
+
|
|
+struct cfg_read_pkg {
|
|
+ struct tb_cfg_header header;
|
|
+ struct tb_cfg_address addr;
|
|
+};
|
|
+
|
|
+struct cfg_write_pkg {
|
|
+ struct tb_cfg_header header;
|
|
+ struct tb_cfg_address addr;
|
|
+ u32 data[64];
|
|
+};
|
|
+
|
|
+struct cfg_error_pkg {
|
|
+ struct tb_cfg_header header;
|
|
+ enum tb_cfg_error error: 4;
|
|
+ u32 zero1: 4;
|
|
+ u32 port: 6;
|
|
+ u32 zero2: 2;
|
|
+ u32 zero3: 16;
|
|
+};
|
|
+
|
|
+struct cfg_reset_pkg {
|
|
+ struct tb_cfg_header header;
|
|
+};
|
|
+
|
|
+struct tb_cfg_result {
|
|
+ u64 response_route;
|
|
+ u32 response_port;
|
|
+ int err;
|
|
+ enum tb_cfg_error tb_error;
|
|
+};
|
|
+
|
|
+struct ctl_pkg {
|
|
+ struct tb_ctl *ctl;
|
|
+ void *buffer;
|
|
+ struct ring_frame frame;
|
|
+};
|
|
+
|
|
+struct tb_cfg_request {
|
|
+ struct kref kref;
|
|
+ struct tb_ctl *ctl;
|
|
+ const void *request;
|
|
+ size_t request_size;
|
|
+ enum tb_cfg_pkg_type request_type;
|
|
+ void *response;
|
|
+ size_t response_size;
|
|
+ enum tb_cfg_pkg_type response_type;
|
|
+ size_t npackets;
|
|
+ bool (*match)(const struct tb_cfg_request *, const struct ctl_pkg *);
|
|
+ bool (*copy)(struct tb_cfg_request *, const struct ctl_pkg *);
|
|
+ void (*callback)(void *);
|
|
+ void *callback_data;
|
|
+ long unsigned int flags;
|
|
+ struct work_struct work;
|
|
+ struct tb_cfg_result result;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+enum tb_port_cap {
|
|
+ TB_PORT_CAP_PHY = 1,
|
|
+ TB_PORT_CAP_TIME1 = 3,
|
|
+ TB_PORT_CAP_ADAP = 4,
|
|
+ TB_PORT_CAP_VSE = 5,
|
|
+};
|
|
+
|
|
+struct cfg_event_pkg {
|
|
+ struct tb_cfg_header header;
|
|
+ u32 port: 6;
|
|
+ u32 zero: 25;
|
|
+ bool unplug: 1;
|
|
+};
|
|
+
|
|
+struct tb_path_hop {
|
|
+ struct tb_port *in_port;
|
|
+ struct tb_port *out_port;
|
|
+ int in_hop_index;
|
|
+ int in_counter_index;
|
|
+ int next_hop_index;
|
|
+};
|
|
+
|
|
+enum tb_path_port {
|
|
+ TB_PATH_NONE = 0,
|
|
+ TB_PATH_SOURCE = 1,
|
|
+ TB_PATH_INTERNAL = 2,
|
|
+ TB_PATH_DESTINATION = 4,
|
|
+ TB_PATH_ALL = 7,
|
|
+};
|
|
+
|
|
+struct tb_path {
|
|
+ struct tb *tb;
|
|
+ int nfc_credits;
|
|
+ enum tb_path_port ingress_shared_buffer;
|
|
+ enum tb_path_port egress_shared_buffer;
|
|
+ enum tb_path_port ingress_fc_enable;
|
|
+ enum tb_path_port egress_fc_enable;
|
|
+ int priority: 3;
|
|
+ int weight: 4;
|
|
+ bool drop_packages;
|
|
+ bool activated;
|
|
+ struct tb_path_hop *hops;
|
|
+ int path_length;
|
|
+};
|
|
+
|
|
+struct tb_pci_tunnel {
|
|
+ struct tb *tb;
|
|
+ struct tb_port *up_port;
|
|
+ struct tb_port *down_port;
|
|
+ struct tb_path *path_to_up;
|
|
+ struct tb_path *path_to_down;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct tb_cm {
|
|
+ struct list_head tunnel_list;
|
|
+ bool hotplug_active;
|
|
+};
|
|
+
|
|
+struct tb_hotplug_event {
|
|
+ struct work_struct work;
|
|
+ struct tb *tb;
|
|
+ u64 route;
|
|
+ u8 port;
|
|
+ bool unplug;
|
|
+};
|
|
+
|
|
+enum tb_switch_vse_cap {
|
|
+ TB_VSE_CAP_PLUG_EVENTS = 1,
|
|
+ TB_VSE_CAP_TIME2 = 3,
|
|
+ TB_VSE_CAP_IECS = 4,
|
|
+ TB_VSE_CAP_LINK_CONTROLLER = 6,
|
|
+};
|
|
+
|
|
+enum tb_port_state {
|
|
+ TB_PORT_DISABLED = 0,
|
|
+ TB_PORT_CONNECTING = 1,
|
|
+ TB_PORT_UP = 2,
|
|
+ TB_PORT_UNPLUGGED = 7,
|
|
+};
|
|
+
|
|
+struct tb_cap_basic {
|
|
+ u8 next;
|
|
+ u8 cap;
|
|
+};
|
|
+
|
|
+struct tb_cap_phy {
|
|
+ struct tb_cap_basic cap_header;
|
|
+ u32 unknown1: 16;
|
|
+ u32 unknown2: 14;
|
|
+ bool disable: 1;
|
|
+ u32 unknown3: 11;
|
|
+ enum tb_port_state state: 4;
|
|
+ u32 unknown4: 2;
|
|
+};
|
|
+
|
|
+struct nvm_auth_status {
|
|
+ struct list_head list;
|
|
+ uuid_t uuid;
|
|
+ u32 status;
|
|
+};
|
|
+
|
|
+struct tb_sw_lookup {
|
|
+ struct tb *tb;
|
|
+ u8 link;
|
|
+ u8 depth;
|
|
+ const uuid_t *uuid;
|
|
+ u64 route;
|
|
+};
|
|
+
|
|
+enum tb_switch_cap {
|
|
+ TB_SWITCH_CAP_VSE = 5,
|
|
+};
|
|
+
|
|
+struct tb_cap_extended_short {
|
|
+ u8 next;
|
|
+ u8 cap;
|
|
+ u8 vsec_id;
|
|
+ u8 length;
|
|
+};
|
|
+
|
|
+struct tb_cap_extended_long {
|
|
+ u8 zero1;
|
|
+ u8 cap;
|
|
+ u8 vsec_id;
|
|
+ u8 zero2;
|
|
+ u16 next;
|
|
+ u16 length;
|
|
+};
|
|
+
|
|
+struct tb_cap_any {
|
|
+ union {
|
|
+ struct tb_cap_basic basic;
|
|
+ struct tb_cap_extended_short extended_short;
|
|
+ struct tb_cap_extended_long extended_long;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct tb_regs_hop {
|
|
+ u32 next_hop: 11;
|
|
+ u32 out_port: 6;
|
|
+ u32 initial_credits: 8;
|
|
+ u32 unknown1: 6;
|
|
+ bool enable: 1;
|
|
+ u32 weight: 4;
|
|
+ u32 unknown2: 4;
|
|
+ u32 priority: 3;
|
|
+ bool drop_packages: 1;
|
|
+ u32 counter: 11;
|
|
+ bool counter_enable: 1;
|
|
+ bool ingress_fc: 1;
|
|
+ bool egress_fc: 1;
|
|
+ bool ingress_shared_buffer: 1;
|
|
+ bool egress_shared_buffer: 1;
|
|
+ u32 unknown3: 4;
|
|
+};
|
|
+
|
|
+struct tb_eeprom_ctl {
|
|
+ bool clock: 1;
|
|
+ bool access_low: 1;
|
|
+ bool data_out: 1;
|
|
+ bool data_in: 1;
|
|
+ bool access_high: 1;
|
|
+ bool not_present: 1;
|
|
+ bool unknown1: 1;
|
|
+ bool present: 1;
|
|
+ u32 unknown2: 24;
|
|
+};
|
|
+
|
|
+struct tb_cap_plug_events {
|
|
+ struct tb_cap_extended_short cap_header;
|
|
+ u32 __unknown1: 2;
|
|
+ u32 plug_events: 5;
|
|
+ u32 __unknown2: 25;
|
|
+ u32 __unknown3;
|
|
+ u32 __unknown4;
|
|
+ struct tb_eeprom_ctl eeprom_ctl;
|
|
+ u32 __unknown5[7];
|
|
+ u32 drom_offset;
|
|
+};
|
|
+
|
|
+enum tb_eeprom_transfer {
|
|
+ TB_EEPROM_IN = 0,
|
|
+ TB_EEPROM_OUT = 1,
|
|
+};
|
|
+
|
|
+struct tb_drom_header {
|
|
+ u8 uid_crc8;
|
|
+ u64 uid;
|
|
+ u32 data_crc32;
|
|
+ u8 device_rom_revision;
|
|
+ u16 data_len: 10;
|
|
+ u8 __unknown1: 6;
|
|
+ u16 vendor_id;
|
|
+ u16 model_id;
|
|
+ u8 model_rev;
|
|
+ u8 eeprom_rev;
|
|
+} __attribute__((packed));
|
|
+
|
|
+enum tb_drom_entry_type {
|
|
+ TB_DROM_ENTRY_GENERIC = 0,
|
|
+ TB_DROM_ENTRY_PORT = 1,
|
|
+};
|
|
+
|
|
+struct tb_drom_entry_header {
|
|
+ u8 len;
|
|
+ u8 index: 6;
|
|
+ bool port_disabled: 1;
|
|
+ enum tb_drom_entry_type type: 1;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct tb_drom_entry_generic {
|
|
+ struct tb_drom_entry_header header;
|
|
+ u8 data[0];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct tb_drom_entry_port {
|
|
+ struct tb_drom_entry_header header;
|
|
+ u8 dual_link_port_rid: 4;
|
|
+ u8 link_nr: 1;
|
|
+ u8 unknown1: 2;
|
|
+ bool has_dual_link_port: 1;
|
|
+ u8 dual_link_port_nr: 6;
|
|
+ u8 unknown2: 2;
|
|
+ u8 micro2: 4;
|
|
+ u8 micro1: 4;
|
|
+ u8 micro3;
|
|
+ u8 peer_port_rid: 4;
|
|
+ u8 unknown3: 3;
|
|
+ bool has_peer_port: 1;
|
|
+ u8 peer_port_nr: 6;
|
|
+ u8 unknown4: 2;
|
|
+};
|
|
+
|
|
+struct tb_service_id {
|
|
+ __u32 match_flags;
|
|
+ char protocol_key[9];
|
|
+ __u32 protocol_id;
|
|
+ __u32 protocol_version;
|
|
+ __u32 protocol_revision;
|
|
+ kernel_ulong_t driver_data;
|
|
+};
|
|
+
|
|
+struct tb_service {
|
|
+ struct device dev;
|
|
+ int id;
|
|
+ const char *key;
|
|
+ u32 prtcid;
|
|
+ u32 prtcvers;
|
|
+ u32 prtcrevs;
|
|
+ u32 prtcstns;
|
|
+};
|
|
+
|
|
+struct tb_service_driver {
|
|
+ struct device_driver driver;
|
|
+ int (*probe)(struct tb_service *, const struct tb_service_id *);
|
|
+ void (*remove)(struct tb_service *);
|
|
+ void (*shutdown)(struct tb_service *);
|
|
+ const struct tb_service_id *id_table;
|
|
+};
|
|
+
|
|
+struct tb_dma_port {
|
|
+ struct tb_switch *sw;
|
|
+ u8 port;
|
|
+ u32 base;
|
|
+ u8 *buf;
|
|
+};
|
|
+
|
|
+enum icm_pkg_code {
|
|
+ ICM_GET_TOPOLOGY = 1,
|
|
+ ICM_DRIVER_READY = 3,
|
|
+ ICM_APPROVE_DEVICE = 4,
|
|
+ ICM_CHALLENGE_DEVICE = 5,
|
|
+ ICM_ADD_DEVICE_KEY = 6,
|
|
+ ICM_GET_ROUTE = 10,
|
|
+ ICM_APPROVE_XDOMAIN = 16,
|
|
+ ICM_DISCONNECT_XDOMAIN = 17,
|
|
+ ICM_PREBOOT_ACL = 24,
|
|
+};
|
|
+
|
|
+enum icm_event_code {
|
|
+ ICM_EVENT_DEVICE_CONNECTED = 3,
|
|
+ ICM_EVENT_DEVICE_DISCONNECTED = 4,
|
|
+ ICM_EVENT_XDOMAIN_CONNECTED = 6,
|
|
+ ICM_EVENT_XDOMAIN_DISCONNECTED = 7,
|
|
+};
|
|
+
|
|
+struct icm_pkg_header {
|
|
+ u8 code;
|
|
+ u8 flags;
|
|
+ u8 packet_id;
|
|
+ u8 total_packets;
|
|
+};
|
|
+
|
|
+struct icm_pkg_driver_ready {
|
|
+ struct icm_pkg_header hdr;
|
|
+};
|
|
+
|
|
+struct icm_fr_pkg_driver_ready_response {
|
|
+ struct icm_pkg_header hdr;
|
|
+ u8 romver;
|
|
+ u8 ramver;
|
|
+ u16 security_level;
|
|
+};
|
|
+
|
|
+struct icm_fr_pkg_get_topology {
|
|
+ struct icm_pkg_header hdr;
|
|
+};
|
|
+
|
|
+struct icm_fr_pkg_get_topology_response {
|
|
+ struct icm_pkg_header hdr;
|
|
+ u32 route_lo;
|
|
+ u32 route_hi;
|
|
+ u8 first_data;
|
|
+ u8 second_data;
|
|
+ u8 drom_i2c_address_index;
|
|
+ u8 switch_index;
|
|
+ u32 reserved[2];
|
|
+ u32 ports[16];
|
|
+ u32 port_hop_info[16];
|
|
+};
|
|
+
|
|
+struct icm_fr_event_device_connected {
|
|
+ struct icm_pkg_header hdr;
|
|
+ uuid_t ep_uuid;
|
|
+ u8 connection_key;
|
|
+ u8 connection_id;
|
|
+ u16 link_info;
|
|
+ u32 ep_name[55];
|
|
+};
|
|
+
|
|
+struct icm_fr_pkg_approve_device {
|
|
+ struct icm_pkg_header hdr;
|
|
+ uuid_t ep_uuid;
|
|
+ u8 connection_key;
|
|
+ u8 connection_id;
|
|
+ u16 reserved;
|
|
+};
|
|
+
|
|
+struct icm_fr_event_device_disconnected {
|
|
+ struct icm_pkg_header hdr;
|
|
+ u16 reserved;
|
|
+ u16 link_info;
|
|
+};
|
|
+
|
|
+struct icm_fr_event_xdomain_connected {
|
|
+ struct icm_pkg_header hdr;
|
|
+ u16 reserved;
|
|
+ u16 link_info;
|
|
+ uuid_t remote_uuid;
|
|
+ uuid_t local_uuid;
|
|
+ u32 local_route_hi;
|
|
+ u32 local_route_lo;
|
|
+ u32 remote_route_hi;
|
|
+ u32 remote_route_lo;
|
|
+};
|
|
+
|
|
+struct icm_fr_event_xdomain_disconnected {
|
|
+ struct icm_pkg_header hdr;
|
|
+ u16 reserved;
|
|
+ u16 link_info;
|
|
+ uuid_t remote_uuid;
|
|
+};
|
|
+
|
|
+struct icm_fr_pkg_add_device_key {
|
|
+ struct icm_pkg_header hdr;
|
|
+ uuid_t ep_uuid;
|
|
+ u8 connection_key;
|
|
+ u8 connection_id;
|
|
+ u16 reserved;
|
|
+ u32 key[8];
|
|
+};
|
|
+
|
|
+struct icm_fr_pkg_add_device_key_response {
|
|
+ struct icm_pkg_header hdr;
|
|
+ uuid_t ep_uuid;
|
|
+ u8 connection_key;
|
|
+ u8 connection_id;
|
|
+ u16 reserved;
|
|
+};
|
|
+
|
|
+struct icm_fr_pkg_challenge_device {
|
|
+ struct icm_pkg_header hdr;
|
|
+ uuid_t ep_uuid;
|
|
+ u8 connection_key;
|
|
+ u8 connection_id;
|
|
+ u16 reserved;
|
|
+ u32 challenge[8];
|
|
+};
|
|
+
|
|
+struct icm_fr_pkg_challenge_device_response {
|
|
+ struct icm_pkg_header hdr;
|
|
+ uuid_t ep_uuid;
|
|
+ u8 connection_key;
|
|
+ u8 connection_id;
|
|
+ u16 reserved;
|
|
+ u32 challenge[8];
|
|
+ u32 response[8];
|
|
+};
|
|
+
|
|
+struct icm_fr_pkg_approve_xdomain {
|
|
+ struct icm_pkg_header hdr;
|
|
+ u16 reserved;
|
|
+ u16 link_info;
|
|
+ uuid_t remote_uuid;
|
|
+ u16 transmit_path;
|
|
+ u16 transmit_ring;
|
|
+ u16 receive_path;
|
|
+ u16 receive_ring;
|
|
+};
|
|
+
|
|
+struct icm_fr_pkg_approve_xdomain_response {
|
|
+ struct icm_pkg_header hdr;
|
|
+ u16 reserved;
|
|
+ u16 link_info;
|
|
+ uuid_t remote_uuid;
|
|
+ u16 transmit_path;
|
|
+ u16 transmit_ring;
|
|
+ u16 receive_path;
|
|
+ u16 receive_ring;
|
|
+};
|
|
+
|
|
+struct icm_ar_pkg_driver_ready_response {
|
|
+ struct icm_pkg_header hdr;
|
|
+ u8 romver;
|
|
+ u8 ramver;
|
|
+ u16 info;
|
|
+};
|
|
+
|
|
+struct icm_ar_pkg_get_route {
|
|
+ struct icm_pkg_header hdr;
|
|
+ u16 reserved;
|
|
+ u16 link_info;
|
|
+};
|
|
+
|
|
+struct icm_ar_pkg_get_route_response {
|
|
+ struct icm_pkg_header hdr;
|
|
+ u16 reserved;
|
|
+ u16 link_info;
|
|
+ u32 route_hi;
|
|
+ u32 route_lo;
|
|
+};
|
|
+
|
|
+struct icm_ar_boot_acl_entry {
|
|
+ u32 uuid_lo;
|
|
+ u32 uuid_hi;
|
|
+};
|
|
+
|
|
+struct icm_ar_pkg_preboot_acl {
|
|
+ struct icm_pkg_header hdr;
|
|
+ struct icm_ar_boot_acl_entry acl[16];
|
|
+};
|
|
+
|
|
+struct icm_ar_pkg_preboot_acl_response {
|
|
+ struct icm_pkg_header hdr;
|
|
+ struct icm_ar_boot_acl_entry acl[16];
|
|
+};
|
|
+
|
|
+struct icm_tr_pkg_driver_ready_response {
|
|
+ struct icm_pkg_header hdr;
|
|
+ u16 reserved1;
|
|
+ u16 info;
|
|
+ u32 nvm_version;
|
|
+ u16 device_id;
|
|
+ u16 reserved2;
|
|
+};
|
|
+
|
|
+struct icm_tr_event_device_connected {
|
|
+ struct icm_pkg_header hdr;
|
|
+ uuid_t ep_uuid;
|
|
+ u32 route_hi;
|
|
+ u32 route_lo;
|
|
+ u8 connection_id;
|
|
+ u8 reserved;
|
|
+ u16 link_info;
|
|
+ u32 ep_name[55];
|
|
+};
|
|
+
|
|
+struct icm_tr_event_device_disconnected {
|
|
+ struct icm_pkg_header hdr;
|
|
+ u32 route_hi;
|
|
+ u32 route_lo;
|
|
+};
|
|
+
|
|
+struct icm_tr_event_xdomain_connected {
|
|
+ struct icm_pkg_header hdr;
|
|
+ u16 reserved;
|
|
+ u16 link_info;
|
|
+ uuid_t remote_uuid;
|
|
+ uuid_t local_uuid;
|
|
+ u32 local_route_hi;
|
|
+ u32 local_route_lo;
|
|
+ u32 remote_route_hi;
|
|
+ u32 remote_route_lo;
|
|
+};
|
|
+
|
|
+struct icm_tr_event_xdomain_disconnected {
|
|
+ struct icm_pkg_header hdr;
|
|
+ u32 route_hi;
|
|
+ u32 route_lo;
|
|
+ uuid_t remote_uuid;
|
|
+};
|
|
+
|
|
+struct icm_tr_pkg_approve_device {
|
|
+ struct icm_pkg_header hdr;
|
|
+ uuid_t ep_uuid;
|
|
+ u32 route_hi;
|
|
+ u32 route_lo;
|
|
+ u8 connection_id;
|
|
+ u8 reserved1[3];
|
|
+};
|
|
+
|
|
+struct icm_tr_pkg_add_device_key {
|
|
+ struct icm_pkg_header hdr;
|
|
+ uuid_t ep_uuid;
|
|
+ u32 route_hi;
|
|
+ u32 route_lo;
|
|
+ u8 connection_id;
|
|
+ u8 reserved[3];
|
|
+ u32 key[8];
|
|
+};
|
|
+
|
|
+struct icm_tr_pkg_challenge_device {
|
|
+ struct icm_pkg_header hdr;
|
|
+ uuid_t ep_uuid;
|
|
+ u32 route_hi;
|
|
+ u32 route_lo;
|
|
+ u8 connection_id;
|
|
+ u8 reserved[3];
|
|
+ u32 challenge[8];
|
|
+};
|
|
+
|
|
+struct icm_tr_pkg_approve_xdomain {
|
|
+ struct icm_pkg_header hdr;
|
|
+ u32 route_hi;
|
|
+ u32 route_lo;
|
|
+ uuid_t remote_uuid;
|
|
+ u16 transmit_path;
|
|
+ u16 transmit_ring;
|
|
+ u16 receive_path;
|
|
+ u16 receive_ring;
|
|
+};
|
|
+
|
|
+struct icm_tr_pkg_disconnect_xdomain {
|
|
+ struct icm_pkg_header hdr;
|
|
+ u8 stage;
|
|
+ u8 reserved[3];
|
|
+ u32 route_hi;
|
|
+ u32 route_lo;
|
|
+ uuid_t remote_uuid;
|
|
+};
|
|
+
|
|
+struct icm_tr_pkg_challenge_device_response {
|
|
+ struct icm_pkg_header hdr;
|
|
+ uuid_t ep_uuid;
|
|
+ u32 route_hi;
|
|
+ u32 route_lo;
|
|
+ u8 connection_id;
|
|
+ u8 reserved[3];
|
|
+ u32 challenge[8];
|
|
+ u32 response[8];
|
|
+};
|
|
+
|
|
+struct icm_tr_pkg_add_device_key_response {
|
|
+ struct icm_pkg_header hdr;
|
|
+ uuid_t ep_uuid;
|
|
+ u32 route_hi;
|
|
+ u32 route_lo;
|
|
+ u8 connection_id;
|
|
+ u8 reserved[3];
|
|
+};
|
|
+
|
|
+struct icm_tr_pkg_approve_xdomain_response {
|
|
+ struct icm_pkg_header hdr;
|
|
+ u32 route_hi;
|
|
+ u32 route_lo;
|
|
+ uuid_t remote_uuid;
|
|
+ u16 transmit_path;
|
|
+ u16 transmit_ring;
|
|
+ u16 receive_path;
|
|
+ u16 receive_ring;
|
|
+};
|
|
+
|
|
+struct icm_tr_pkg_disconnect_xdomain_response {
|
|
+ struct icm_pkg_header hdr;
|
|
+ u8 stage;
|
|
+ u8 reserved[3];
|
|
+ u32 route_hi;
|
|
+ u32 route_lo;
|
|
+ uuid_t remote_uuid;
|
|
+};
|
|
+
|
|
+struct icm {
|
|
+ struct mutex request_lock;
|
|
+ struct delayed_work rescan_work;
|
|
+ struct pci_dev *upstream_port;
|
|
+ size_t max_boot_acl;
|
|
+ int vnd_cap;
|
|
+ bool safe_mode;
|
|
+ bool rpm;
|
|
+ bool (*is_supported)(struct tb *);
|
|
+ int (*get_mode)(struct tb *);
|
|
+ int (*get_route)(struct tb *, u8, u8, u64 *);
|
|
+ void (*save_devices)(struct tb *);
|
|
+ int (*driver_ready)(struct tb *, enum tb_security_level *, size_t *, bool *);
|
|
+ void (*device_connected)(struct tb *, const struct icm_pkg_header *);
|
|
+ void (*device_disconnected)(struct tb *, const struct icm_pkg_header *);
|
|
+ void (*xdomain_connected)(struct tb *, const struct icm_pkg_header *);
|
|
+ void (*xdomain_disconnected)(struct tb *, const struct icm_pkg_header *);
|
|
+};
|
|
+
|
|
+struct icm_notification {
|
|
+ struct work_struct work;
|
|
+ struct icm_pkg_header *pkg;
|
|
+ struct tb *tb;
|
|
+};
|
|
+
|
|
+struct ep_name_entry {
|
|
+ u8 len;
|
|
+ u8 type;
|
|
+ u8 data[0];
|
|
+};
|
|
+
|
|
+struct intel_vss {
|
|
+ u16 vendor;
|
|
+ u16 model;
|
|
+ u8 mc;
|
|
+ u8 flags;
|
|
+ u16 pci_devid;
|
|
+ u32 nvm_version;
|
|
+};
|
|
+
|
|
+enum tb_property_type {
|
|
+ TB_PROPERTY_TYPE_UNKNOWN = 0,
|
|
+ TB_PROPERTY_TYPE_DIRECTORY = 68,
|
|
+ TB_PROPERTY_TYPE_DATA = 100,
|
|
+ TB_PROPERTY_TYPE_TEXT = 116,
|
|
+ TB_PROPERTY_TYPE_VALUE = 118,
|
|
+};
|
|
+
|
|
+struct tb_property {
|
|
+ struct list_head list;
|
|
+ char key[9];
|
|
+ enum tb_property_type type;
|
|
+ size_t length;
|
|
+ union {
|
|
+ struct tb_property_dir *dir;
|
|
+ u8 *data;
|
|
+ char *text;
|
|
+ u32 immediate;
|
|
+ } value;
|
|
+};
|
|
+
|
|
+struct tb_property_entry {
|
|
+ u32 key_hi;
|
|
+ u32 key_lo;
|
|
+ u16 length;
|
|
+ u8 reserved;
|
|
+ u8 type;
|
|
+ u32 value;
|
|
+};
|
|
+
|
|
+struct tb_property_rootdir_entry {
|
|
+ u32 magic;
|
|
+ u32 length;
|
|
+ struct tb_property_entry entries[0];
|
|
+};
|
|
+
|
|
+struct tb_property_dir_entry {
|
|
+ u32 uuid[4];
|
|
+ struct tb_property_entry entries[0];
|
|
+};
|
|
+
|
|
+struct tb_protocol_handler {
|
|
+ const uuid_t *uuid;
|
|
+ int (*callback)(const void *, size_t, void *);
|
|
+ void *data;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct tb_xdomain_header {
|
|
+ u32 route_hi;
|
|
+ u32 route_lo;
|
|
+ u32 length_sn;
|
|
+};
|
|
+
|
|
+enum tb_xdp_type {
|
|
+ UUID_REQUEST_OLD = 1,
|
|
+ UUID_RESPONSE = 2,
|
|
+ PROPERTIES_REQUEST = 3,
|
|
+ PROPERTIES_RESPONSE = 4,
|
|
+ PROPERTIES_CHANGED_REQUEST = 5,
|
|
+ PROPERTIES_CHANGED_RESPONSE = 6,
|
|
+ ERROR_RESPONSE = 7,
|
|
+ UUID_REQUEST = 12,
|
|
+};
|
|
+
|
|
+struct tb_xdp_header {
|
|
+ struct tb_xdomain_header xd_hdr;
|
|
+ uuid_t uuid;
|
|
+ u32 type;
|
|
+};
|
|
+
|
|
+struct tb_xdp_properties {
|
|
+ struct tb_xdp_header hdr;
|
|
+ uuid_t src_uuid;
|
|
+ uuid_t dst_uuid;
|
|
+ u16 offset;
|
|
+ u16 reserved;
|
|
+};
|
|
+
|
|
+struct tb_xdp_properties_response {
|
|
+ struct tb_xdp_header hdr;
|
|
+ uuid_t src_uuid;
|
|
+ uuid_t dst_uuid;
|
|
+ u16 offset;
|
|
+ u16 data_length;
|
|
+ u32 generation;
|
|
+ u32 data[0];
|
|
+};
|
|
+
|
|
+struct tb_xdp_properties_changed {
|
|
+ struct tb_xdp_header hdr;
|
|
+ uuid_t src_uuid;
|
|
+};
|
|
+
|
|
+struct tb_xdp_properties_changed_response {
|
|
+ struct tb_xdp_header hdr;
|
|
+};
|
|
+
|
|
+enum tb_xdp_error {
|
|
+ ERROR_SUCCESS = 0,
|
|
+ ERROR_UNKNOWN_PACKET = 1,
|
|
+ ERROR_UNKNOWN_DOMAIN = 2,
|
|
+ ERROR_NOT_SUPPORTED = 3,
|
|
+ ERROR_NOT_READY = 4,
|
|
+};
|
|
+
|
|
+struct tb_xdp_error_response {
|
|
+ struct tb_xdp_header hdr;
|
|
+ u32 error;
|
|
+};
|
|
+
|
|
+struct xdomain_request_work {
|
|
+ struct work_struct work;
|
|
+ struct tb_xdp_header *pkg;
|
|
+ struct tb *tb;
|
|
+};
|
|
+
|
|
+struct tb_xdomain_lookup {
|
|
+ const uuid_t *uuid;
|
|
+ u8 link;
|
|
+ u8 depth;
|
|
+ u64 route;
|
|
+};
|
|
+
|
|
+struct nvmem_device___2 {
|
|
+ const char *name;
|
|
+ struct module *owner;
|
|
+ struct device dev;
|
|
+ int stride;
|
|
+ int word_size;
|
|
+ int id;
|
|
+ int users;
|
|
+ size_t size;
|
|
+ bool read_only;
|
|
+ int flags;
|
|
+ struct bin_attribute eeprom;
|
|
+ struct device *base_dev;
|
|
+ nvmem_reg_read_t reg_read;
|
|
+ nvmem_reg_write_t reg_write;
|
|
+ void *priv;
|
|
+};
|
|
+
|
|
+struct nvmem_cell {
|
|
+ const char *name;
|
|
+ int offset;
|
|
+ int bytes;
|
|
+ int bit_offset;
|
|
+ int nbits;
|
|
+ struct nvmem_device___2 *nvmem;
|
|
+ struct list_head node;
|
|
+};
|
|
+
|
|
+struct pcibios_fwaddrmap {
|
|
+ struct list_head list;
|
|
+ struct pci_dev *dev;
|
|
+ resource_size_t fw_addr[17];
|
|
+};
|
|
+
|
|
+struct pci_check_idx_range {
|
|
+ int start;
|
|
+ int end;
|
|
+};
|
|
+
|
|
+struct pci_mmcfg_region {
|
|
+ struct list_head list;
|
|
+ struct resource res;
|
|
+ u64 address;
|
|
+ char *virt;
|
|
+ u16 segment;
|
|
+ u8 start_bus;
|
|
+ u8 end_bus;
|
|
+ char name[30];
|
|
+};
|
|
+
|
|
+struct acpi_table_mcfg {
|
|
+ struct acpi_table_header header;
|
|
+ u8 reserved[8];
|
|
+};
|
|
+
|
|
+struct acpi_mcfg_allocation {
|
|
+ u64 address;
|
|
+ u16 pci_segment;
|
|
+ u8 start_bus_number;
|
|
+ u8 end_bus_number;
|
|
+ u32 reserved;
|
|
+};
|
|
+
|
|
+struct pci_mmcfg_hostbridge_probe {
|
|
+ u32 bus;
|
|
+ u32 devfn;
|
|
+ u32 vendor;
|
|
+ u32 device;
|
|
+ const char * (*probe)();
|
|
+};
|
|
+
|
|
+typedef bool (*check_reserved_t)(u64, u64, unsigned int);
|
|
+
|
|
+struct pci_root_info {
|
|
+ struct acpi_pci_root_info common;
|
|
+ struct pci_sysdata sd;
|
|
+ bool mcfg_added;
|
|
+ u8 start_bus;
|
|
+ u8 end_bus;
|
|
+};
|
|
+
|
|
+struct irq_info___3 {
|
|
+ u8 bus;
|
|
+ u8 devfn;
|
|
+ struct {
|
|
+ u8 link;
|
|
+ u16 bitmap;
|
|
+ } __attribute__((packed)) irq[4];
|
|
+ u8 slot;
|
|
+ u8 rfu;
|
|
+};
|
|
+
|
|
+struct irq_routing_table {
|
|
+ u32 signature;
|
|
+ u16 version;
|
|
+ u16 size;
|
|
+ u8 rtr_bus;
|
|
+ u8 rtr_devfn;
|
|
+ u16 exclusive_irqs;
|
|
+ u16 rtr_vendor;
|
|
+ u16 rtr_device;
|
|
+ u32 miniport_data;
|
|
+ u8 rfu[11];
|
|
+ u8 checksum;
|
|
+ struct irq_info___3 slots[0];
|
|
+};
|
|
+
|
|
+struct irq_router {
|
|
+ char *name;
|
|
+ u16 vendor;
|
|
+ u16 device;
|
|
+ int (*get)(struct pci_dev *, struct pci_dev *, int);
|
|
+ int (*set)(struct pci_dev *, struct pci_dev *, int, int);
|
|
+};
|
|
+
|
|
+struct irq_router_handler {
|
|
+ u16 vendor;
|
|
+ int (*probe)(struct irq_router *, struct pci_dev *, u16);
|
|
+};
|
|
+
|
|
+struct pci_setup_rom {
|
|
+ struct setup_data data;
|
|
+ uint16_t vendor;
|
|
+ uint16_t devid;
|
|
+ uint64_t pcilen;
|
|
+ long unsigned int segment;
|
|
+ long unsigned int bus;
|
|
+ long unsigned int device;
|
|
+ long unsigned int function;
|
|
+ uint8_t romdata[0];
|
|
+};
|
|
+
|
|
+enum pci_bf_sort_state {
|
|
+ pci_bf_sort_default = 0,
|
|
+ pci_force_nobf = 1,
|
|
+ pci_force_bf = 2,
|
|
+ pci_dmi_bf = 3,
|
|
+};
|
|
+
|
|
+struct pci_root_res {
|
|
+ struct list_head list;
|
|
+ struct resource res;
|
|
+};
|
|
+
|
|
+struct pci_root_info___2 {
|
|
+ struct list_head list;
|
|
+ char name[12];
|
|
+ struct list_head resources;
|
|
+ struct resource busn;
|
|
+ int node;
|
|
+ int link;
|
|
+};
|
|
+
|
|
+struct amd_hostbridge {
|
|
+ u32 bus;
|
|
+ u32 slot;
|
|
+ u32 device;
|
|
+};
|
|
+
|
|
+struct saved_msr {
|
|
+ bool valid;
|
|
+ struct msr_info info;
|
|
+};
|
|
+
|
|
+struct saved_msrs {
|
|
+ unsigned int num;
|
|
+ struct saved_msr *array;
|
|
+};
|
|
+
|
|
+struct saved_context {
|
|
+ struct pt_regs regs;
|
|
+ u16 ds;
|
|
+ u16 es;
|
|
+ u16 fs;
|
|
+ u16 gs;
|
|
+ long unsigned int kernelmode_gs_base;
|
|
+ long unsigned int usermode_gs_base;
|
|
+ long unsigned int fs_base;
|
|
+ long unsigned int cr0;
|
|
+ long unsigned int cr2;
|
|
+ long unsigned int cr3;
|
|
+ long unsigned int cr4;
|
|
+ long unsigned int cr8;
|
|
+ u64 misc_enable;
|
|
+ bool misc_enable_saved;
|
|
+ struct saved_msrs saved_msrs;
|
|
+ long unsigned int efer;
|
|
+ u16 gdt_pad;
|
|
+ struct desc_ptr gdt_desc;
|
|
+ u16 idt_pad;
|
|
+ struct desc_ptr idt;
|
|
+ u16 ldt;
|
|
+ u16 tss;
|
|
+ long unsigned int tr;
|
|
+ long unsigned int safety;
|
|
+ long unsigned int return_address;
|
|
+} __attribute__((packed));
|
|
+
|
|
+typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
|
|
+
|
|
+struct restore_data_record {
|
|
+ long unsigned int jump_address;
|
|
+ long unsigned int jump_address_phys;
|
|
+ long unsigned int cr3;
|
|
+ long unsigned int magic;
|
|
+ u8 e820_digest[16];
|
|
+};
|
|
+
|
|
+struct mmsghdr {
|
|
+ struct user_msghdr msg_hdr;
|
|
+ unsigned int msg_len;
|
|
+};
|
|
+
|
|
+enum sock_shutdown_cmd {
|
|
+ SHUT_RD = 0,
|
|
+ SHUT_WR = 1,
|
|
+ SHUT_RDWR = 2,
|
|
+};
|
|
+
|
|
+struct ifconf {
|
|
+ int ifc_len;
|
|
+ union {
|
|
+ char *ifcu_buf;
|
|
+ struct ifreq *ifcu_req;
|
|
+ } ifc_ifcu;
|
|
+};
|
|
+
|
|
+struct compat_ifmap {
|
|
+ compat_ulong_t mem_start;
|
|
+ compat_ulong_t mem_end;
|
|
+ short unsigned int base_addr;
|
|
+ unsigned char irq;
|
|
+ unsigned char dma;
|
|
+ unsigned char port;
|
|
+};
|
|
+
|
|
+struct compat_if_settings {
|
|
+ unsigned int type;
|
|
+ unsigned int size;
|
|
+ compat_uptr_t ifs_ifsu;
|
|
+};
|
|
+
|
|
+struct compat_ifreq {
|
|
+ union {
|
|
+ char ifrn_name[16];
|
|
+ } ifr_ifrn;
|
|
+ union {
|
|
+ struct sockaddr ifru_addr;
|
|
+ struct sockaddr ifru_dstaddr;
|
|
+ struct sockaddr ifru_broadaddr;
|
|
+ struct sockaddr ifru_netmask;
|
|
+ struct sockaddr ifru_hwaddr;
|
|
+ short int ifru_flags;
|
|
+ compat_int_t ifru_ivalue;
|
|
+ compat_int_t ifru_mtu;
|
|
+ struct compat_ifmap ifru_map;
|
|
+ char ifru_slave[16];
|
|
+ char ifru_newname[16];
|
|
+ compat_caddr_t ifru_data;
|
|
+ struct compat_if_settings ifru_settings;
|
|
+ } ifr_ifru;
|
|
+};
|
|
+
|
|
+struct compat_ifconf {
|
|
+ compat_int_t ifc_len;
|
|
+ compat_caddr_t ifcbuf;
|
|
+};
|
|
+
|
|
+struct compat_ethtool_rx_flow_spec {
|
|
+ u32 flow_type;
|
|
+ union ethtool_flow_union h_u;
|
|
+ struct ethtool_flow_ext h_ext;
|
|
+ union ethtool_flow_union m_u;
|
|
+ struct ethtool_flow_ext m_ext;
|
|
+ compat_u64 ring_cookie;
|
|
+ u32 location;
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct compat_ethtool_rxnfc {
|
|
+ u32 cmd;
|
|
+ u32 flow_type;
|
|
+ compat_u64 data;
|
|
+ struct compat_ethtool_rx_flow_spec fs;
|
|
+ u32 rule_cnt;
|
|
+ u32 rule_locs[0];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct compat_mmsghdr {
|
|
+ struct compat_msghdr msg_hdr;
|
|
+ compat_uint_t msg_len;
|
|
+};
|
|
+
|
|
+struct scm_ts_pktinfo {
|
|
+ __u32 if_index;
|
|
+ __u32 pkt_length;
|
|
+ __u32 reserved[2];
|
|
+};
|
|
+
|
|
+struct sock_skb_cb {
|
|
+ u32 dropcount;
|
|
+};
|
|
+
|
|
+struct in6_rtmsg {
|
|
+ struct in6_addr rtmsg_dst;
|
|
+ struct in6_addr rtmsg_src;
|
|
+ struct in6_addr rtmsg_gateway;
|
|
+ __u32 rtmsg_type;
|
|
+ __u16 rtmsg_dst_len;
|
|
+ __u16 rtmsg_src_len;
|
|
+ __u32 rtmsg_metric;
|
|
+ long unsigned int rtmsg_info;
|
|
+ __u32 rtmsg_flags;
|
|
+ int rtmsg_ifindex;
|
|
+};
|
|
+
|
|
+struct rtentry {
|
|
+ long unsigned int rt_pad1;
|
|
+ struct sockaddr rt_dst;
|
|
+ struct sockaddr rt_gateway;
|
|
+ struct sockaddr rt_genmask;
|
|
+ short unsigned int rt_flags;
|
|
+ short int rt_pad2;
|
|
+ long unsigned int rt_pad3;
|
|
+ void *rt_pad4;
|
|
+ short int rt_metric;
|
|
+ char *rt_dev;
|
|
+ long unsigned int rt_mtu;
|
|
+ long unsigned int rt_window;
|
|
+ short unsigned int rt_irtt;
|
|
+};
|
|
+
|
|
+struct sock_extended_err {
|
|
+ __u32 ee_errno;
|
|
+ __u8 ee_origin;
|
|
+ __u8 ee_type;
|
|
+ __u8 ee_code;
|
|
+ __u8 ee_pad;
|
|
+ __u32 ee_info;
|
|
+ __u32 ee_data;
|
|
+};
|
|
+
|
|
+struct scm_timestamping {
|
|
+ struct timespec ts[3];
|
|
+};
|
|
+
|
|
+struct sock_exterr_skb {
|
|
+ union {
|
|
+ struct inet_skb_parm h4;
|
|
+ struct inet6_skb_parm h6;
|
|
+ } header;
|
|
+ struct sock_extended_err ee;
|
|
+ u16 addr_offset;
|
|
+ __be16 port;
|
|
+ u8 opt_stats: 1;
|
|
+ u8 unused: 7;
|
|
+};
|
|
+
|
|
+struct used_address {
|
|
+ struct __kernel_sockaddr_storage name;
|
|
+ unsigned int name_len;
|
|
+ int: 32;
|
|
+};
|
|
+
|
|
+struct rtentry32 {
|
|
+ u32 rt_pad1;
|
|
+ struct sockaddr rt_dst;
|
|
+ struct sockaddr rt_gateway;
|
|
+ struct sockaddr rt_genmask;
|
|
+ short unsigned int rt_flags;
|
|
+ short int rt_pad2;
|
|
+ u32 rt_pad3;
|
|
+ unsigned char rt_tos;
|
|
+ unsigned char rt_class;
|
|
+ short int rt_pad4;
|
|
+ short int rt_metric;
|
|
+ u32 rt_dev;
|
|
+ u32 rt_mtu;
|
|
+ u32 rt_window;
|
|
+ short unsigned int rt_irtt;
|
|
+};
|
|
+
|
|
+struct in6_rtmsg32 {
|
|
+ struct in6_addr rtmsg_dst;
|
|
+ struct in6_addr rtmsg_src;
|
|
+ struct in6_addr rtmsg_gateway;
|
|
+ u32 rtmsg_type;
|
|
+ u16 rtmsg_dst_len;
|
|
+ u16 rtmsg_src_len;
|
|
+ u32 rtmsg_metric;
|
|
+ u32 rtmsg_info;
|
|
+ u32 rtmsg_flags;
|
|
+ s32 rtmsg_ifindex;
|
|
+};
|
|
+
|
|
+struct linger {
|
|
+ int l_onoff;
|
|
+ int l_linger;
|
|
+};
|
|
+
|
|
+struct ucred {
|
|
+ __u32 pid;
|
|
+ __u32 uid;
|
|
+ __u32 gid;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SKB_GSO_TCPV4 = 1,
|
|
+ SKB_GSO_DODGY = 2,
|
|
+ SKB_GSO_TCP_ECN = 4,
|
|
+ SKB_GSO_TCP_FIXEDID = 8,
|
|
+ SKB_GSO_TCPV6 = 16,
|
|
+ SKB_GSO_FCOE = 32,
|
|
+ SKB_GSO_GRE = 64,
|
|
+ SKB_GSO_GRE_CSUM = 128,
|
|
+ SKB_GSO_IPXIP4 = 256,
|
|
+ SKB_GSO_IPXIP6 = 512,
|
|
+ SKB_GSO_UDP_TUNNEL = 1024,
|
|
+ SKB_GSO_UDP_TUNNEL_CSUM = 2048,
|
|
+ SKB_GSO_PARTIAL = 4096,
|
|
+ SKB_GSO_TUNNEL_REMCSUM = 8192,
|
|
+ SKB_GSO_SCTP = 16384,
|
|
+ SKB_GSO_ESP = 32768,
|
|
+ SKB_GSO_UDP = 65536,
|
|
+ SKB_GSO_UDP_L4 = 131072,
|
|
+};
|
|
+
|
|
+struct prot_inuse {
|
|
+ int val[64];
|
|
+};
|
|
+
|
|
+struct offload_callbacks {
|
|
+ struct sk_buff * (*gso_segment)(struct sk_buff *, netdev_features_t);
|
|
+ struct sk_buff * (*gro_receive)(struct list_head *, struct sk_buff *);
|
|
+ int (*gro_complete)(struct sk_buff *, int);
|
|
+};
|
|
+
|
|
+enum txtime_flags {
|
|
+ SOF_TXTIME_DEADLINE_MODE = 1,
|
|
+ SOF_TXTIME_REPORT_ERRORS = 2,
|
|
+ SOF_TXTIME_FLAGS_LAST = 2,
|
|
+ SOF_TXTIME_FLAGS_MASK = 3,
|
|
+};
|
|
+
|
|
+struct sock_txtime {
|
|
+ __kernel_clockid_t clockid;
|
|
+ __u32 flags;
|
|
+};
|
|
+
|
|
+enum sk_pacing {
|
|
+ SK_PACING_NONE = 0,
|
|
+ SK_PACING_NEEDED = 1,
|
|
+ SK_PACING_FQ = 2,
|
|
+};
|
|
+
|
|
+struct sockcm_cookie {
|
|
+ u64 transmit_time;
|
|
+ u32 mark;
|
|
+ u16 tsflags;
|
|
+};
|
|
+
|
|
+struct minmax_sample {
|
|
+ u32 t;
|
|
+ u32 v;
|
|
+};
|
|
+
|
|
+struct minmax {
|
|
+ struct minmax_sample s[3];
|
|
+};
|
|
+
|
|
+struct tcp_fastopen_cookie {
|
|
+ union {
|
|
+ u8 val[16];
|
|
+ struct in6_addr addr;
|
|
+ };
|
|
+ s8 len;
|
|
+ bool exp;
|
|
+};
|
|
+
|
|
+struct tcp_sack_block {
|
|
+ u32 start_seq;
|
|
+ u32 end_seq;
|
|
+};
|
|
+
|
|
+struct tcp_options_received {
|
|
+ int ts_recent_stamp;
|
|
+ u32 ts_recent;
|
|
+ u32 rcv_tsval;
|
|
+ u32 rcv_tsecr;
|
|
+ u16 saw_tstamp: 1;
|
|
+ u16 tstamp_ok: 1;
|
|
+ u16 dsack: 1;
|
|
+ u16 wscale_ok: 1;
|
|
+ u16 sack_ok: 3;
|
|
+ u16 smc_ok: 1;
|
|
+ u16 snd_wscale: 4;
|
|
+ u16 rcv_wscale: 4;
|
|
+ u8 num_sacks;
|
|
+ u16 user_mss;
|
|
+ u16 mss_clamp;
|
|
+};
|
|
+
|
|
+struct tcp_rack {
|
|
+ u64 mstamp;
|
|
+ u32 rtt_us;
|
|
+ u32 end_seq;
|
|
+ u32 last_delivered;
|
|
+ u8 reo_wnd_steps;
|
|
+ u8 reo_wnd_persist: 5;
|
|
+ u8 dsack_seen: 1;
|
|
+ u8 advanced: 1;
|
|
+};
|
|
+
|
|
+struct tcp_sock_af_ops;
|
|
+
|
|
+struct tcp_md5sig_info;
|
|
+
|
|
+struct tcp_fastopen_request;
|
|
+
|
|
+struct tcp_sock {
|
|
+ struct inet_connection_sock inet_conn;
|
|
+ u16 tcp_header_len;
|
|
+ u16 gso_segs;
|
|
+ __be32 pred_flags;
|
|
+ u64 bytes_received;
|
|
+ u32 segs_in;
|
|
+ u32 data_segs_in;
|
|
+ u32 rcv_nxt;
|
|
+ u32 copied_seq;
|
|
+ u32 rcv_wup;
|
|
+ u32 snd_nxt;
|
|
+ u32 segs_out;
|
|
+ u32 data_segs_out;
|
|
+ u64 bytes_sent;
|
|
+ u64 bytes_acked;
|
|
+ u32 dsack_dups;
|
|
+ u32 snd_una;
|
|
+ u32 snd_sml;
|
|
+ u32 rcv_tstamp;
|
|
+ u32 lsndtime;
|
|
+ u32 last_oow_ack_time;
|
|
+ u32 compressed_ack_rcv_nxt;
|
|
+ u32 tsoffset;
|
|
+ struct list_head tsq_node;
|
|
+ struct list_head tsorted_sent_queue;
|
|
+ u32 snd_wl1;
|
|
+ u32 snd_wnd;
|
|
+ u32 max_window;
|
|
+ u32 mss_cache;
|
|
+ u32 window_clamp;
|
|
+ u32 rcv_ssthresh;
|
|
+ struct tcp_rack rack;
|
|
+ u16 advmss;
|
|
+ u8 compressed_ack;
|
|
+ u8 tlp_retrans: 1;
|
|
+ u8 unused_1: 7;
|
|
+ u32 chrono_start;
|
|
+ u32 chrono_stat[3];
|
|
+ u8 chrono_type: 2;
|
|
+ u8 rate_app_limited: 1;
|
|
+ u8 fastopen_connect: 1;
|
|
+ u8 fastopen_no_cookie: 1;
|
|
+ u8 is_sack_reneg: 1;
|
|
+ u8 unused: 2;
|
|
+ u8 nonagle: 4;
|
|
+ u8 thin_lto: 1;
|
|
+ u8 recvmsg_inq: 1;
|
|
+ u8 repair: 1;
|
|
+ u8 frto: 1;
|
|
+ u8 repair_queue;
|
|
+ u8 syn_data: 1;
|
|
+ u8 syn_fastopen: 1;
|
|
+ u8 syn_fastopen_exp: 1;
|
|
+ u8 syn_fastopen_ch: 1;
|
|
+ u8 syn_data_acked: 1;
|
|
+ u8 save_syn: 1;
|
|
+ u8 is_cwnd_limited: 1;
|
|
+ u8 syn_smc: 1;
|
|
+ u32 tlp_high_seq;
|
|
+ u64 tcp_mstamp;
|
|
+ u32 srtt_us;
|
|
+ u32 mdev_us;
|
|
+ u32 mdev_max_us;
|
|
+ u32 rttvar_us;
|
|
+ u32 rtt_seq;
|
|
+ struct minmax rtt_min;
|
|
+ u32 packets_out;
|
|
+ u32 retrans_out;
|
|
+ u32 max_packets_out;
|
|
+ u32 max_packets_seq;
|
|
+ u16 urg_data;
|
|
+ u8 ecn_flags;
|
|
+ u8 keepalive_probes;
|
|
+ u32 reordering;
|
|
+ u32 reord_seen;
|
|
+ u32 snd_up;
|
|
+ struct tcp_options_received rx_opt;
|
|
+ u32 snd_ssthresh;
|
|
+ u32 snd_cwnd;
|
|
+ u32 snd_cwnd_cnt;
|
|
+ u32 snd_cwnd_clamp;
|
|
+ u32 snd_cwnd_used;
|
|
+ u32 snd_cwnd_stamp;
|
|
+ u32 prior_cwnd;
|
|
+ u32 prr_delivered;
|
|
+ u32 prr_out;
|
|
+ u32 delivered;
|
|
+ u32 delivered_ce;
|
|
+ u32 lost;
|
|
+ u32 app_limited;
|
|
+ u64 first_tx_mstamp;
|
|
+ u64 delivered_mstamp;
|
|
+ u32 rate_delivered;
|
|
+ u32 rate_interval_us;
|
|
+ u32 rcv_wnd;
|
|
+ u32 write_seq;
|
|
+ u32 notsent_lowat;
|
|
+ u32 pushed_seq;
|
|
+ u32 lost_out;
|
|
+ u32 sacked_out;
|
|
+ struct hrtimer pacing_timer;
|
|
+ struct hrtimer compressed_ack_timer;
|
|
+ struct sk_buff *lost_skb_hint;
|
|
+ struct sk_buff *retransmit_skb_hint;
|
|
+ struct rb_root out_of_order_queue;
|
|
+ struct sk_buff *ooo_last_skb;
|
|
+ struct tcp_sack_block duplicate_sack[1];
|
|
+ struct tcp_sack_block selective_acks[4];
|
|
+ struct tcp_sack_block recv_sack_cache[4];
|
|
+ struct sk_buff *highest_sack;
|
|
+ int lost_cnt_hint;
|
|
+ u32 prior_ssthresh;
|
|
+ u32 high_seq;
|
|
+ u32 retrans_stamp;
|
|
+ u32 undo_marker;
|
|
+ int undo_retrans;
|
|
+ u64 bytes_retrans;
|
|
+ u32 total_retrans;
|
|
+ u32 urg_seq;
|
|
+ unsigned int keepalive_time;
|
|
+ unsigned int keepalive_intvl;
|
|
+ int linger2;
|
|
+ u8 bpf_sock_ops_cb_flags;
|
|
+ u32 rcv_rtt_last_tsecr;
|
|
+ struct {
|
|
+ u32 rtt_us;
|
|
+ u32 seq;
|
|
+ u64 time;
|
|
+ } rcv_rtt_est;
|
|
+ struct {
|
|
+ u32 space;
|
|
+ u32 seq;
|
|
+ u64 time;
|
|
+ } rcvq_space;
|
|
+ struct {
|
|
+ u32 probe_seq_start;
|
|
+ u32 probe_seq_end;
|
|
+ } mtu_probe;
|
|
+ u32 mtu_info;
|
|
+ const struct tcp_sock_af_ops *af_specific;
|
|
+ struct tcp_md5sig_info *md5sig_info;
|
|
+ struct tcp_fastopen_request *fastopen_req;
|
|
+ struct request_sock *fastopen_rsk;
|
|
+ u32 *saved_syn;
|
|
+};
|
|
+
|
|
+struct tcp_md5sig_key;
|
|
+
|
|
+struct tcp_sock_af_ops {
|
|
+ struct tcp_md5sig_key * (*md5_lookup)(const struct sock *, const struct sock *);
|
|
+ int (*calc_md5_hash)(char *, const struct tcp_md5sig_key *, const struct sock *, const struct sk_buff *);
|
|
+ int (*md5_parse)(struct sock *, int, char *, int);
|
|
+};
|
|
+
|
|
+struct tcp_md5sig_info {
|
|
+ struct hlist_head head;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct tcp_fastopen_request {
|
|
+ struct tcp_fastopen_cookie cookie;
|
|
+ struct msghdr *data;
|
|
+ size_t size;
|
|
+ int copied;
|
|
+};
|
|
+
|
|
+union tcp_md5_addr {
|
|
+ struct in_addr a4;
|
|
+ struct in6_addr a6;
|
|
+};
|
|
+
|
|
+struct tcp_md5sig_key {
|
|
+ struct hlist_node node;
|
|
+ u8 keylen;
|
|
+ u8 family;
|
|
+ union tcp_md5_addr addr;
|
|
+ u8 prefixlen;
|
|
+ u8 key[80];
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct net_protocol {
|
|
+ int (*early_demux)(struct sk_buff *);
|
|
+ int (*early_demux_handler)(struct sk_buff *);
|
|
+ int (*handler)(struct sk_buff *);
|
|
+ void (*err_handler)(struct sk_buff *, u32);
|
|
+ unsigned int no_policy: 1;
|
|
+ unsigned int netns_ok: 1;
|
|
+ unsigned int icmp_strict_tag_validation: 1;
|
|
+};
|
|
+
|
|
+struct inet6_protocol {
|
|
+ void (*early_demux)(struct sk_buff *);
|
|
+ void (*early_demux_handler)(struct sk_buff *);
|
|
+ int (*handler)(struct sk_buff *);
|
|
+ void (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32);
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+struct net_offload {
|
|
+ struct offload_callbacks callbacks;
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+struct cgroup_cls_state {
|
|
+ struct cgroup_subsys_state css;
|
|
+ u32 classid;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SK_MEMINFO_RMEM_ALLOC = 0,
|
|
+ SK_MEMINFO_RCVBUF = 1,
|
|
+ SK_MEMINFO_WMEM_ALLOC = 2,
|
|
+ SK_MEMINFO_SNDBUF = 3,
|
|
+ SK_MEMINFO_FWD_ALLOC = 4,
|
|
+ SK_MEMINFO_WMEM_QUEUED = 5,
|
|
+ SK_MEMINFO_OPTMEM = 6,
|
|
+ SK_MEMINFO_BACKLOG = 7,
|
|
+ SK_MEMINFO_DROPS = 8,
|
|
+ SK_MEMINFO_VARS = 9,
|
|
+};
|
|
+
|
|
+enum sknetlink_groups {
|
|
+ SKNLGRP_NONE = 0,
|
|
+ SKNLGRP_INET_TCP_DESTROY = 1,
|
|
+ SKNLGRP_INET_UDP_DESTROY = 2,
|
|
+ SKNLGRP_INET6_TCP_DESTROY = 3,
|
|
+ SKNLGRP_INET6_UDP_DESTROY = 4,
|
|
+ __SKNLGRP_MAX = 5,
|
|
+};
|
|
+
|
|
+struct inet_request_sock {
|
|
+ struct request_sock req;
|
|
+ u16 snd_wscale: 4;
|
|
+ u16 rcv_wscale: 4;
|
|
+ u16 tstamp_ok: 1;
|
|
+ u16 sack_ok: 1;
|
|
+ u16 wscale_ok: 1;
|
|
+ u16 ecn_ok: 1;
|
|
+ u16 acked: 1;
|
|
+ u16 no_srccheck: 1;
|
|
+ u16 smc_ok: 1;
|
|
+ u16 comp_ok: 1;
|
|
+ u32 ir_mark;
|
|
+ union {
|
|
+ struct ip_options_rcu *ireq_opt;
|
|
+ struct {
|
|
+ struct ipv6_txoptions *ipv6_opt;
|
|
+ struct sk_buff *pktopts;
|
|
+ };
|
|
+ };
|
|
+};
|
|
+
|
|
+struct tcp_request_sock_ops;
|
|
+
|
|
+struct tcp_request_sock {
|
|
+ struct inet_request_sock req;
|
|
+ const struct tcp_request_sock_ops *af_specific;
|
|
+ u64 snt_synack;
|
|
+ bool tfo_listener;
|
|
+ u32 txhash;
|
|
+ u32 rcv_isn;
|
|
+ u32 snt_isn;
|
|
+ u32 ts_off;
|
|
+ u32 last_oow_ack_time;
|
|
+ u32 rcv_nxt;
|
|
+};
|
|
+
|
|
+enum tcp_synack_type {
|
|
+ TCP_SYNACK_NORMAL = 0,
|
|
+ TCP_SYNACK_FASTOPEN = 1,
|
|
+ TCP_SYNACK_COOKIE = 2,
|
|
+};
|
|
+
|
|
+struct tcp_request_sock_ops {
|
|
+ u16 mss_clamp;
|
|
+ struct tcp_md5sig_key * (*req_md5_lookup)(const struct sock *, const struct sock *);
|
|
+ int (*calc_md5_hash)(char *, const struct tcp_md5sig_key *, const struct sock *, const struct sk_buff *);
|
|
+ void (*init_req)(struct request_sock *, const struct sock *, struct sk_buff *);
|
|
+ __u32 (*cookie_init_seq)(const struct sk_buff *, __u16 *);
|
|
+ struct dst_entry * (*route_req)(const struct sock *, struct flowi *, const struct request_sock *);
|
|
+ u32 (*init_seq)(const struct sk_buff *);
|
|
+ u32 (*init_ts_off)(const struct net *, const struct sk_buff *);
|
|
+ int (*send_synack)(const struct sock *, struct dst_entry *, struct flowi *, struct request_sock *, struct tcp_fastopen_cookie *, enum tcp_synack_type);
|
|
+};
|
|
+
|
|
+struct mmpin {
|
|
+ struct user_struct *user;
|
|
+ unsigned int num_pg;
|
|
+};
|
|
+
|
|
+struct ubuf_info {
|
|
+ void (*callback)(struct ubuf_info *, bool);
|
|
+ union {
|
|
+ struct {
|
|
+ long unsigned int desc;
|
|
+ void *ctx;
|
|
+ };
|
|
+ struct {
|
|
+ u32 id;
|
|
+ u16 len;
|
|
+ u16 zerocopy: 1;
|
|
+ u32 bytelen;
|
|
+ };
|
|
+ };
|
|
+ refcount_t refcnt;
|
|
+ struct mmpin mmp;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SKB_FCLONE_UNAVAILABLE = 0,
|
|
+ SKB_FCLONE_ORIG = 1,
|
|
+ SKB_FCLONE_CLONE = 2,
|
|
+};
|
|
+
|
|
+struct sk_buff_fclones {
|
|
+ struct sk_buff skb1;
|
|
+ struct sk_buff skb2;
|
|
+ refcount_t fclone_ref;
|
|
+};
|
|
+
|
|
+struct skb_seq_state {
|
|
+ __u32 lower_offset;
|
|
+ __u32 upper_offset;
|
|
+ __u32 frag_idx;
|
|
+ __u32 stepped_offset;
|
|
+ struct sk_buff *root_skb;
|
|
+ struct sk_buff *cur_skb;
|
|
+ __u8 *frag_data;
|
|
+};
|
|
+
|
|
+struct skb_gso_cb {
|
|
+ union {
|
|
+ int mac_offset;
|
|
+ int data_offset;
|
|
+ };
|
|
+ int encap_level;
|
|
+ __wsum csum;
|
|
+ __u16 csum_start;
|
|
+};
|
|
+
|
|
+struct napi_gro_cb {
|
|
+ void *frag0;
|
|
+ unsigned int frag0_len;
|
|
+ int data_offset;
|
|
+ u16 flush;
|
|
+ u16 flush_id;
|
|
+ u16 count;
|
|
+ u16 gro_remcsum_start;
|
|
+ long unsigned int age;
|
|
+ u16 proto;
|
|
+ u8 same_flow: 1;
|
|
+ u8 encap_mark: 1;
|
|
+ u8 csum_valid: 1;
|
|
+ u8 csum_cnt: 3;
|
|
+ u8 free: 2;
|
|
+ u8 is_ipv6: 1;
|
|
+ u8 is_fou: 1;
|
|
+ u8 is_atomic: 1;
|
|
+ u8 recursion_counter: 4;
|
|
+ __wsum csum;
|
|
+ struct sk_buff *last;
|
|
+};
|
|
+
|
|
+enum skb_free_reason {
|
|
+ SKB_REASON_CONSUMED = 0,
|
|
+ SKB_REASON_DROPPED = 1,
|
|
+};
|
|
+
|
|
+struct vlan_hdr {
|
|
+ __be16 h_vlan_TCI;
|
|
+ __be16 h_vlan_encapsulated_proto;
|
|
+};
|
|
+
|
|
+struct vlan_ethhdr {
|
|
+ unsigned char h_dest[6];
|
|
+ unsigned char h_source[6];
|
|
+ __be16 h_vlan_proto;
|
|
+ __be16 h_vlan_TCI;
|
|
+ __be16 h_vlan_encapsulated_proto;
|
|
+};
|
|
+
|
|
+struct qdisc_walker {
|
|
+ int stop;
|
|
+ int skip;
|
|
+ int count;
|
|
+ int (*fn)(struct Qdisc *, long unsigned int, struct qdisc_walker *);
|
|
+};
|
|
+
|
|
+struct ip_auth_hdr {
|
|
+ __u8 nexthdr;
|
|
+ __u8 hdrlen;
|
|
+ __be16 reserved;
|
|
+ __be32 spi;
|
|
+ __be32 seq_no;
|
|
+ __u8 auth_data[0];
|
|
+};
|
|
+
|
|
+struct frag_hdr {
|
|
+ __u8 nexthdr;
|
|
+ __u8 reserved;
|
|
+ __be16 frag_off;
|
|
+ __be32 identification;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SCM_TSTAMP_SND = 0,
|
|
+ SCM_TSTAMP_SCHED = 1,
|
|
+ SCM_TSTAMP_ACK = 2,
|
|
+};
|
|
+
|
|
+struct napi_alloc_cache {
|
|
+ struct page_frag_cache page;
|
|
+ unsigned int skb_count;
|
|
+ void *skb_cache[64];
|
|
+};
|
|
+
|
|
+struct scm_cookie {
|
|
+ struct pid *pid;
|
|
+ struct scm_fp_list *fp;
|
|
+ struct scm_creds creds;
|
|
+ u32 secid;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCA_STATS_UNSPEC = 0,
|
|
+ TCA_STATS_BASIC = 1,
|
|
+ TCA_STATS_RATE_EST = 2,
|
|
+ TCA_STATS_QUEUE = 3,
|
|
+ TCA_STATS_APP = 4,
|
|
+ TCA_STATS_RATE_EST64 = 5,
|
|
+ TCA_STATS_PAD = 6,
|
|
+ __TCA_STATS_MAX = 7,
|
|
+};
|
|
+
|
|
+struct gnet_stats_basic {
|
|
+ __u64 bytes;
|
|
+ __u32 packets;
|
|
+};
|
|
+
|
|
+struct gnet_stats_rate_est {
|
|
+ __u32 bps;
|
|
+ __u32 pps;
|
|
+};
|
|
+
|
|
+struct gnet_stats_rate_est64 {
|
|
+ __u64 bps;
|
|
+ __u64 pps;
|
|
+};
|
|
+
|
|
+struct gnet_estimator {
|
|
+ signed char interval;
|
|
+ unsigned char ewma_log;
|
|
+};
|
|
+
|
|
+struct net_rate_estimator {
|
|
+ struct gnet_stats_basic_packed *bstats;
|
|
+ spinlock_t *stats_lock;
|
|
+ seqcount_t *running;
|
|
+ struct gnet_stats_basic_cpu *cpu_bstats;
|
|
+ u8 ewma_log;
|
|
+ u8 intvl_log;
|
|
+ seqcount_t seq;
|
|
+ u32 last_packets;
|
|
+ u64 last_bytes;
|
|
+ u64 avpps;
|
|
+ u64 avbps;
|
|
+ long unsigned int next_jiffies;
|
|
+ struct timer_list timer;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct rtgenmsg {
|
|
+ unsigned char rtgen_family;
|
|
+};
|
|
+
|
|
+enum rtnetlink_groups {
|
|
+ RTNLGRP_NONE = 0,
|
|
+ RTNLGRP_LINK = 1,
|
|
+ RTNLGRP_NOTIFY = 2,
|
|
+ RTNLGRP_NEIGH = 3,
|
|
+ RTNLGRP_TC = 4,
|
|
+ RTNLGRP_IPV4_IFADDR = 5,
|
|
+ RTNLGRP_IPV4_MROUTE = 6,
|
|
+ RTNLGRP_IPV4_ROUTE = 7,
|
|
+ RTNLGRP_IPV4_RULE = 8,
|
|
+ RTNLGRP_IPV6_IFADDR = 9,
|
|
+ RTNLGRP_IPV6_MROUTE = 10,
|
|
+ RTNLGRP_IPV6_ROUTE = 11,
|
|
+ RTNLGRP_IPV6_IFINFO = 12,
|
|
+ RTNLGRP_DECnet_IFADDR = 13,
|
|
+ RTNLGRP_NOP2 = 14,
|
|
+ RTNLGRP_DECnet_ROUTE = 15,
|
|
+ RTNLGRP_DECnet_RULE = 16,
|
|
+ RTNLGRP_NOP4 = 17,
|
|
+ RTNLGRP_IPV6_PREFIX = 18,
|
|
+ RTNLGRP_IPV6_RULE = 19,
|
|
+ RTNLGRP_ND_USEROPT = 20,
|
|
+ RTNLGRP_PHONET_IFADDR = 21,
|
|
+ RTNLGRP_PHONET_ROUTE = 22,
|
|
+ RTNLGRP_DCB = 23,
|
|
+ RTNLGRP_IPV4_NETCONF = 24,
|
|
+ RTNLGRP_IPV6_NETCONF = 25,
|
|
+ RTNLGRP_MDB = 26,
|
|
+ RTNLGRP_MPLS_ROUTE = 27,
|
|
+ RTNLGRP_NSID = 28,
|
|
+ RTNLGRP_MPLS_NETCONF = 29,
|
|
+ RTNLGRP_IPV4_MROUTE_R = 30,
|
|
+ RTNLGRP_IPV6_MROUTE_R = 31,
|
|
+ __RTNLGRP_MAX = 32,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NETNSA_NONE = 0,
|
|
+ NETNSA_NSID = 1,
|
|
+ NETNSA_PID = 2,
|
|
+ NETNSA_FD = 3,
|
|
+ __NETNSA_MAX = 4,
|
|
+};
|
|
+
|
|
+enum rtnl_link_flags {
|
|
+ RTNL_FLAG_DOIT_UNLOCKED = 1,
|
|
+};
|
|
+
|
|
+struct rtnl_net_dump_cb {
|
|
+ struct net *net;
|
|
+ struct sk_buff *skb;
|
|
+ struct netlink_callback *cb;
|
|
+ int idx;
|
|
+ int s_idx;
|
|
+};
|
|
+
|
|
+struct flow_dissector_key_control {
|
|
+ u16 thoff;
|
|
+ u16 addr_type;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+enum flow_dissect_ret {
|
|
+ FLOW_DISSECT_RET_OUT_GOOD = 0,
|
|
+ FLOW_DISSECT_RET_OUT_BAD = 1,
|
|
+ FLOW_DISSECT_RET_PROTO_AGAIN = 2,
|
|
+ FLOW_DISSECT_RET_IPPROTO_AGAIN = 3,
|
|
+ FLOW_DISSECT_RET_CONTINUE = 4,
|
|
+};
|
|
+
|
|
+struct flow_dissector_key_basic {
|
|
+ __be16 n_proto;
|
|
+ u8 ip_proto;
|
|
+ u8 padding;
|
|
+};
|
|
+
|
|
+struct flow_dissector_key_tags {
|
|
+ u32 flow_label;
|
|
+};
|
|
+
|
|
+struct flow_dissector_key_vlan {
|
|
+ u16 vlan_id: 12;
|
|
+ u16 vlan_priority: 3;
|
|
+ __be16 vlan_tpid;
|
|
+};
|
|
+
|
|
+struct flow_dissector_key_mpls {
|
|
+ u32 mpls_ttl: 8;
|
|
+ u32 mpls_bos: 1;
|
|
+ u32 mpls_tc: 3;
|
|
+ u32 mpls_label: 20;
|
|
+};
|
|
+
|
|
+struct flow_dissector_key_enc_opts {
|
|
+ u8 data[255];
|
|
+ u8 len;
|
|
+ __be16 dst_opt_type;
|
|
+};
|
|
+
|
|
+struct flow_dissector_key_keyid {
|
|
+ __be32 keyid;
|
|
+};
|
|
+
|
|
+struct flow_dissector_key_ipv4_addrs {
|
|
+ __be32 src;
|
|
+ __be32 dst;
|
|
+};
|
|
+
|
|
+struct flow_dissector_key_ipv6_addrs {
|
|
+ struct in6_addr src;
|
|
+ struct in6_addr dst;
|
|
+};
|
|
+
|
|
+struct flow_dissector_key_tipc {
|
|
+ __be32 key;
|
|
+};
|
|
+
|
|
+struct flow_dissector_key_addrs {
|
|
+ union {
|
|
+ struct flow_dissector_key_ipv4_addrs v4addrs;
|
|
+ struct flow_dissector_key_ipv6_addrs v6addrs;
|
|
+ struct flow_dissector_key_tipc tipckey;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct flow_dissector_key_arp {
|
|
+ __u32 sip;
|
|
+ __u32 tip;
|
|
+ __u8 op;
|
|
+ unsigned char sha[6];
|
|
+ unsigned char tha[6];
|
|
+};
|
|
+
|
|
+struct flow_dissector_key_ports {
|
|
+ union {
|
|
+ __be32 ports;
|
|
+ struct {
|
|
+ __be16 src;
|
|
+ __be16 dst;
|
|
+ };
|
|
+ };
|
|
+};
|
|
+
|
|
+struct flow_dissector_key_icmp {
|
|
+ union {
|
|
+ __be16 icmp;
|
|
+ struct {
|
|
+ u8 type;
|
|
+ u8 code;
|
|
+ };
|
|
+ };
|
|
+};
|
|
+
|
|
+struct flow_dissector_key_eth_addrs {
|
|
+ unsigned char dst[6];
|
|
+ unsigned char src[6];
|
|
+};
|
|
+
|
|
+struct flow_dissector_key_tcp {
|
|
+ __be16 flags;
|
|
+};
|
|
+
|
|
+struct flow_dissector_key_ip {
|
|
+ __u8 tos;
|
|
+ __u8 ttl;
|
|
+};
|
|
+
|
|
+struct flow_dissector_key {
|
|
+ enum flow_dissector_key_id key_id;
|
|
+ size_t offset;
|
|
+};
|
|
+
|
|
+struct flow_keys_basic {
|
|
+ struct flow_dissector_key_control control;
|
|
+ struct flow_dissector_key_basic basic;
|
|
+};
|
|
+
|
|
+struct flow_keys {
|
|
+ struct flow_dissector_key_control control;
|
|
+ struct flow_dissector_key_basic basic;
|
|
+ struct flow_dissector_key_tags tags;
|
|
+ struct flow_dissector_key_vlan vlan;
|
|
+ struct flow_dissector_key_vlan cvlan;
|
|
+ struct flow_dissector_key_keyid keyid;
|
|
+ struct flow_dissector_key_ports ports;
|
|
+ struct flow_dissector_key_addrs addrs;
|
|
+};
|
|
+
|
|
+struct flow_keys_digest {
|
|
+ u8 data[16];
|
|
+};
|
|
+
|
|
+struct lwtunnel_state {
|
|
+ __u16 type;
|
|
+ __u16 flags;
|
|
+ __u16 headroom;
|
|
+ atomic_t refcnt;
|
|
+ int (*orig_output)(struct net *, struct sock *, struct sk_buff *);
|
|
+ int (*orig_input)(struct sk_buff *);
|
|
+ struct callback_head rcu;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+ __u8 data[0];
|
|
+};
|
|
+
|
|
+union tcp_word_hdr {
|
|
+ struct tcphdr hdr;
|
|
+ __be32 words[5];
|
|
+};
|
|
+
|
|
+enum devlink_dpipe_field_mapping_type {
|
|
+ DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0,
|
|
+ DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 1,
|
|
+};
|
|
+
|
|
+struct devlink_dpipe_field {
|
|
+ const char *name;
|
|
+ unsigned int id;
|
|
+ unsigned int bitwidth;
|
|
+ enum devlink_dpipe_field_mapping_type mapping_type;
|
|
+};
|
|
+
|
|
+struct devlink_dpipe_header {
|
|
+ const char *name;
|
|
+ unsigned int id;
|
|
+ struct devlink_dpipe_field *fields;
|
|
+ unsigned int fields_count;
|
|
+ bool global;
|
|
+};
|
|
+
|
|
+struct arphdr {
|
|
+ __be16 ar_hrd;
|
|
+ __be16 ar_pro;
|
|
+ unsigned char ar_hln;
|
|
+ unsigned char ar_pln;
|
|
+ __be16 ar_op;
|
|
+};
|
|
+
|
|
+struct switchdev_trans {
|
|
+ struct list_head item_list;
|
|
+ bool ph_prepare;
|
|
+};
|
|
+
|
|
+enum switchdev_attr_id {
|
|
+ SWITCHDEV_ATTR_ID_UNDEFINED = 0,
|
|
+ SWITCHDEV_ATTR_ID_PORT_PARENT_ID = 1,
|
|
+ SWITCHDEV_ATTR_ID_PORT_STP_STATE = 2,
|
|
+ SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS = 3,
|
|
+ SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT = 4,
|
|
+ SWITCHDEV_ATTR_ID_PORT_MROUTER = 5,
|
|
+ SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME = 6,
|
|
+ SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING = 7,
|
|
+ SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED = 8,
|
|
+ SWITCHDEV_ATTR_ID_BRIDGE_MROUTER = 9,
|
|
+};
|
|
+
|
|
+struct switchdev_attr {
|
|
+ struct net_device *orig_dev;
|
|
+ enum switchdev_attr_id id;
|
|
+ u32 flags;
|
|
+ void *complete_priv;
|
|
+ void (*complete)(struct net_device *, int, void *);
|
|
+ union {
|
|
+ struct netdev_phys_item_id ppid;
|
|
+ u8 stp_state;
|
|
+ long unsigned int brport_flags;
|
|
+ long unsigned int brport_flags_support;
|
|
+ bool mrouter;
|
|
+ clock_t ageing_time;
|
|
+ bool vlan_filtering;
|
|
+ bool mc_disabled;
|
|
+ } u;
|
|
+};
|
|
+
|
|
+enum switchdev_obj_id {
|
|
+ SWITCHDEV_OBJ_ID_UNDEFINED = 0,
|
|
+ SWITCHDEV_OBJ_ID_PORT_VLAN = 1,
|
|
+ SWITCHDEV_OBJ_ID_PORT_MDB = 2,
|
|
+ SWITCHDEV_OBJ_ID_HOST_MDB = 3,
|
|
+};
|
|
+
|
|
+struct switchdev_obj {
|
|
+ struct net_device *orig_dev;
|
|
+ enum switchdev_obj_id id;
|
|
+ u32 flags;
|
|
+ void *complete_priv;
|
|
+ void (*complete)(struct net_device *, int, void *);
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+enum lwtunnel_encap_types {
|
|
+ LWTUNNEL_ENCAP_NONE = 0,
|
|
+ LWTUNNEL_ENCAP_MPLS = 1,
|
|
+ LWTUNNEL_ENCAP_IP = 2,
|
|
+ LWTUNNEL_ENCAP_ILA = 3,
|
|
+ LWTUNNEL_ENCAP_IP6 = 4,
|
|
+ LWTUNNEL_ENCAP_SEG6 = 5,
|
|
+ LWTUNNEL_ENCAP_BPF = 6,
|
|
+ LWTUNNEL_ENCAP_SEG6_LOCAL = 7,
|
|
+ __LWTUNNEL_ENCAP_MAX = 8,
|
|
+};
|
|
+
|
|
+struct dst_cache_pcpu;
|
|
+
|
|
+struct dst_cache {
|
|
+ struct dst_cache_pcpu *cache;
|
|
+ long unsigned int reset_ts;
|
|
+};
|
|
+
|
|
+struct ipv6_stub {
|
|
+ int (*ipv6_sock_mc_join)(struct sock *, int, const struct in6_addr *);
|
|
+ int (*ipv6_sock_mc_drop)(struct sock *, int, const struct in6_addr *);
|
|
+ struct dst_entry * (*ipv6_dst_lookup_flow)(struct net *, const struct sock *, struct flowi6 *, const struct in6_addr *);
|
|
+ struct fib6_table * (*fib6_get_table)(struct net *, u32);
|
|
+ struct fib6_info * (*fib6_lookup)(struct net *, int, struct flowi6 *, int);
|
|
+ struct fib6_info * (*fib6_table_lookup)(struct net *, struct fib6_table *, int, struct flowi6 *, int);
|
|
+ struct fib6_info * (*fib6_multipath_select)(const struct net *, struct fib6_info *, struct flowi6 *, int, const struct sk_buff *, int);
|
|
+ u32 (*ip6_mtu_from_fib6)(struct fib6_info *, struct in6_addr *, struct in6_addr *);
|
|
+ void (*udpv6_encap_enable)();
|
|
+ void (*ndisc_send_na)(struct net_device *, const struct in6_addr *, const struct in6_addr *, bool, bool, bool, bool);
|
|
+ struct neigh_table *nd_tbl;
|
|
+};
|
|
+
|
|
+struct ipv6_bpf_stub {
|
|
+ int (*inet6_bind)(struct sock *, struct sockaddr *, int, bool, bool);
|
|
+};
|
|
+
|
|
+struct ip_tunnel_key {
|
|
+ __be64 tun_id;
|
|
+ union {
|
|
+ struct {
|
|
+ __be32 src;
|
|
+ __be32 dst;
|
|
+ } ipv4;
|
|
+ struct {
|
|
+ struct in6_addr src;
|
|
+ struct in6_addr dst;
|
|
+ } ipv6;
|
|
+ } u;
|
|
+ __be16 tun_flags;
|
|
+ u8 tos;
|
|
+ u8 ttl;
|
|
+ __be32 label;
|
|
+ __be16 tp_src;
|
|
+ __be16 tp_dst;
|
|
+};
|
|
+
|
|
+struct ip_tunnel_info {
|
|
+ struct ip_tunnel_key key;
|
|
+ struct dst_cache dst_cache;
|
|
+ u8 options_len;
|
|
+ u8 mode;
|
|
+};
|
|
+
|
|
+struct ip_tunnel_encap {
|
|
+ u16 type;
|
|
+ u16 flags;
|
|
+ __be16 sport;
|
|
+ __be16 dport;
|
|
+};
|
|
+
|
|
+struct ip_tunnel_encap_ops {
|
|
+ size_t (*encap_hlen)(struct ip_tunnel_encap *);
|
|
+ int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi4 *);
|
|
+};
|
|
+
|
|
+enum metadata_type {
|
|
+ METADATA_IP_TUNNEL = 0,
|
|
+ METADATA_HW_PORT_MUX = 1,
|
|
+};
|
|
+
|
|
+struct hw_port_info {
|
|
+ struct net_device *lower_dev;
|
|
+ u32 port_id;
|
|
+};
|
|
+
|
|
+struct metadata_dst {
|
|
+ struct dst_entry dst;
|
|
+ enum metadata_type type;
|
|
+ union {
|
|
+ struct ip_tunnel_info tun_info;
|
|
+ struct hw_port_info port_info;
|
|
+ } u;
|
|
+};
|
|
+
|
|
+struct gre_base_hdr {
|
|
+ __be16 flags;
|
|
+ __be16 protocol;
|
|
+};
|
|
+
|
|
+struct gre_full_hdr {
|
|
+ struct gre_base_hdr fixed_header;
|
|
+ __be16 csum;
|
|
+ __be16 reserved1;
|
|
+ __be32 key;
|
|
+ __be32 seq;
|
|
+};
|
|
+
|
|
+struct pptp_gre_header {
|
|
+ struct gre_base_hdr gre_hd;
|
|
+ __be16 payload_len;
|
|
+ __be16 call_id;
|
|
+ __be32 seq;
|
|
+ __be32 ack;
|
|
+};
|
|
+
|
|
+struct tipc_basic_hdr {
|
|
+ __be32 w[4];
|
|
+};
|
|
+
|
|
+enum l2tp_debug_flags {
|
|
+ L2TP_MSG_DEBUG = 1,
|
|
+ L2TP_MSG_CONTROL = 2,
|
|
+ L2TP_MSG_SEQ = 4,
|
|
+ L2TP_MSG_DATA = 8,
|
|
+};
|
|
+
|
|
+struct pppoe_tag {
|
|
+ __be16 tag_type;
|
|
+ __be16 tag_len;
|
|
+ char tag_data[0];
|
|
+};
|
|
+
|
|
+struct pppoe_hdr {
|
|
+ __u8 type: 4;
|
|
+ __u8 ver: 4;
|
|
+ __u8 code;
|
|
+ __be16 sid;
|
|
+ __be16 length;
|
|
+ struct pppoe_tag tag[0];
|
|
+};
|
|
+
|
|
+struct mpls_label {
|
|
+ __be32 entry;
|
|
+};
|
|
+
|
|
+enum batadv_packettype {
|
|
+ BATADV_IV_OGM = 0,
|
|
+ BATADV_BCAST = 1,
|
|
+ BATADV_CODED = 2,
|
|
+ BATADV_ELP = 3,
|
|
+ BATADV_OGM2 = 4,
|
|
+ BATADV_UNICAST = 64,
|
|
+ BATADV_UNICAST_FRAG = 65,
|
|
+ BATADV_UNICAST_4ADDR = 66,
|
|
+ BATADV_ICMP = 67,
|
|
+ BATADV_UNICAST_TVLV = 68,
|
|
+};
|
|
+
|
|
+struct batadv_unicast_packet {
|
|
+ __u8 packet_type;
|
|
+ __u8 version;
|
|
+ __u8 ttl;
|
|
+ __u8 ttvn;
|
|
+ __u8 dest[6];
|
|
+};
|
|
+
|
|
+struct _flow_keys_digest_data {
|
|
+ __be16 n_proto;
|
|
+ u8 ip_proto;
|
|
+ u8 padding;
|
|
+ __be32 ports;
|
|
+ __be32 src;
|
|
+ __be32 dst;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IF_OPER_UNKNOWN = 0,
|
|
+ IF_OPER_NOTPRESENT = 1,
|
|
+ IF_OPER_DOWN = 2,
|
|
+ IF_OPER_LOWERLAYERDOWN = 3,
|
|
+ IF_OPER_TESTING = 4,
|
|
+ IF_OPER_DORMANT = 5,
|
|
+ IF_OPER_UP = 6,
|
|
+};
|
|
+
|
|
+struct ipv4_devconf {
|
|
+ void *sysctl;
|
|
+ int data[32];
|
|
+ long unsigned int state[1];
|
|
+};
|
|
+
|
|
+enum nf_dev_hooks {
|
|
+ NF_NETDEV_INGRESS = 0,
|
|
+ NF_NETDEV_NUMHOOKS = 1,
|
|
+};
|
|
+
|
|
+struct ifbond {
|
|
+ __s32 bond_mode;
|
|
+ __s32 num_slaves;
|
|
+ __s32 miimon;
|
|
+};
|
|
+
|
|
+typedef struct ifbond ifbond;
|
|
+
|
|
+struct ifslave {
|
|
+ __s32 slave_id;
|
|
+ char slave_name[16];
|
|
+ __s8 link;
|
|
+ __s8 state;
|
|
+ __u32 link_failure_count;
|
|
+};
|
|
+
|
|
+typedef struct ifslave ifslave;
|
|
+
|
|
+enum netdev_state_t {
|
|
+ __LINK_STATE_START = 0,
|
|
+ __LINK_STATE_PRESENT = 1,
|
|
+ __LINK_STATE_NOCARRIER = 2,
|
|
+ __LINK_STATE_LINKWATCH_PENDING = 3,
|
|
+ __LINK_STATE_DORMANT = 4,
|
|
+};
|
|
+
|
|
+struct netdev_boot_setup {
|
|
+ char name[16];
|
|
+ struct ifmap map;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NAPIF_STATE_SCHED = 1,
|
|
+ NAPIF_STATE_MISSED = 2,
|
|
+ NAPIF_STATE_DISABLE = 4,
|
|
+ NAPIF_STATE_NPSVC = 8,
|
|
+ NAPIF_STATE_HASHED = 16,
|
|
+ NAPIF_STATE_NO_BUSY_POLL = 32,
|
|
+ NAPIF_STATE_IN_BUSY_POLL = 64,
|
|
+};
|
|
+
|
|
+enum gro_result {
|
|
+ GRO_MERGED = 0,
|
|
+ GRO_MERGED_FREE = 1,
|
|
+ GRO_HELD = 2,
|
|
+ GRO_NORMAL = 3,
|
|
+ GRO_DROP = 4,
|
|
+ GRO_CONSUMED = 5,
|
|
+};
|
|
+
|
|
+typedef enum gro_result gro_result_t;
|
|
+
|
|
+enum netdev_queue_state_t {
|
|
+ __QUEUE_STATE_DRV_XOFF = 0,
|
|
+ __QUEUE_STATE_STACK_XOFF = 1,
|
|
+ __QUEUE_STATE_FROZEN = 2,
|
|
+};
|
|
+
|
|
+struct netpoll;
|
|
+
|
|
+struct netpoll_info {
|
|
+ refcount_t refcnt;
|
|
+ struct semaphore dev_lock;
|
|
+ struct sk_buff_head txq;
|
|
+ struct delayed_work tx_work;
|
|
+ struct netpoll *netpoll;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct udp_tunnel_info {
|
|
+ short unsigned int type;
|
|
+ sa_family_t sa_family;
|
|
+ __be16 port;
|
|
+};
|
|
+
|
|
+struct in_ifaddr;
|
|
+
|
|
+struct ip_mc_list;
|
|
+
|
|
+struct in_device {
|
|
+ struct net_device *dev;
|
|
+ refcount_t refcnt;
|
|
+ int dead;
|
|
+ struct in_ifaddr *ifa_list;
|
|
+ struct ip_mc_list *mc_list;
|
|
+ struct ip_mc_list **mc_hash;
|
|
+ int mc_count;
|
|
+ spinlock_t mc_tomb_lock;
|
|
+ struct ip_mc_list *mc_tomb;
|
|
+ long unsigned int mr_v1_seen;
|
|
+ long unsigned int mr_v2_seen;
|
|
+ long unsigned int mr_maxdelay;
|
|
+ long unsigned int mr_qi;
|
|
+ long unsigned int mr_qri;
|
|
+ unsigned char mr_qrv;
|
|
+ unsigned char mr_gq_running;
|
|
+ unsigned char mr_ifc_count;
|
|
+ struct timer_list mr_gq_timer;
|
|
+ struct timer_list mr_ifc_timer;
|
|
+ struct neigh_parms *arp_parms;
|
|
+ struct ipv4_devconf cnf;
|
|
+ struct callback_head callback_head;
|
|
+};
|
|
+
|
|
+struct packet_type {
|
|
+ __be16 type;
|
|
+ struct net_device *dev;
|
|
+ int (*func)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
|
|
+ void (*list_func)(struct list_head *, struct packet_type *, struct net_device *);
|
|
+ bool (*id_match)(struct packet_type *, struct sock *);
|
|
+ void *af_packet_priv;
|
|
+ struct list_head list;
|
|
+ long unsigned int kabi_reserved1;
|
|
+ long unsigned int kabi_reserved2;
|
|
+ long unsigned int kabi_reserved3;
|
|
+ long unsigned int kabi_reserved4;
|
|
+};
|
|
+
|
|
+struct packet_offload {
|
|
+ __be16 type;
|
|
+ u16 priority;
|
|
+ struct offload_callbacks callbacks;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct netdev_notifier_info_ext {
|
|
+ struct netdev_notifier_info info;
|
|
+ union {
|
|
+ u32 mtu;
|
|
+ } ext;
|
|
+};
|
|
+
|
|
+struct netdev_notifier_change_info {
|
|
+ struct netdev_notifier_info info;
|
|
+ unsigned int flags_changed;
|
|
+};
|
|
+
|
|
+struct netdev_notifier_changeupper_info {
|
|
+ struct netdev_notifier_info info;
|
|
+ struct net_device *upper_dev;
|
|
+ bool master;
|
|
+ bool linking;
|
|
+ void *upper_info;
|
|
+};
|
|
+
|
|
+struct netdev_notifier_changelowerstate_info {
|
|
+ struct netdev_notifier_info info;
|
|
+ void *lower_state_info;
|
|
+};
|
|
+
|
|
+typedef int (*bpf_op_t)(struct net_device *, struct netdev_bpf *);
|
|
+
|
|
+struct netdev_bonding_info {
|
|
+ ifslave slave;
|
|
+ ifbond master;
|
|
+};
|
|
+
|
|
+struct netdev_notifier_bonding_info {
|
|
+ struct netdev_notifier_info info;
|
|
+ struct netdev_bonding_info bonding_info;
|
|
+};
|
|
+
|
|
+union inet_addr {
|
|
+ __u32 all[4];
|
|
+ __be32 ip;
|
|
+ __be32 ip6[4];
|
|
+ struct in_addr in;
|
|
+ struct in6_addr in6;
|
|
+};
|
|
+
|
|
+struct netpoll {
|
|
+ struct net_device *dev;
|
|
+ char dev_name[16];
|
|
+ const char *name;
|
|
+ union inet_addr local_ip;
|
|
+ union inet_addr remote_ip;
|
|
+ bool ipv6;
|
|
+ u16 local_port;
|
|
+ u16 remote_port;
|
|
+ u8 remote_mac[6];
|
|
+ struct work_struct cleanup_work;
|
|
+};
|
|
+
|
|
+enum qdisc_state_t {
|
|
+ __QDISC_STATE_SCHED = 0,
|
|
+ __QDISC_STATE_DEACTIVATED = 1,
|
|
+};
|
|
+
|
|
+struct tcf_walker {
|
|
+ int stop;
|
|
+ int skip;
|
|
+ int count;
|
|
+ long unsigned int cookie;
|
|
+ int (*fn)(struct tcf_proto *, void *, struct tcf_walker *);
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IPV4_DEVCONF_FORWARDING = 1,
|
|
+ IPV4_DEVCONF_MC_FORWARDING = 2,
|
|
+ IPV4_DEVCONF_PROXY_ARP = 3,
|
|
+ IPV4_DEVCONF_ACCEPT_REDIRECTS = 4,
|
|
+ IPV4_DEVCONF_SECURE_REDIRECTS = 5,
|
|
+ IPV4_DEVCONF_SEND_REDIRECTS = 6,
|
|
+ IPV4_DEVCONF_SHARED_MEDIA = 7,
|
|
+ IPV4_DEVCONF_RP_FILTER = 8,
|
|
+ IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE = 9,
|
|
+ IPV4_DEVCONF_BOOTP_RELAY = 10,
|
|
+ IPV4_DEVCONF_LOG_MARTIANS = 11,
|
|
+ IPV4_DEVCONF_TAG = 12,
|
|
+ IPV4_DEVCONF_ARPFILTER = 13,
|
|
+ IPV4_DEVCONF_MEDIUM_ID = 14,
|
|
+ IPV4_DEVCONF_NOXFRM = 15,
|
|
+ IPV4_DEVCONF_NOPOLICY = 16,
|
|
+ IPV4_DEVCONF_FORCE_IGMP_VERSION = 17,
|
|
+ IPV4_DEVCONF_ARP_ANNOUNCE = 18,
|
|
+ IPV4_DEVCONF_ARP_IGNORE = 19,
|
|
+ IPV4_DEVCONF_PROMOTE_SECONDARIES = 20,
|
|
+ IPV4_DEVCONF_ARP_ACCEPT = 21,
|
|
+ IPV4_DEVCONF_ARP_NOTIFY = 22,
|
|
+ IPV4_DEVCONF_ACCEPT_LOCAL = 23,
|
|
+ IPV4_DEVCONF_SRC_VMARK = 24,
|
|
+ IPV4_DEVCONF_PROXY_ARP_PVLAN = 25,
|
|
+ IPV4_DEVCONF_ROUTE_LOCALNET = 26,
|
|
+ IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL = 27,
|
|
+ IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL = 28,
|
|
+ IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 29,
|
|
+ IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 30,
|
|
+ IPV4_DEVCONF_DROP_GRATUITOUS_ARP = 31,
|
|
+ IPV4_DEVCONF_BC_FORWARDING = 32,
|
|
+ __IPV4_DEVCONF_MAX = 33,
|
|
+};
|
|
+
|
|
+struct in_ifaddr {
|
|
+ struct hlist_node hash;
|
|
+ struct in_ifaddr *ifa_next;
|
|
+ struct in_device *ifa_dev;
|
|
+ struct callback_head callback_head;
|
|
+ __be32 ifa_local;
|
|
+ __be32 ifa_address;
|
|
+ __be32 ifa_mask;
|
|
+ __u32 ifa_rt_priority;
|
|
+ __be32 ifa_broadcast;
|
|
+ unsigned char ifa_scope;
|
|
+ unsigned char ifa_prefixlen;
|
|
+ __u32 ifa_flags;
|
|
+ char ifa_label[16];
|
|
+ __u32 ifa_valid_lft;
|
|
+ __u32 ifa_preferred_lft;
|
|
+ long unsigned int ifa_cstamp;
|
|
+ long unsigned int ifa_tstamp;
|
|
+};
|
|
+
|
|
+struct dev_kfree_skb_cb {
|
|
+ enum skb_free_reason reason;
|
|
+};
|
|
+
|
|
+struct netdev_adjacent {
|
|
+ struct net_device *dev;
|
|
+ bool master;
|
|
+ u16 ref_nr;
|
|
+ void *private;
|
|
+ struct list_head list;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+typedef struct sk_buff *pto_T_____30;
|
|
+
|
|
+typedef __u32 pao_T_____8;
|
|
+
|
|
+typedef u16 pao_T_____9;
|
|
+
|
|
+struct ethtool_value {
|
|
+ __u32 cmd;
|
|
+ __u32 data;
|
|
+};
|
|
+
|
|
+enum tunable_id {
|
|
+ ETHTOOL_ID_UNSPEC = 0,
|
|
+ ETHTOOL_RX_COPYBREAK = 1,
|
|
+ ETHTOOL_TX_COPYBREAK = 2,
|
|
+ ETHTOOL_PFC_PREVENTION_TOUT = 3,
|
|
+ __ETHTOOL_TUNABLE_COUNT = 4,
|
|
+};
|
|
+
|
|
+enum tunable_type_id {
|
|
+ ETHTOOL_TUNABLE_UNSPEC = 0,
|
|
+ ETHTOOL_TUNABLE_U8 = 1,
|
|
+ ETHTOOL_TUNABLE_U16 = 2,
|
|
+ ETHTOOL_TUNABLE_U32 = 3,
|
|
+ ETHTOOL_TUNABLE_U64 = 4,
|
|
+ ETHTOOL_TUNABLE_STRING = 5,
|
|
+ ETHTOOL_TUNABLE_S8 = 6,
|
|
+ ETHTOOL_TUNABLE_S16 = 7,
|
|
+ ETHTOOL_TUNABLE_S32 = 8,
|
|
+ ETHTOOL_TUNABLE_S64 = 9,
|
|
+};
|
|
+
|
|
+enum phy_tunable_id {
|
|
+ ETHTOOL_PHY_ID_UNSPEC = 0,
|
|
+ ETHTOOL_PHY_DOWNSHIFT = 1,
|
|
+ __ETHTOOL_PHY_TUNABLE_COUNT = 2,
|
|
+};
|
|
+
|
|
+enum ethtool_stringset {
|
|
+ ETH_SS_TEST = 0,
|
|
+ ETH_SS_STATS = 1,
|
|
+ ETH_SS_PRIV_FLAGS = 2,
|
|
+ ETH_SS_NTUPLE_FILTERS = 3,
|
|
+ ETH_SS_FEATURES = 4,
|
|
+ ETH_SS_RSS_HASH_FUNCS = 5,
|
|
+ ETH_SS_TUNABLES = 6,
|
|
+ ETH_SS_PHY_STATS = 7,
|
|
+ ETH_SS_PHY_TUNABLES = 8,
|
|
+};
|
|
+
|
|
+struct ethtool_gstrings {
|
|
+ __u32 cmd;
|
|
+ __u32 string_set;
|
|
+ __u32 len;
|
|
+ __u8 data[0];
|
|
+};
|
|
+
|
|
+struct ethtool_sset_info {
|
|
+ __u32 cmd;
|
|
+ __u32 reserved;
|
|
+ __u64 sset_mask;
|
|
+ __u32 data[0];
|
|
+};
|
|
+
|
|
+struct ethtool_perm_addr {
|
|
+ __u32 cmd;
|
|
+ __u32 size;
|
|
+ __u8 data[0];
|
|
+};
|
|
+
|
|
+enum ethtool_flags {
|
|
+ ETH_FLAG_TXVLAN = 128,
|
|
+ ETH_FLAG_RXVLAN = 256,
|
|
+ ETH_FLAG_LRO = 32768,
|
|
+ ETH_FLAG_NTUPLE = 134217728,
|
|
+ ETH_FLAG_RXHASH = 268435456,
|
|
+};
|
|
+
|
|
+struct ethtool_rxfh {
|
|
+ __u32 cmd;
|
|
+ __u32 rss_context;
|
|
+ __u32 indir_size;
|
|
+ __u32 key_size;
|
|
+ __u8 hfunc;
|
|
+ __u8 rsvd8[3];
|
|
+ __u32 rsvd32;
|
|
+ __u32 rss_config[0];
|
|
+};
|
|
+
|
|
+struct ethtool_get_features_block {
|
|
+ __u32 available;
|
|
+ __u32 requested;
|
|
+ __u32 active;
|
|
+ __u32 never_changed;
|
|
+};
|
|
+
|
|
+struct ethtool_gfeatures {
|
|
+ __u32 cmd;
|
|
+ __u32 size;
|
|
+ struct ethtool_get_features_block features[0];
|
|
+};
|
|
+
|
|
+struct ethtool_set_features_block {
|
|
+ __u32 valid;
|
|
+ __u32 requested;
|
|
+};
|
|
+
|
|
+struct ethtool_sfeatures {
|
|
+ __u32 cmd;
|
|
+ __u32 size;
|
|
+ struct ethtool_set_features_block features[0];
|
|
+};
|
|
+
|
|
+enum ethtool_sfeatures_retval_bits {
|
|
+ ETHTOOL_F_UNSUPPORTED__BIT = 0,
|
|
+ ETHTOOL_F_WISH__BIT = 1,
|
|
+ ETHTOOL_F_COMPAT__BIT = 2,
|
|
+};
|
|
+
|
|
+struct ethtool_per_queue_op {
|
|
+ __u32 cmd;
|
|
+ __u32 sub_command;
|
|
+ __u32 queue_mask[128];
|
|
+ char data[0];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ ETH_RSS_HASH_TOP_BIT = 0,
|
|
+ ETH_RSS_HASH_XOR_BIT = 1,
|
|
+ ETH_RSS_HASH_CRC32_BIT = 2,
|
|
+ ETH_RSS_HASH_FUNCS_COUNT = 3,
|
|
+};
|
|
+
|
|
+struct ethtool_link_usettings {
|
|
+ struct ethtool_link_settings base;
|
|
+ struct {
|
|
+ __u32 supported[2];
|
|
+ __u32 advertising[2];
|
|
+ __u32 lp_advertising[2];
|
|
+ } link_modes;
|
|
+};
|
|
+
|
|
+struct netdev_hw_addr {
|
|
+ struct list_head list;
|
|
+ unsigned char addr[32];
|
|
+ unsigned char type;
|
|
+ bool global_use;
|
|
+ int sync_cnt;
|
|
+ int refcount;
|
|
+ int synced;
|
|
+ struct callback_head callback_head;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NDA_UNSPEC = 0,
|
|
+ NDA_DST = 1,
|
|
+ NDA_LLADDR = 2,
|
|
+ NDA_CACHEINFO = 3,
|
|
+ NDA_PROBES = 4,
|
|
+ NDA_VLAN = 5,
|
|
+ NDA_PORT = 6,
|
|
+ NDA_VNI = 7,
|
|
+ NDA_IFINDEX = 8,
|
|
+ NDA_MASTER = 9,
|
|
+ NDA_LINK_NETNSID = 10,
|
|
+ NDA_SRC_VNI = 11,
|
|
+ __NDA_MAX = 12,
|
|
+};
|
|
+
|
|
+struct nda_cacheinfo {
|
|
+ __u32 ndm_confirmed;
|
|
+ __u32 ndm_used;
|
|
+ __u32 ndm_updated;
|
|
+ __u32 ndm_refcnt;
|
|
+};
|
|
+
|
|
+struct ndt_stats {
|
|
+ __u64 ndts_allocs;
|
|
+ __u64 ndts_destroys;
|
|
+ __u64 ndts_hash_grows;
|
|
+ __u64 ndts_res_failed;
|
|
+ __u64 ndts_lookups;
|
|
+ __u64 ndts_hits;
|
|
+ __u64 ndts_rcv_probes_mcast;
|
|
+ __u64 ndts_rcv_probes_ucast;
|
|
+ __u64 ndts_periodic_gc_runs;
|
|
+ __u64 ndts_forced_gc_runs;
|
|
+ __u64 ndts_table_fulls;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NDTPA_UNSPEC = 0,
|
|
+ NDTPA_IFINDEX = 1,
|
|
+ NDTPA_REFCNT = 2,
|
|
+ NDTPA_REACHABLE_TIME = 3,
|
|
+ NDTPA_BASE_REACHABLE_TIME = 4,
|
|
+ NDTPA_RETRANS_TIME = 5,
|
|
+ NDTPA_GC_STALETIME = 6,
|
|
+ NDTPA_DELAY_PROBE_TIME = 7,
|
|
+ NDTPA_QUEUE_LEN = 8,
|
|
+ NDTPA_APP_PROBES = 9,
|
|
+ NDTPA_UCAST_PROBES = 10,
|
|
+ NDTPA_MCAST_PROBES = 11,
|
|
+ NDTPA_ANYCAST_DELAY = 12,
|
|
+ NDTPA_PROXY_DELAY = 13,
|
|
+ NDTPA_PROXY_QLEN = 14,
|
|
+ NDTPA_LOCKTIME = 15,
|
|
+ NDTPA_QUEUE_LENBYTES = 16,
|
|
+ NDTPA_MCAST_REPROBES = 17,
|
|
+ NDTPA_PAD = 18,
|
|
+ __NDTPA_MAX = 19,
|
|
+};
|
|
+
|
|
+struct ndtmsg {
|
|
+ __u8 ndtm_family;
|
|
+ __u8 ndtm_pad1;
|
|
+ __u16 ndtm_pad2;
|
|
+};
|
|
+
|
|
+struct ndt_config {
|
|
+ __u16 ndtc_key_len;
|
|
+ __u16 ndtc_entry_size;
|
|
+ __u32 ndtc_entries;
|
|
+ __u32 ndtc_last_flush;
|
|
+ __u32 ndtc_last_rand;
|
|
+ __u32 ndtc_hash_rnd;
|
|
+ __u32 ndtc_hash_mask;
|
|
+ __u32 ndtc_hash_chain_gc;
|
|
+ __u32 ndtc_proxy_qlen;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NDTA_UNSPEC = 0,
|
|
+ NDTA_NAME = 1,
|
|
+ NDTA_THRESH1 = 2,
|
|
+ NDTA_THRESH2 = 3,
|
|
+ NDTA_THRESH3 = 4,
|
|
+ NDTA_CONFIG = 5,
|
|
+ NDTA_PARMS = 6,
|
|
+ NDTA_STATS = 7,
|
|
+ NDTA_GC_INTERVAL = 8,
|
|
+ NDTA_PAD = 9,
|
|
+ __NDTA_MAX = 10,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ RTN_UNSPEC = 0,
|
|
+ RTN_UNICAST = 1,
|
|
+ RTN_LOCAL = 2,
|
|
+ RTN_BROADCAST = 3,
|
|
+ RTN_ANYCAST = 4,
|
|
+ RTN_MULTICAST = 5,
|
|
+ RTN_BLACKHOLE = 6,
|
|
+ RTN_UNREACHABLE = 7,
|
|
+ RTN_PROHIBIT = 8,
|
|
+ RTN_THROW = 9,
|
|
+ RTN_NAT = 10,
|
|
+ RTN_XRESOLVE = 11,
|
|
+ __RTN_MAX = 12,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NEIGH_ARP_TABLE = 0,
|
|
+ NEIGH_ND_TABLE = 1,
|
|
+ NEIGH_DN_TABLE = 2,
|
|
+ NEIGH_NR_TABLES = 3,
|
|
+ NEIGH_LINK_TABLE = 3,
|
|
+};
|
|
+
|
|
+struct neigh_seq_state {
|
|
+ struct seq_net_private p;
|
|
+ struct neigh_table *tbl;
|
|
+ struct neigh_hash_table *nht;
|
|
+ void * (*neigh_sub_iter)(struct neigh_seq_state *, struct neighbour *, loff_t *);
|
|
+ unsigned int bucket;
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+struct neighbour_cb {
|
|
+ long unsigned int sched_next;
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+enum netevent_notif_type {
|
|
+ NETEVENT_NEIGH_UPDATE = 1,
|
|
+ NETEVENT_REDIRECT = 2,
|
|
+ NETEVENT_DELAY_PROBE_TIME_UPDATE = 3,
|
|
+ NETEVENT_IPV4_MPATH_HASH_UPDATE = 4,
|
|
+ NETEVENT_IPV6_MPATH_HASH_UPDATE = 5,
|
|
+ NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE = 6,
|
|
+};
|
|
+
|
|
+struct neigh_sysctl_table {
|
|
+ struct ctl_table_header *sysctl_header;
|
|
+ struct ctl_table neigh_vars[21];
|
|
+};
|
|
+
|
|
+struct netlink_dump_control {
|
|
+ int (*start)(struct netlink_callback *);
|
|
+ int (*dump)(struct sk_buff *, struct netlink_callback *);
|
|
+ int (*done)(struct netlink_callback *);
|
|
+ void *data;
|
|
+ struct module *module;
|
|
+ u16 min_dump_alloc;
|
|
+};
|
|
+
|
|
+struct rtnl_link_stats {
|
|
+ __u32 rx_packets;
|
|
+ __u32 tx_packets;
|
|
+ __u32 rx_bytes;
|
|
+ __u32 tx_bytes;
|
|
+ __u32 rx_errors;
|
|
+ __u32 tx_errors;
|
|
+ __u32 rx_dropped;
|
|
+ __u32 tx_dropped;
|
|
+ __u32 multicast;
|
|
+ __u32 collisions;
|
|
+ __u32 rx_length_errors;
|
|
+ __u32 rx_over_errors;
|
|
+ __u32 rx_crc_errors;
|
|
+ __u32 rx_frame_errors;
|
|
+ __u32 rx_fifo_errors;
|
|
+ __u32 rx_missed_errors;
|
|
+ __u32 tx_aborted_errors;
|
|
+ __u32 tx_carrier_errors;
|
|
+ __u32 tx_fifo_errors;
|
|
+ __u32 tx_heartbeat_errors;
|
|
+ __u32 tx_window_errors;
|
|
+ __u32 rx_compressed;
|
|
+ __u32 tx_compressed;
|
|
+ __u32 rx_nohandler;
|
|
+};
|
|
+
|
|
+struct rtnl_link_ifmap {
|
|
+ __u64 mem_start;
|
|
+ __u64 mem_end;
|
|
+ __u64 base_addr;
|
|
+ __u16 irq;
|
|
+ __u8 dma;
|
|
+ __u8 port;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IFLA_UNSPEC = 0,
|
|
+ IFLA_ADDRESS = 1,
|
|
+ IFLA_BROADCAST = 2,
|
|
+ IFLA_IFNAME = 3,
|
|
+ IFLA_MTU = 4,
|
|
+ IFLA_LINK = 5,
|
|
+ IFLA_QDISC = 6,
|
|
+ IFLA_STATS = 7,
|
|
+ IFLA_COST = 8,
|
|
+ IFLA_PRIORITY = 9,
|
|
+ IFLA_MASTER = 10,
|
|
+ IFLA_WIRELESS = 11,
|
|
+ IFLA_PROTINFO = 12,
|
|
+ IFLA_TXQLEN = 13,
|
|
+ IFLA_MAP = 14,
|
|
+ IFLA_WEIGHT = 15,
|
|
+ IFLA_OPERSTATE = 16,
|
|
+ IFLA_LINKMODE = 17,
|
|
+ IFLA_LINKINFO = 18,
|
|
+ IFLA_NET_NS_PID = 19,
|
|
+ IFLA_IFALIAS = 20,
|
|
+ IFLA_NUM_VF = 21,
|
|
+ IFLA_VFINFO_LIST = 22,
|
|
+ IFLA_STATS64 = 23,
|
|
+ IFLA_VF_PORTS = 24,
|
|
+ IFLA_PORT_SELF = 25,
|
|
+ IFLA_AF_SPEC = 26,
|
|
+ IFLA_GROUP = 27,
|
|
+ IFLA_NET_NS_FD = 28,
|
|
+ IFLA_EXT_MASK = 29,
|
|
+ IFLA_PROMISCUITY = 30,
|
|
+ IFLA_NUM_TX_QUEUES = 31,
|
|
+ IFLA_NUM_RX_QUEUES = 32,
|
|
+ IFLA_CARRIER = 33,
|
|
+ IFLA_PHYS_PORT_ID = 34,
|
|
+ IFLA_CARRIER_CHANGES = 35,
|
|
+ IFLA_PHYS_SWITCH_ID = 36,
|
|
+ IFLA_LINK_NETNSID = 37,
|
|
+ IFLA_PHYS_PORT_NAME = 38,
|
|
+ IFLA_PROTO_DOWN = 39,
|
|
+ IFLA_GSO_MAX_SEGS = 40,
|
|
+ IFLA_GSO_MAX_SIZE = 41,
|
|
+ IFLA_PAD = 42,
|
|
+ IFLA_XDP = 43,
|
|
+ IFLA_EVENT = 44,
|
|
+ IFLA_NEW_NETNSID = 45,
|
|
+ IFLA_IF_NETNSID = 46,
|
|
+ IFLA_CARRIER_UP_COUNT = 47,
|
|
+ IFLA_CARRIER_DOWN_COUNT = 48,
|
|
+ IFLA_NEW_IFINDEX = 49,
|
|
+ IFLA_MIN_MTU = 50,
|
|
+ IFLA_MAX_MTU = 51,
|
|
+ __IFLA_MAX = 52,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IFLA_BRPORT_UNSPEC = 0,
|
|
+ IFLA_BRPORT_STATE = 1,
|
|
+ IFLA_BRPORT_PRIORITY = 2,
|
|
+ IFLA_BRPORT_COST = 3,
|
|
+ IFLA_BRPORT_MODE = 4,
|
|
+ IFLA_BRPORT_GUARD = 5,
|
|
+ IFLA_BRPORT_PROTECT = 6,
|
|
+ IFLA_BRPORT_FAST_LEAVE = 7,
|
|
+ IFLA_BRPORT_LEARNING = 8,
|
|
+ IFLA_BRPORT_UNICAST_FLOOD = 9,
|
|
+ IFLA_BRPORT_PROXYARP = 10,
|
|
+ IFLA_BRPORT_LEARNING_SYNC = 11,
|
|
+ IFLA_BRPORT_PROXYARP_WIFI = 12,
|
|
+ IFLA_BRPORT_ROOT_ID = 13,
|
|
+ IFLA_BRPORT_BRIDGE_ID = 14,
|
|
+ IFLA_BRPORT_DESIGNATED_PORT = 15,
|
|
+ IFLA_BRPORT_DESIGNATED_COST = 16,
|
|
+ IFLA_BRPORT_ID = 17,
|
|
+ IFLA_BRPORT_NO = 18,
|
|
+ IFLA_BRPORT_TOPOLOGY_CHANGE_ACK = 19,
|
|
+ IFLA_BRPORT_CONFIG_PENDING = 20,
|
|
+ IFLA_BRPORT_MESSAGE_AGE_TIMER = 21,
|
|
+ IFLA_BRPORT_FORWARD_DELAY_TIMER = 22,
|
|
+ IFLA_BRPORT_HOLD_TIMER = 23,
|
|
+ IFLA_BRPORT_FLUSH = 24,
|
|
+ IFLA_BRPORT_MULTICAST_ROUTER = 25,
|
|
+ IFLA_BRPORT_PAD = 26,
|
|
+ IFLA_BRPORT_MCAST_FLOOD = 27,
|
|
+ IFLA_BRPORT_MCAST_TO_UCAST = 28,
|
|
+ IFLA_BRPORT_VLAN_TUNNEL = 29,
|
|
+ IFLA_BRPORT_BCAST_FLOOD = 30,
|
|
+ IFLA_BRPORT_GROUP_FWD_MASK = 31,
|
|
+ IFLA_BRPORT_NEIGH_SUPPRESS = 32,
|
|
+ IFLA_BRPORT_ISOLATED = 33,
|
|
+ IFLA_BRPORT_BACKUP_PORT = 34,
|
|
+ __IFLA_BRPORT_MAX = 35,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IFLA_INFO_UNSPEC = 0,
|
|
+ IFLA_INFO_KIND = 1,
|
|
+ IFLA_INFO_DATA = 2,
|
|
+ IFLA_INFO_XSTATS = 3,
|
|
+ IFLA_INFO_SLAVE_KIND = 4,
|
|
+ IFLA_INFO_SLAVE_DATA = 5,
|
|
+ __IFLA_INFO_MAX = 6,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IFLA_VF_INFO_UNSPEC = 0,
|
|
+ IFLA_VF_INFO = 1,
|
|
+ __IFLA_VF_INFO_MAX = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IFLA_VF_UNSPEC = 0,
|
|
+ IFLA_VF_MAC = 1,
|
|
+ IFLA_VF_VLAN = 2,
|
|
+ IFLA_VF_TX_RATE = 3,
|
|
+ IFLA_VF_SPOOFCHK = 4,
|
|
+ IFLA_VF_LINK_STATE = 5,
|
|
+ IFLA_VF_RATE = 6,
|
|
+ IFLA_VF_RSS_QUERY_EN = 7,
|
|
+ IFLA_VF_STATS = 8,
|
|
+ IFLA_VF_TRUST = 9,
|
|
+ IFLA_VF_IB_NODE_GUID = 10,
|
|
+ IFLA_VF_IB_PORT_GUID = 11,
|
|
+ IFLA_VF_VLAN_LIST = 12,
|
|
+ __IFLA_VF_MAX = 13,
|
|
+};
|
|
+
|
|
+struct ifla_vf_mac {
|
|
+ __u32 vf;
|
|
+ __u8 mac[32];
|
|
+};
|
|
+
|
|
+struct ifla_vf_vlan {
|
|
+ __u32 vf;
|
|
+ __u32 vlan;
|
|
+ __u32 qos;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IFLA_VF_VLAN_INFO_UNSPEC = 0,
|
|
+ IFLA_VF_VLAN_INFO = 1,
|
|
+ __IFLA_VF_VLAN_INFO_MAX = 2,
|
|
+};
|
|
+
|
|
+struct ifla_vf_vlan_info {
|
|
+ __u32 vf;
|
|
+ __u32 vlan;
|
|
+ __u32 qos;
|
|
+ __be16 vlan_proto;
|
|
+};
|
|
+
|
|
+struct ifla_vf_tx_rate {
|
|
+ __u32 vf;
|
|
+ __u32 rate;
|
|
+};
|
|
+
|
|
+struct ifla_vf_rate {
|
|
+ __u32 vf;
|
|
+ __u32 min_tx_rate;
|
|
+ __u32 max_tx_rate;
|
|
+};
|
|
+
|
|
+struct ifla_vf_spoofchk {
|
|
+ __u32 vf;
|
|
+ __u32 setting;
|
|
+};
|
|
+
|
|
+struct ifla_vf_guid {
|
|
+ __u32 vf;
|
|
+ __u64 guid;
|
|
+};
|
|
+
|
|
+struct ifla_vf_link_state {
|
|
+ __u32 vf;
|
|
+ __u32 link_state;
|
|
+};
|
|
+
|
|
+struct ifla_vf_rss_query_en {
|
|
+ __u32 vf;
|
|
+ __u32 setting;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IFLA_VF_STATS_RX_PACKETS = 0,
|
|
+ IFLA_VF_STATS_TX_PACKETS = 1,
|
|
+ IFLA_VF_STATS_RX_BYTES = 2,
|
|
+ IFLA_VF_STATS_TX_BYTES = 3,
|
|
+ IFLA_VF_STATS_BROADCAST = 4,
|
|
+ IFLA_VF_STATS_MULTICAST = 5,
|
|
+ IFLA_VF_STATS_PAD = 6,
|
|
+ IFLA_VF_STATS_RX_DROPPED = 7,
|
|
+ IFLA_VF_STATS_TX_DROPPED = 8,
|
|
+ __IFLA_VF_STATS_MAX = 9,
|
|
+};
|
|
+
|
|
+struct ifla_vf_trust {
|
|
+ __u32 vf;
|
|
+ __u32 setting;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IFLA_VF_PORT_UNSPEC = 0,
|
|
+ IFLA_VF_PORT = 1,
|
|
+ __IFLA_VF_PORT_MAX = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IFLA_PORT_UNSPEC = 0,
|
|
+ IFLA_PORT_VF = 1,
|
|
+ IFLA_PORT_PROFILE = 2,
|
|
+ IFLA_PORT_VSI_TYPE = 3,
|
|
+ IFLA_PORT_INSTANCE_UUID = 4,
|
|
+ IFLA_PORT_HOST_UUID = 5,
|
|
+ IFLA_PORT_REQUEST = 6,
|
|
+ IFLA_PORT_RESPONSE = 7,
|
|
+ __IFLA_PORT_MAX = 8,
|
|
+};
|
|
+
|
|
+struct if_stats_msg {
|
|
+ __u8 family;
|
|
+ __u8 pad1;
|
|
+ __u16 pad2;
|
|
+ __u32 ifindex;
|
|
+ __u32 filter_mask;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IFLA_STATS_UNSPEC = 0,
|
|
+ IFLA_STATS_LINK_64 = 1,
|
|
+ IFLA_STATS_LINK_XSTATS = 2,
|
|
+ IFLA_STATS_LINK_XSTATS_SLAVE = 3,
|
|
+ IFLA_STATS_LINK_OFFLOAD_XSTATS = 4,
|
|
+ IFLA_STATS_AF_SPEC = 5,
|
|
+ __IFLA_STATS_MAX = 6,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IFLA_OFFLOAD_XSTATS_UNSPEC = 0,
|
|
+ IFLA_OFFLOAD_XSTATS_CPU_HIT = 1,
|
|
+ __IFLA_OFFLOAD_XSTATS_MAX = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ XDP_ATTACHED_NONE = 0,
|
|
+ XDP_ATTACHED_DRV = 1,
|
|
+ XDP_ATTACHED_SKB = 2,
|
|
+ XDP_ATTACHED_HW = 3,
|
|
+ XDP_ATTACHED_MULTI = 4,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IFLA_XDP_UNSPEC = 0,
|
|
+ IFLA_XDP_FD = 1,
|
|
+ IFLA_XDP_ATTACHED = 2,
|
|
+ IFLA_XDP_FLAGS = 3,
|
|
+ IFLA_XDP_PROG_ID = 4,
|
|
+ IFLA_XDP_DRV_PROG_ID = 5,
|
|
+ IFLA_XDP_SKB_PROG_ID = 6,
|
|
+ IFLA_XDP_HW_PROG_ID = 7,
|
|
+ __IFLA_XDP_MAX = 8,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IFLA_EVENT_NONE = 0,
|
|
+ IFLA_EVENT_REBOOT = 1,
|
|
+ IFLA_EVENT_FEATURES = 2,
|
|
+ IFLA_EVENT_BONDING_FAILOVER = 3,
|
|
+ IFLA_EVENT_NOTIFY_PEERS = 4,
|
|
+ IFLA_EVENT_IGMP_RESEND = 5,
|
|
+ IFLA_EVENT_BONDING_OPTIONS = 6,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IFLA_BRIDGE_FLAGS = 0,
|
|
+ IFLA_BRIDGE_MODE = 1,
|
|
+ IFLA_BRIDGE_VLAN_INFO = 2,
|
|
+ IFLA_BRIDGE_VLAN_TUNNEL_INFO = 3,
|
|
+ __IFLA_BRIDGE_MAX = 4,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ BR_MCAST_DIR_RX = 0,
|
|
+ BR_MCAST_DIR_TX = 1,
|
|
+ BR_MCAST_DIR_SIZE = 2,
|
|
+};
|
|
+
|
|
+enum rtattr_type_t {
|
|
+ RTA_UNSPEC = 0,
|
|
+ RTA_DST = 1,
|
|
+ RTA_SRC = 2,
|
|
+ RTA_IIF = 3,
|
|
+ RTA_OIF = 4,
|
|
+ RTA_GATEWAY = 5,
|
|
+ RTA_PRIORITY = 6,
|
|
+ RTA_PREFSRC = 7,
|
|
+ RTA_METRICS = 8,
|
|
+ RTA_MULTIPATH = 9,
|
|
+ RTA_PROTOINFO = 10,
|
|
+ RTA_FLOW = 11,
|
|
+ RTA_CACHEINFO = 12,
|
|
+ RTA_SESSION = 13,
|
|
+ RTA_MP_ALGO = 14,
|
|
+ RTA_TABLE = 15,
|
|
+ RTA_MARK = 16,
|
|
+ RTA_MFC_STATS = 17,
|
|
+ RTA_VIA = 18,
|
|
+ RTA_NEWDST = 19,
|
|
+ RTA_PREF = 20,
|
|
+ RTA_ENCAP_TYPE = 21,
|
|
+ RTA_ENCAP = 22,
|
|
+ RTA_EXPIRES = 23,
|
|
+ RTA_PAD = 24,
|
|
+ RTA_UID = 25,
|
|
+ RTA_TTL_PROPAGATE = 26,
|
|
+ RTA_IP_PROTO = 27,
|
|
+ RTA_SPORT = 28,
|
|
+ RTA_DPORT = 29,
|
|
+ __RTA_MAX = 30,
|
|
+};
|
|
+
|
|
+struct rta_cacheinfo {
|
|
+ __u32 rta_clntref;
|
|
+ __u32 rta_lastuse;
|
|
+ __s32 rta_expires;
|
|
+ __u32 rta_error;
|
|
+ __u32 rta_used;
|
|
+ __u32 rta_id;
|
|
+ __u32 rta_ts;
|
|
+ __u32 rta_tsage;
|
|
+};
|
|
+
|
|
+struct ifinfomsg {
|
|
+ unsigned char ifi_family;
|
|
+ unsigned char __ifi_pad;
|
|
+ short unsigned int ifi_type;
|
|
+ int ifi_index;
|
|
+ unsigned int ifi_flags;
|
|
+ unsigned int ifi_change;
|
|
+};
|
|
+
|
|
+typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, struct netlink_ext_ack *);
|
|
+
|
|
+typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
|
|
+
|
|
+struct rtnl_af_ops {
|
|
+ struct list_head list;
|
|
+ int family;
|
|
+ int (*fill_link_af)(struct sk_buff *, const struct net_device *, u32);
|
|
+ size_t (*get_link_af_size)(const struct net_device *, u32);
|
|
+ int (*validate_link_af)(const struct net_device *, const struct nlattr *);
|
|
+ int (*set_link_af)(struct net_device *, const struct nlattr *);
|
|
+ int (*fill_stats_af)(struct sk_buff *, const struct net_device *);
|
|
+ size_t (*get_stats_af_size)(const struct net_device *);
|
|
+};
|
|
+
|
|
+struct rtnl_link {
|
|
+ rtnl_doit_func doit;
|
|
+ rtnl_dumpit_func dumpit;
|
|
+ struct module *owner;
|
|
+ unsigned int flags;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IF_LINK_MODE_DEFAULT = 0,
|
|
+ IF_LINK_MODE_DORMANT = 1,
|
|
+};
|
|
+
|
|
+enum lw_bits {
|
|
+ LW_URGENT = 0,
|
|
+};
|
|
+
|
|
+struct seg6_pernet_data {
|
|
+ struct mutex lock;
|
|
+ struct in6_addr *tun_src;
|
|
+};
|
|
+
|
|
+enum bpf_adj_room_mode {
|
|
+ BPF_ADJ_ROOM_NET = 0,
|
|
+};
|
|
+
|
|
+enum bpf_hdr_start_off {
|
|
+ BPF_HDR_START_MAC = 0,
|
|
+ BPF_HDR_START_NET = 1,
|
|
+};
|
|
+
|
|
+struct bpf_tunnel_key {
|
|
+ __u32 tunnel_id;
|
|
+ union {
|
|
+ __u32 remote_ipv4;
|
|
+ __u32 remote_ipv6[4];
|
|
+ };
|
|
+ __u8 tunnel_tos;
|
|
+ __u8 tunnel_ttl;
|
|
+ __u16 tunnel_ext;
|
|
+ __u32 tunnel_label;
|
|
+};
|
|
+
|
|
+struct bpf_xfrm_state {
|
|
+ __u32 reqid;
|
|
+ __u32 spi;
|
|
+ __u16 family;
|
|
+ __u16 ext;
|
|
+ union {
|
|
+ __u32 remote_ipv4;
|
|
+ __u32 remote_ipv6[4];
|
|
+ };
|
|
+};
|
|
+
|
|
+struct bpf_sock {
|
|
+ __u32 bound_dev_if;
|
|
+ __u32 family;
|
|
+ __u32 type;
|
|
+ __u32 protocol;
|
|
+ __u32 mark;
|
|
+ __u32 priority;
|
|
+ __u32 src_ip4;
|
|
+ __u32 src_ip6[4];
|
|
+ __u32 src_port;
|
|
+};
|
|
+
|
|
+struct sk_reuseport_md {
|
|
+ void *data;
|
|
+ void *data_end;
|
|
+ __u32 len;
|
|
+ __u32 eth_protocol;
|
|
+ __u32 ip_protocol;
|
|
+ __u32 bind_inany;
|
|
+ __u32 hash;
|
|
+};
|
|
+
|
|
+struct bpf_sock_addr {
|
|
+ __u32 user_family;
|
|
+ __u32 user_ip4;
|
|
+ __u32 user_ip6[4];
|
|
+ __u32 user_port;
|
|
+ __u32 family;
|
|
+ __u32 type;
|
|
+ __u32 protocol;
|
|
+ __u32 msg_src_ip4;
|
|
+ __u32 msg_src_ip6[4];
|
|
+};
|
|
+
|
|
+struct bpf_sock_ops {
|
|
+ __u32 op;
|
|
+ union {
|
|
+ __u32 args[4];
|
|
+ __u32 reply;
|
|
+ __u32 replylong[4];
|
|
+ };
|
|
+ __u32 family;
|
|
+ __u32 remote_ip4;
|
|
+ __u32 local_ip4;
|
|
+ __u32 remote_ip6[4];
|
|
+ __u32 local_ip6[4];
|
|
+ __u32 remote_port;
|
|
+ __u32 local_port;
|
|
+ __u32 is_fullsock;
|
|
+ __u32 snd_cwnd;
|
|
+ __u32 srtt_us;
|
|
+ __u32 bpf_sock_ops_cb_flags;
|
|
+ __u32 state;
|
|
+ __u32 rtt_min;
|
|
+ __u32 snd_ssthresh;
|
|
+ __u32 rcv_nxt;
|
|
+ __u32 snd_nxt;
|
|
+ __u32 snd_una;
|
|
+ __u32 mss_cache;
|
|
+ __u32 ecn_flags;
|
|
+ __u32 rate_delivered;
|
|
+ __u32 rate_interval_us;
|
|
+ __u32 packets_out;
|
|
+ __u32 retrans_out;
|
|
+ __u32 total_retrans;
|
|
+ __u32 segs_in;
|
|
+ __u32 data_segs_in;
|
|
+ __u32 segs_out;
|
|
+ __u32 data_segs_out;
|
|
+ __u32 lost_out;
|
|
+ __u32 sacked_out;
|
|
+ __u32 sk_txhash;
|
|
+ __u64 bytes_received;
|
|
+ __u64 bytes_acked;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ BPF_FIB_LKUP_RET_SUCCESS = 0,
|
|
+ BPF_FIB_LKUP_RET_BLACKHOLE = 1,
|
|
+ BPF_FIB_LKUP_RET_UNREACHABLE = 2,
|
|
+ BPF_FIB_LKUP_RET_PROHIBIT = 3,
|
|
+ BPF_FIB_LKUP_RET_NOT_FWDED = 4,
|
|
+ BPF_FIB_LKUP_RET_FWD_DISABLED = 5,
|
|
+ BPF_FIB_LKUP_RET_UNSUPP_LWT = 6,
|
|
+ BPF_FIB_LKUP_RET_NO_NEIGH = 7,
|
|
+ BPF_FIB_LKUP_RET_FRAG_NEEDED = 8,
|
|
+};
|
|
+
|
|
+struct bpf_fib_lookup {
|
|
+ __u8 family;
|
|
+ __u8 l4_protocol;
|
|
+ __be16 sport;
|
|
+ __be16 dport;
|
|
+ __u16 tot_len;
|
|
+ __u32 ifindex;
|
|
+ union {
|
|
+ __u8 tos;
|
|
+ __be32 flowinfo;
|
|
+ __u32 rt_metric;
|
|
+ };
|
|
+ union {
|
|
+ __be32 ipv4_src;
|
|
+ __u32 ipv6_src[4];
|
|
+ };
|
|
+ union {
|
|
+ __be32 ipv4_dst;
|
|
+ __u32 ipv6_dst[4];
|
|
+ };
|
|
+ __be16 h_vlan_proto;
|
|
+ __be16 h_vlan_TCI;
|
|
+ __u8 smac[6];
|
|
+ __u8 dmac[6];
|
|
+};
|
|
+
|
|
+enum rt_scope_t {
|
|
+ RT_SCOPE_UNIVERSE = 0,
|
|
+ RT_SCOPE_SITE = 200,
|
|
+ RT_SCOPE_LINK = 253,
|
|
+ RT_SCOPE_HOST = 254,
|
|
+ RT_SCOPE_NOWHERE = 255,
|
|
+};
|
|
+
|
|
+enum rt_class_t {
|
|
+ RT_TABLE_UNSPEC = 0,
|
|
+ RT_TABLE_COMPAT = 252,
|
|
+ RT_TABLE_DEFAULT = 253,
|
|
+ RT_TABLE_MAIN = 254,
|
|
+ RT_TABLE_LOCAL = 255,
|
|
+ RT_TABLE_MAX = -1,
|
|
+};
|
|
+
|
|
+struct bpf_skb_data_end {
|
|
+ struct qdisc_skb_cb qdisc_cb;
|
|
+ void *data_meta;
|
|
+ void *data_end;
|
|
+};
|
|
+
|
|
+typedef int (*bpf_aux_classic_check_t)(struct sock_filter *, unsigned int);
|
|
+
|
|
+struct fib_nh_exception {
|
|
+ struct fib_nh_exception *fnhe_next;
|
|
+ int fnhe_genid;
|
|
+ __be32 fnhe_daddr;
|
|
+ u32 fnhe_pmtu;
|
|
+ bool fnhe_mtu_locked;
|
|
+ __be32 fnhe_gw;
|
|
+ long unsigned int fnhe_expires;
|
|
+ struct rtable *fnhe_rth_input;
|
|
+ struct rtable *fnhe_rth_output;
|
|
+ long unsigned int fnhe_stamp;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct fnhe_hash_bucket {
|
|
+ struct fib_nh_exception *chain;
|
|
+};
|
|
+
|
|
+struct fib_info;
|
|
+
|
|
+struct fib_nh {
|
|
+ struct net_device *nh_dev;
|
|
+ struct hlist_node nh_hash;
|
|
+ struct fib_info *nh_parent;
|
|
+ unsigned int nh_flags;
|
|
+ unsigned char nh_scope;
|
|
+ int nh_weight;
|
|
+ atomic_t nh_upper_bound;
|
|
+ __u32 nh_tclassid;
|
|
+ int nh_oif;
|
|
+ __be32 nh_gw;
|
|
+ __be32 nh_saddr;
|
|
+ int nh_saddr_genid;
|
|
+ struct rtable **nh_pcpu_rth_output;
|
|
+ struct rtable *nh_rth_input;
|
|
+ struct fnhe_hash_bucket *nh_exceptions;
|
|
+ struct lwtunnel_state *nh_lwtstate;
|
|
+};
|
|
+
|
|
+struct fib_info {
|
|
+ struct hlist_node fib_hash;
|
|
+ struct hlist_node fib_lhash;
|
|
+ struct net *fib_net;
|
|
+ int fib_treeref;
|
|
+ refcount_t fib_clntref;
|
|
+ unsigned int fib_flags;
|
|
+ unsigned char fib_dead;
|
|
+ unsigned char fib_protocol;
|
|
+ unsigned char fib_scope;
|
|
+ unsigned char fib_type;
|
|
+ __be32 fib_prefsrc;
|
|
+ u32 fib_tb_id;
|
|
+ u32 fib_priority;
|
|
+ struct dst_metrics *fib_metrics;
|
|
+ int fib_nhs;
|
|
+ struct callback_head rcu;
|
|
+ struct fib_nh fib_nh[0];
|
|
+};
|
|
+
|
|
+struct fib_result {
|
|
+ __be32 prefix;
|
|
+ unsigned char prefixlen;
|
|
+ unsigned char nh_sel;
|
|
+ unsigned char type;
|
|
+ unsigned char scope;
|
|
+ u32 tclassid;
|
|
+ struct fib_info *fi;
|
|
+ struct fib_table *table;
|
|
+ struct hlist_head *fa_head;
|
|
+};
|
|
+
|
|
+struct _bpf_dtab_netdev {
|
|
+ struct net_device *dev;
|
|
+};
|
|
+
|
|
+struct ipv6_sr_hdr {
|
|
+ __u8 nexthdr;
|
|
+ __u8 hdrlen;
|
|
+ __u8 type;
|
|
+ __u8 segments_left;
|
|
+ __u8 first_segment;
|
|
+ __u8 flags;
|
|
+ __u16 tag;
|
|
+ struct in6_addr segments[0];
|
|
+};
|
|
+
|
|
+struct seg6_bpf_srh_state {
|
|
+ struct ipv6_sr_hdr *srh;
|
|
+ u16 hdrlen;
|
|
+ bool valid;
|
|
+};
|
|
+
|
|
+struct bpf_scratchpad {
|
|
+ union {
|
|
+ __be32 diff[128];
|
|
+ u8 buff[512];
|
|
+ };
|
|
+};
|
|
+
|
|
+struct sk_reuseport_kern {
|
|
+ struct sk_buff *skb;
|
|
+ struct sock *sk;
|
|
+ struct sock *selected_sk;
|
|
+ void *data_end;
|
|
+ u32 hash;
|
|
+ u32 reuseport_id;
|
|
+ bool bind_inany;
|
|
+};
|
|
+
|
|
+struct bpf_dtab_netdev___2;
|
|
+
|
|
+struct bpf_cpu_map_entry___2;
|
|
+
|
|
+struct sock_diag_req {
|
|
+ __u8 sdiag_family;
|
|
+ __u8 sdiag_protocol;
|
|
+};
|
|
+
|
|
+struct sock_diag_handler {
|
|
+ __u8 family;
|
|
+ int (*dump)(struct sk_buff *, struct nlmsghdr *);
|
|
+ int (*get_info)(struct sk_buff *, struct sock *);
|
|
+ int (*destroy)(struct sk_buff *, struct nlmsghdr *);
|
|
+};
|
|
+
|
|
+struct broadcast_sk {
|
|
+ struct sock *sk;
|
|
+ struct work_struct work;
|
|
+};
|
|
+
|
|
+typedef int gifconf_func_t(struct net_device *, char *, int, int);
|
|
+
|
|
+struct hwtstamp_config {
|
|
+ int flags;
|
|
+ int tx_type;
|
|
+ int rx_filter;
|
|
+};
|
|
+
|
|
+enum hwtstamp_tx_types {
|
|
+ HWTSTAMP_TX_OFF = 0,
|
|
+ HWTSTAMP_TX_ON = 1,
|
|
+ HWTSTAMP_TX_ONESTEP_SYNC = 2,
|
|
+};
|
|
+
|
|
+enum hwtstamp_rx_filters {
|
|
+ HWTSTAMP_FILTER_NONE = 0,
|
|
+ HWTSTAMP_FILTER_ALL = 1,
|
|
+ HWTSTAMP_FILTER_SOME = 2,
|
|
+ HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 3,
|
|
+ HWTSTAMP_FILTER_PTP_V1_L4_SYNC = 4,
|
|
+ HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ = 5,
|
|
+ HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 6,
|
|
+ HWTSTAMP_FILTER_PTP_V2_L4_SYNC = 7,
|
|
+ HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ = 8,
|
|
+ HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 9,
|
|
+ HWTSTAMP_FILTER_PTP_V2_L2_SYNC = 10,
|
|
+ HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ = 11,
|
|
+ HWTSTAMP_FILTER_PTP_V2_EVENT = 12,
|
|
+ HWTSTAMP_FILTER_PTP_V2_SYNC = 13,
|
|
+ HWTSTAMP_FILTER_PTP_V2_DELAY_REQ = 14,
|
|
+ HWTSTAMP_FILTER_NTP_ALL = 15,
|
|
+};
|
|
+
|
|
+struct tso_t {
|
|
+ int next_frag_idx;
|
|
+ void *data;
|
|
+ size_t size;
|
|
+ u16 ip_id;
|
|
+ bool ipv6;
|
|
+ u32 tcp_seq;
|
|
+};
|
|
+
|
|
+struct fib_notifier_info {
|
|
+ struct net *net;
|
|
+ int family;
|
|
+ struct netlink_ext_ack *extack;
|
|
+};
|
|
+
|
|
+enum fib_event_type {
|
|
+ FIB_EVENT_ENTRY_REPLACE = 0,
|
|
+ FIB_EVENT_ENTRY_APPEND = 1,
|
|
+ FIB_EVENT_ENTRY_ADD = 2,
|
|
+ FIB_EVENT_ENTRY_DEL = 3,
|
|
+ FIB_EVENT_RULE_ADD = 4,
|
|
+ FIB_EVENT_RULE_DEL = 5,
|
|
+ FIB_EVENT_NH_ADD = 6,
|
|
+ FIB_EVENT_NH_DEL = 7,
|
|
+ FIB_EVENT_VIF_ADD = 8,
|
|
+ FIB_EVENT_VIF_DEL = 9,
|
|
+};
|
|
+
|
|
+struct zero_copy_allocator {
|
|
+ void (*free)(struct zero_copy_allocator *, long unsigned int);
|
|
+};
|
|
+
|
|
+struct xdp_attachment_info {
|
|
+ struct bpf_prog *prog;
|
|
+ u32 flags;
|
|
+};
|
|
+
|
|
+struct pp_alloc_cache {
|
|
+ u32 count;
|
|
+ void *cache[128];
|
|
+};
|
|
+
|
|
+struct page_pool_params {
|
|
+ unsigned int flags;
|
|
+ unsigned int order;
|
|
+ unsigned int pool_size;
|
|
+ int nid;
|
|
+ struct device *dev;
|
|
+ enum dma_data_direction dma_dir;
|
|
+};
|
|
+
|
|
+struct page_pool {
|
|
+ struct callback_head rcu;
|
|
+ struct page_pool_params p;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct pp_alloc_cache alloc;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct ptr_ring ring;
|
|
+};
|
|
+
|
|
+struct xdp_mem_allocator {
|
|
+ struct xdp_mem_info mem;
|
|
+ union {
|
|
+ void *allocator;
|
|
+ struct page_pool *page_pool;
|
|
+ struct zero_copy_allocator *zc_alloc;
|
|
+ };
|
|
+ struct rhash_head node;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct rx_queue_attribute {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct netdev_rx_queue *, char *);
|
|
+ ssize_t (*store)(struct netdev_rx_queue *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct netdev_queue_attribute {
|
|
+ struct attribute attr;
|
|
+ ssize_t (*show)(struct netdev_queue *, char *);
|
|
+ ssize_t (*store)(struct netdev_queue *, const char *, size_t);
|
|
+};
|
|
+
|
|
+struct inet6_ifaddr {
|
|
+ struct in6_addr addr;
|
|
+ __u32 prefix_len;
|
|
+ __u32 rt_priority;
|
|
+ __u32 valid_lft;
|
|
+ __u32 prefered_lft;
|
|
+ refcount_t refcnt;
|
|
+ spinlock_t lock;
|
|
+ int state;
|
|
+ __u32 flags;
|
|
+ __u8 dad_probes;
|
|
+ __u8 stable_privacy_retry;
|
|
+ __u16 scope;
|
|
+ __u64 dad_nonce;
|
|
+ long unsigned int cstamp;
|
|
+ long unsigned int tstamp;
|
|
+ struct delayed_work dad_work;
|
|
+ struct inet6_dev *idev;
|
|
+ struct fib6_info *rt;
|
|
+ struct hlist_node addr_lst;
|
|
+ struct list_head if_list;
|
|
+ struct list_head tmp_list;
|
|
+ struct inet6_ifaddr *ifpub;
|
|
+ int regen_count;
|
|
+ bool tokenized;
|
|
+ struct callback_head rcu;
|
|
+ struct in6_addr peer_addr;
|
|
+};
|
|
+
|
|
+struct fib_rule_uid_range {
|
|
+ __u32 start;
|
|
+ __u32 end;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ FRA_UNSPEC = 0,
|
|
+ FRA_DST = 1,
|
|
+ FRA_SRC = 2,
|
|
+ FRA_IIFNAME = 3,
|
|
+ FRA_GOTO = 4,
|
|
+ FRA_UNUSED2 = 5,
|
|
+ FRA_PRIORITY = 6,
|
|
+ FRA_UNUSED3 = 7,
|
|
+ FRA_UNUSED4 = 8,
|
|
+ FRA_UNUSED5 = 9,
|
|
+ FRA_FWMARK = 10,
|
|
+ FRA_FLOW = 11,
|
|
+ FRA_TUN_ID = 12,
|
|
+ FRA_SUPPRESS_IFGROUP = 13,
|
|
+ FRA_SUPPRESS_PREFIXLEN = 14,
|
|
+ FRA_TABLE = 15,
|
|
+ FRA_FWMASK = 16,
|
|
+ FRA_OIFNAME = 17,
|
|
+ FRA_PAD = 18,
|
|
+ FRA_L3MDEV = 19,
|
|
+ FRA_UID_RANGE = 20,
|
|
+ FRA_PROTOCOL = 21,
|
|
+ FRA_IP_PROTO = 22,
|
|
+ FRA_SPORT_RANGE = 23,
|
|
+ FRA_DPORT_RANGE = 24,
|
|
+ __FRA_MAX = 25,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ FR_ACT_UNSPEC = 0,
|
|
+ FR_ACT_TO_TBL = 1,
|
|
+ FR_ACT_GOTO = 2,
|
|
+ FR_ACT_NOP = 3,
|
|
+ FR_ACT_RES3 = 4,
|
|
+ FR_ACT_RES4 = 5,
|
|
+ FR_ACT_BLACKHOLE = 6,
|
|
+ FR_ACT_UNREACHABLE = 7,
|
|
+ FR_ACT_PROHIBIT = 8,
|
|
+ __FR_ACT_MAX = 9,
|
|
+};
|
|
+
|
|
+struct fib_rule_notifier_info {
|
|
+ struct fib_notifier_info info;
|
|
+ struct fib_rule *rule;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_kfree_skb {
|
|
+ struct trace_entry ent;
|
|
+ void *skbaddr;
|
|
+ void *location;
|
|
+ short unsigned int protocol;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_consume_skb {
|
|
+ struct trace_entry ent;
|
|
+ void *skbaddr;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_skb_copy_datagram_iovec {
|
|
+ struct trace_entry ent;
|
|
+ const void *skbaddr;
|
|
+ int len;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_kfree_skb {};
|
|
+
|
|
+struct trace_event_data_offsets_consume_skb {};
|
|
+
|
|
+struct trace_event_data_offsets_skb_copy_datagram_iovec {};
|
|
+
|
|
+struct trace_event_raw_net_dev_start_xmit {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ u16 queue_mapping;
|
|
+ const void *skbaddr;
|
|
+ bool vlan_tagged;
|
|
+ u16 vlan_proto;
|
|
+ u16 vlan_tci;
|
|
+ u16 protocol;
|
|
+ u8 ip_summed;
|
|
+ unsigned int len;
|
|
+ unsigned int data_len;
|
|
+ int network_offset;
|
|
+ bool transport_offset_valid;
|
|
+ int transport_offset;
|
|
+ u8 tx_flags;
|
|
+ u16 gso_size;
|
|
+ u16 gso_segs;
|
|
+ u16 gso_type;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_net_dev_xmit {
|
|
+ struct trace_entry ent;
|
|
+ void *skbaddr;
|
|
+ unsigned int len;
|
|
+ int rc;
|
|
+ u32 __data_loc_name;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_net_dev_template {
|
|
+ struct trace_entry ent;
|
|
+ void *skbaddr;
|
|
+ unsigned int len;
|
|
+ u32 __data_loc_name;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_net_dev_rx_verbose_template {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_name;
|
|
+ unsigned int napi_id;
|
|
+ u16 queue_mapping;
|
|
+ const void *skbaddr;
|
|
+ bool vlan_tagged;
|
|
+ u16 vlan_proto;
|
|
+ u16 vlan_tci;
|
|
+ u16 protocol;
|
|
+ u8 ip_summed;
|
|
+ u32 hash;
|
|
+ bool l4_hash;
|
|
+ unsigned int len;
|
|
+ unsigned int data_len;
|
|
+ unsigned int truesize;
|
|
+ bool mac_header_valid;
|
|
+ int mac_header;
|
|
+ unsigned char nr_frags;
|
|
+ u16 gso_size;
|
|
+ u16 gso_type;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_net_dev_start_xmit {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_net_dev_xmit {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_net_dev_template {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_net_dev_rx_verbose_template {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_napi_poll {
|
|
+ struct trace_entry ent;
|
|
+ struct napi_struct *napi;
|
|
+ u32 __data_loc_dev_name;
|
|
+ int work;
|
|
+ int budget;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_napi_poll {
|
|
+ u32 dev_name;
|
|
+};
|
|
+
|
|
+enum tcp_ca_state {
|
|
+ TCP_CA_Open = 0,
|
|
+ TCP_CA_Disorder = 1,
|
|
+ TCP_CA_CWR = 2,
|
|
+ TCP_CA_Recovery = 3,
|
|
+ TCP_CA_Loss = 4,
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sock_rcvqueue_full {
|
|
+ struct trace_entry ent;
|
|
+ int rmem_alloc;
|
|
+ unsigned int truesize;
|
|
+ int sk_rcvbuf;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_sock_exceed_buf_limit {
|
|
+ struct trace_entry ent;
|
|
+ char name[32];
|
|
+ long int *sysctl_mem;
|
|
+ long int allocated;
|
|
+ int sysctl_rmem;
|
|
+ int rmem_alloc;
|
|
+ int sysctl_wmem;
|
|
+ int wmem_alloc;
|
|
+ int wmem_queued;
|
|
+ int kind;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_inet_sock_set_state {
|
|
+ struct trace_entry ent;
|
|
+ const void *skaddr;
|
|
+ int oldstate;
|
|
+ int newstate;
|
|
+ __u16 sport;
|
|
+ __u16 dport;
|
|
+ __u16 family;
|
|
+ __u8 protocol;
|
|
+ __u8 saddr[4];
|
|
+ __u8 daddr[4];
|
|
+ __u8 saddr_v6[16];
|
|
+ __u8 daddr_v6[16];
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_sock_rcvqueue_full {};
|
|
+
|
|
+struct trace_event_data_offsets_sock_exceed_buf_limit {};
|
|
+
|
|
+struct trace_event_data_offsets_inet_sock_set_state {};
|
|
+
|
|
+struct trace_event_raw_udp_fail_queue_rcv_skb {
|
|
+ struct trace_entry ent;
|
|
+ int rc;
|
|
+ __u16 lport;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_udp_fail_queue_rcv_skb {};
|
|
+
|
|
+struct trace_event_raw_tcp_event_sk_skb {
|
|
+ struct trace_entry ent;
|
|
+ const void *skbaddr;
|
|
+ const void *skaddr;
|
|
+ __u16 sport;
|
|
+ __u16 dport;
|
|
+ __u8 saddr[4];
|
|
+ __u8 daddr[4];
|
|
+ __u8 saddr_v6[16];
|
|
+ __u8 daddr_v6[16];
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_tcp_event_sk {
|
|
+ struct trace_entry ent;
|
|
+ const void *skaddr;
|
|
+ __u16 sport;
|
|
+ __u16 dport;
|
|
+ __u8 saddr[4];
|
|
+ __u8 daddr[4];
|
|
+ __u8 saddr_v6[16];
|
|
+ __u8 daddr_v6[16];
|
|
+ __u64 sock_cookie;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_tcp_retransmit_synack {
|
|
+ struct trace_entry ent;
|
|
+ const void *skaddr;
|
|
+ const void *req;
|
|
+ __u16 sport;
|
|
+ __u16 dport;
|
|
+ __u8 saddr[4];
|
|
+ __u8 daddr[4];
|
|
+ __u8 saddr_v6[16];
|
|
+ __u8 daddr_v6[16];
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_tcp_probe {
|
|
+ struct trace_entry ent;
|
|
+ __u8 saddr[28];
|
|
+ __u8 daddr[28];
|
|
+ __u16 sport;
|
|
+ __u16 dport;
|
|
+ __u32 mark;
|
|
+ __u16 data_len;
|
|
+ __u32 snd_nxt;
|
|
+ __u32 snd_una;
|
|
+ __u32 snd_cwnd;
|
|
+ __u32 ssthresh;
|
|
+ __u32 snd_wnd;
|
|
+ __u32 srtt;
|
|
+ __u32 rcv_wnd;
|
|
+ __u64 sock_cookie;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_tcp_event_sk_skb {};
|
|
+
|
|
+struct trace_event_data_offsets_tcp_event_sk {};
|
|
+
|
|
+struct trace_event_data_offsets_tcp_retransmit_synack {};
|
|
+
|
|
+struct trace_event_data_offsets_tcp_probe {};
|
|
+
|
|
+struct trace_event_raw_fib_table_lookup {
|
|
+ struct trace_entry ent;
|
|
+ u32 tb_id;
|
|
+ int err;
|
|
+ int oif;
|
|
+ int iif;
|
|
+ u8 proto;
|
|
+ __u8 tos;
|
|
+ __u8 scope;
|
|
+ __u8 flags;
|
|
+ __u8 src[4];
|
|
+ __u8 dst[4];
|
|
+ __u8 gw[4];
|
|
+ __u8 saddr[4];
|
|
+ u16 sport;
|
|
+ u16 dport;
|
|
+ u32 __data_loc_name;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_fib_table_lookup {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_qdisc_dequeue {
|
|
+ struct trace_entry ent;
|
|
+ struct Qdisc *qdisc;
|
|
+ const struct netdev_queue *txq;
|
|
+ int packets;
|
|
+ void *skbaddr;
|
|
+ int ifindex;
|
|
+ u32 handle;
|
|
+ u32 parent;
|
|
+ long unsigned int txq_state;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_qdisc_dequeue {};
|
|
+
|
|
+struct br_mcast_stats {
|
|
+ __u64 igmp_v1queries[2];
|
|
+ __u64 igmp_v2queries[2];
|
|
+ __u64 igmp_v3queries[2];
|
|
+ __u64 igmp_leaves[2];
|
|
+ __u64 igmp_v1reports[2];
|
|
+ __u64 igmp_v2reports[2];
|
|
+ __u64 igmp_v3reports[2];
|
|
+ __u64 igmp_parse_errors;
|
|
+ __u64 mld_v1queries[2];
|
|
+ __u64 mld_v2queries[2];
|
|
+ __u64 mld_leaves[2];
|
|
+ __u64 mld_v1reports[2];
|
|
+ __u64 mld_v2reports[2];
|
|
+ __u64 mld_parse_errors;
|
|
+ __u64 mcast_bytes[2];
|
|
+ __u64 mcast_packets[2];
|
|
+};
|
|
+
|
|
+struct br_ip {
|
|
+ union {
|
|
+ __be32 ip4;
|
|
+ struct in6_addr ip6;
|
|
+ } u;
|
|
+ __be16 proto;
|
|
+ __u16 vid;
|
|
+};
|
|
+
|
|
+struct bridge_id {
|
|
+ unsigned char prio[2];
|
|
+ unsigned char addr[6];
|
|
+};
|
|
+
|
|
+typedef struct bridge_id bridge_id;
|
|
+
|
|
+struct mac_addr {
|
|
+ unsigned char addr[6];
|
|
+};
|
|
+
|
|
+typedef struct mac_addr mac_addr;
|
|
+
|
|
+typedef __u16 port_id;
|
|
+
|
|
+struct bridge_mcast_own_query {
|
|
+ struct timer_list timer;
|
|
+ u32 startup_sent;
|
|
+};
|
|
+
|
|
+struct bridge_mcast_other_query {
|
|
+ struct timer_list timer;
|
|
+ long unsigned int delay_time;
|
|
+};
|
|
+
|
|
+struct net_bridge_port;
|
|
+
|
|
+struct bridge_mcast_querier {
|
|
+ struct br_ip addr;
|
|
+ struct net_bridge_port *port;
|
|
+};
|
|
+
|
|
+struct net_bridge;
|
|
+
|
|
+struct net_bridge_vlan_group;
|
|
+
|
|
+struct bridge_mcast_stats;
|
|
+
|
|
+struct net_bridge_port {
|
|
+ struct net_bridge *br;
|
|
+ struct net_device *dev;
|
|
+ struct list_head list;
|
|
+ long unsigned int flags;
|
|
+ struct net_bridge_vlan_group *vlgrp;
|
|
+ struct net_bridge_port *backup_port;
|
|
+ u8 priority;
|
|
+ u8 state;
|
|
+ u16 port_no;
|
|
+ unsigned char topology_change_ack;
|
|
+ unsigned char config_pending;
|
|
+ port_id port_id;
|
|
+ port_id designated_port;
|
|
+ bridge_id designated_root;
|
|
+ bridge_id designated_bridge;
|
|
+ u32 path_cost;
|
|
+ u32 designated_cost;
|
|
+ long unsigned int designated_age;
|
|
+ struct timer_list forward_delay_timer;
|
|
+ struct timer_list hold_timer;
|
|
+ struct timer_list message_age_timer;
|
|
+ struct kobject kobj;
|
|
+ struct callback_head rcu;
|
|
+ struct bridge_mcast_own_query ip4_own_query;
|
|
+ struct bridge_mcast_own_query ip6_own_query;
|
|
+ unsigned char multicast_router;
|
|
+ struct bridge_mcast_stats *mcast_stats;
|
|
+ struct timer_list multicast_router_timer;
|
|
+ struct hlist_head mglist;
|
|
+ struct hlist_node rlist;
|
|
+ char sysfs_name[16];
|
|
+ struct netpoll *np;
|
|
+ int offload_fwd_mark;
|
|
+ u16 group_fwd_mask;
|
|
+ u16 backup_redirected_cnt;
|
|
+};
|
|
+
|
|
+struct bridge_mcast_stats {
|
|
+ struct br_mcast_stats mstats;
|
|
+ struct u64_stats_sync syncp;
|
|
+};
|
|
+
|
|
+struct net_bridge_mdb_htable;
|
|
+
|
|
+struct net_bridge {
|
|
+ spinlock_t lock;
|
|
+ spinlock_t hash_lock;
|
|
+ struct list_head port_list;
|
|
+ struct net_device *dev;
|
|
+ struct pcpu_sw_netstats *stats;
|
|
+ u8 vlan_enabled;
|
|
+ u8 vlan_stats_enabled;
|
|
+ __be16 vlan_proto;
|
|
+ u16 default_pvid;
|
|
+ struct net_bridge_vlan_group *vlgrp;
|
|
+ struct rhashtable fdb_hash_tbl;
|
|
+ union {
|
|
+ struct rtable fake_rtable;
|
|
+ struct rt6_info fake_rt6_info;
|
|
+ };
|
|
+ bool nf_call_iptables;
|
|
+ bool nf_call_ip6tables;
|
|
+ bool nf_call_arptables;
|
|
+ u16 group_fwd_mask;
|
|
+ u16 group_fwd_mask_required;
|
|
+ bridge_id designated_root;
|
|
+ bridge_id bridge_id;
|
|
+ u32 root_path_cost;
|
|
+ unsigned char topology_change;
|
|
+ unsigned char topology_change_detected;
|
|
+ u16 root_port;
|
|
+ long unsigned int max_age;
|
|
+ long unsigned int hello_time;
|
|
+ long unsigned int forward_delay;
|
|
+ long unsigned int ageing_time;
|
|
+ long unsigned int bridge_max_age;
|
|
+ long unsigned int bridge_hello_time;
|
|
+ long unsigned int bridge_forward_delay;
|
|
+ long unsigned int bridge_ageing_time;
|
|
+ u8 group_addr[6];
|
|
+ bool group_addr_set;
|
|
+ enum {
|
|
+ BR_NO_STP = 0,
|
|
+ BR_KERNEL_STP = 1,
|
|
+ BR_USER_STP = 2,
|
|
+ } stp_enabled;
|
|
+ unsigned char multicast_router;
|
|
+ u8 multicast_disabled: 1;
|
|
+ u8 multicast_querier: 1;
|
|
+ u8 multicast_query_use_ifaddr: 1;
|
|
+ u8 has_ipv6_addr: 1;
|
|
+ u8 multicast_stats_enabled: 1;
|
|
+ u32 hash_elasticity;
|
|
+ u32 hash_max;
|
|
+ u32 multicast_last_member_count;
|
|
+ u32 multicast_startup_query_count;
|
|
+ u8 multicast_igmp_version;
|
|
+ long unsigned int multicast_last_member_interval;
|
|
+ long unsigned int multicast_membership_interval;
|
|
+ long unsigned int multicast_querier_interval;
|
|
+ long unsigned int multicast_query_interval;
|
|
+ long unsigned int multicast_query_response_interval;
|
|
+ long unsigned int multicast_startup_query_interval;
|
|
+ spinlock_t multicast_lock;
|
|
+ struct net_bridge_mdb_htable *mdb;
|
|
+ struct hlist_head router_list;
|
|
+ struct timer_list multicast_router_timer;
|
|
+ struct bridge_mcast_other_query ip4_other_query;
|
|
+ struct bridge_mcast_own_query ip4_own_query;
|
|
+ struct bridge_mcast_querier ip4_querier;
|
|
+ struct bridge_mcast_stats *mcast_stats;
|
|
+ struct bridge_mcast_other_query ip6_other_query;
|
|
+ struct bridge_mcast_own_query ip6_own_query;
|
|
+ struct bridge_mcast_querier ip6_querier;
|
|
+ u8 multicast_mld_version;
|
|
+ struct timer_list hello_timer;
|
|
+ struct timer_list tcn_timer;
|
|
+ struct timer_list topology_change_timer;
|
|
+ struct delayed_work gc_work;
|
|
+ struct kobject *ifobj;
|
|
+ u32 auto_cnt;
|
|
+ int offload_fwd_mark;
|
|
+ bool neigh_suppress_enabled;
|
|
+ bool mtu_set_by_user;
|
|
+ struct hlist_head fdb_list;
|
|
+};
|
|
+
|
|
+struct net_bridge_vlan_group {
|
|
+ struct rhashtable vlan_hash;
|
|
+ struct rhashtable tunnel_hash;
|
|
+ struct list_head vlan_list;
|
|
+ u16 num_vlans;
|
|
+ u16 pvid;
|
|
+};
|
|
+
|
|
+struct net_bridge_fdb_key {
|
|
+ mac_addr addr;
|
|
+ u16 vlan_id;
|
|
+};
|
|
+
|
|
+struct net_bridge_fdb_entry {
|
|
+ struct rhash_head rhnode;
|
|
+ struct net_bridge_port *dst;
|
|
+ struct net_bridge_fdb_key key;
|
|
+ struct hlist_node fdb_node;
|
|
+ unsigned char is_local: 1;
|
|
+ unsigned char is_static: 1;
|
|
+ unsigned char added_by_user: 1;
|
|
+ unsigned char added_by_external_learn: 1;
|
|
+ unsigned char offloaded: 1;
|
|
+ long: 59;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long unsigned int updated;
|
|
+ long unsigned int used;
|
|
+ struct callback_head rcu;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct net_bridge_mdb_htable {
|
|
+ struct hlist_head *mhash;
|
|
+ struct callback_head rcu;
|
|
+ struct net_bridge_mdb_htable *old;
|
|
+ u32 size;
|
|
+ u32 max;
|
|
+ u32 secret;
|
|
+ u32 ver;
|
|
+};
|
|
+
|
|
+struct nf_br_ops {
|
|
+ int (*br_dev_xmit_hook)(struct sk_buff *);
|
|
+};
|
|
+
|
|
+struct trace_event_raw_br_fdb_add {
|
|
+ struct trace_entry ent;
|
|
+ u8 ndm_flags;
|
|
+ u32 __data_loc_dev;
|
|
+ unsigned char addr[6];
|
|
+ u16 vid;
|
|
+ u16 nlh_flags;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_br_fdb_external_learn_add {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_br_dev;
|
|
+ u32 __data_loc_dev;
|
|
+ unsigned char addr[6];
|
|
+ u16 vid;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_fdb_delete {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_br_dev;
|
|
+ u32 __data_loc_dev;
|
|
+ unsigned char addr[6];
|
|
+ u16 vid;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_raw_br_fdb_update {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_br_dev;
|
|
+ u32 __data_loc_dev;
|
|
+ unsigned char addr[6];
|
|
+ u16 vid;
|
|
+ bool added_by_user;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_br_fdb_add {
|
|
+ u32 dev;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_br_fdb_external_learn_add {
|
|
+ u32 br_dev;
|
|
+ u32 dev;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_fdb_delete {
|
|
+ u32 br_dev;
|
|
+ u32 dev;
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_br_fdb_update {
|
|
+ u32 br_dev;
|
|
+ u32 dev;
|
|
+};
|
|
+
|
|
+struct net_dm_drop_point {
|
|
+ __u8 pc[8];
|
|
+ __u32 count;
|
|
+};
|
|
+
|
|
+struct net_dm_alert_msg {
|
|
+ __u32 entries;
|
|
+ struct net_dm_drop_point points[0];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NET_DM_CMD_UNSPEC = 0,
|
|
+ NET_DM_CMD_ALERT = 1,
|
|
+ NET_DM_CMD_CONFIG = 2,
|
|
+ NET_DM_CMD_START = 3,
|
|
+ NET_DM_CMD_STOP = 4,
|
|
+ _NET_DM_CMD_MAX = 5,
|
|
+};
|
|
+
|
|
+struct per_cpu_dm_data {
|
|
+ spinlock_t lock;
|
|
+ struct sk_buff *skb;
|
|
+ struct work_struct dm_alert_work;
|
|
+ struct timer_list send_timer;
|
|
+};
|
|
+
|
|
+struct dm_hw_stat_delta {
|
|
+ struct net_device *dev;
|
|
+ long unsigned int last_rx;
|
|
+ struct list_head list;
|
|
+ struct callback_head rcu;
|
|
+ long unsigned int last_drop_val;
|
|
+};
|
|
+
|
|
+struct update_classid_context {
|
|
+ u32 classid;
|
|
+ unsigned int batch;
|
|
+};
|
|
+
|
|
+struct rtnexthop {
|
|
+ short unsigned int rtnh_len;
|
|
+ unsigned char rtnh_flags;
|
|
+ unsigned char rtnh_hops;
|
|
+ int rtnh_ifindex;
|
|
+};
|
|
+
|
|
+struct lwtunnel_encap_ops {
|
|
+ int (*build_state)(struct nlattr *, unsigned int, const void *, struct lwtunnel_state **, struct netlink_ext_ack *);
|
|
+ void (*destroy_state)(struct lwtunnel_state *);
|
|
+ int (*output)(struct net *, struct sock *, struct sk_buff *);
|
|
+ int (*input)(struct sk_buff *);
|
|
+ int (*fill_encap)(struct sk_buff *, struct lwtunnel_state *);
|
|
+ int (*get_encap_size)(struct lwtunnel_state *);
|
|
+ int (*cmp_encap)(struct lwtunnel_state *, struct lwtunnel_state *);
|
|
+ int (*xmit)(struct sk_buff *);
|
|
+ struct module *owner;
|
|
+};
|
|
+
|
|
+enum bpf_ret_code {
|
|
+ BPF_OK = 0,
|
|
+ BPF_DROP = 2,
|
|
+ BPF_REDIRECT = 7,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ LWT_BPF_PROG_UNSPEC = 0,
|
|
+ LWT_BPF_PROG_FD = 1,
|
|
+ LWT_BPF_PROG_NAME = 2,
|
|
+ __LWT_BPF_PROG_MAX = 3,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ LWT_BPF_UNSPEC = 0,
|
|
+ LWT_BPF_IN = 1,
|
|
+ LWT_BPF_OUT = 2,
|
|
+ LWT_BPF_XMIT = 3,
|
|
+ LWT_BPF_XMIT_HEADROOM = 4,
|
|
+ __LWT_BPF_MAX = 5,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ LWTUNNEL_XMIT_DONE = 0,
|
|
+ LWTUNNEL_XMIT_CONTINUE = 1,
|
|
+};
|
|
+
|
|
+struct bpf_lwt_prog {
|
|
+ struct bpf_prog *prog;
|
|
+ char *name;
|
|
+};
|
|
+
|
|
+struct bpf_lwt {
|
|
+ struct bpf_lwt_prog in;
|
|
+ struct bpf_lwt_prog out;
|
|
+ struct bpf_lwt_prog xmit;
|
|
+ int family;
|
|
+};
|
|
+
|
|
+struct dst_cache_pcpu {
|
|
+ long unsigned int refresh_ts;
|
|
+ struct dst_entry *dst;
|
|
+ u32 cookie;
|
|
+ union {
|
|
+ struct in_addr in_saddr;
|
|
+ struct in6_addr in6_saddr;
|
|
+ };
|
|
+};
|
|
+
|
|
+enum devlink_command {
|
|
+ DEVLINK_CMD_UNSPEC = 0,
|
|
+ DEVLINK_CMD_GET = 1,
|
|
+ DEVLINK_CMD_SET = 2,
|
|
+ DEVLINK_CMD_NEW = 3,
|
|
+ DEVLINK_CMD_DEL = 4,
|
|
+ DEVLINK_CMD_PORT_GET = 5,
|
|
+ DEVLINK_CMD_PORT_SET = 6,
|
|
+ DEVLINK_CMD_PORT_NEW = 7,
|
|
+ DEVLINK_CMD_PORT_DEL = 8,
|
|
+ DEVLINK_CMD_PORT_SPLIT = 9,
|
|
+ DEVLINK_CMD_PORT_UNSPLIT = 10,
|
|
+ DEVLINK_CMD_SB_GET = 11,
|
|
+ DEVLINK_CMD_SB_SET = 12,
|
|
+ DEVLINK_CMD_SB_NEW = 13,
|
|
+ DEVLINK_CMD_SB_DEL = 14,
|
|
+ DEVLINK_CMD_SB_POOL_GET = 15,
|
|
+ DEVLINK_CMD_SB_POOL_SET = 16,
|
|
+ DEVLINK_CMD_SB_POOL_NEW = 17,
|
|
+ DEVLINK_CMD_SB_POOL_DEL = 18,
|
|
+ DEVLINK_CMD_SB_PORT_POOL_GET = 19,
|
|
+ DEVLINK_CMD_SB_PORT_POOL_SET = 20,
|
|
+ DEVLINK_CMD_SB_PORT_POOL_NEW = 21,
|
|
+ DEVLINK_CMD_SB_PORT_POOL_DEL = 22,
|
|
+ DEVLINK_CMD_SB_TC_POOL_BIND_GET = 23,
|
|
+ DEVLINK_CMD_SB_TC_POOL_BIND_SET = 24,
|
|
+ DEVLINK_CMD_SB_TC_POOL_BIND_NEW = 25,
|
|
+ DEVLINK_CMD_SB_TC_POOL_BIND_DEL = 26,
|
|
+ DEVLINK_CMD_SB_OCC_SNAPSHOT = 27,
|
|
+ DEVLINK_CMD_SB_OCC_MAX_CLEAR = 28,
|
|
+ DEVLINK_CMD_ESWITCH_GET = 29,
|
|
+ DEVLINK_CMD_ESWITCH_SET = 30,
|
|
+ DEVLINK_CMD_DPIPE_TABLE_GET = 31,
|
|
+ DEVLINK_CMD_DPIPE_ENTRIES_GET = 32,
|
|
+ DEVLINK_CMD_DPIPE_HEADERS_GET = 33,
|
|
+ DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET = 34,
|
|
+ DEVLINK_CMD_RESOURCE_SET = 35,
|
|
+ DEVLINK_CMD_RESOURCE_DUMP = 36,
|
|
+ DEVLINK_CMD_RELOAD = 37,
|
|
+ DEVLINK_CMD_PARAM_GET = 38,
|
|
+ DEVLINK_CMD_PARAM_SET = 39,
|
|
+ DEVLINK_CMD_PARAM_NEW = 40,
|
|
+ DEVLINK_CMD_PARAM_DEL = 41,
|
|
+ DEVLINK_CMD_REGION_GET = 42,
|
|
+ DEVLINK_CMD_REGION_SET = 43,
|
|
+ DEVLINK_CMD_REGION_NEW = 44,
|
|
+ DEVLINK_CMD_REGION_DEL = 45,
|
|
+ DEVLINK_CMD_REGION_READ = 46,
|
|
+ __DEVLINK_CMD_MAX = 47,
|
|
+ DEVLINK_CMD_MAX = 46,
|
|
+};
|
|
+
|
|
+enum devlink_port_type {
|
|
+ DEVLINK_PORT_TYPE_NOTSET = 0,
|
|
+ DEVLINK_PORT_TYPE_AUTO = 1,
|
|
+ DEVLINK_PORT_TYPE_ETH = 2,
|
|
+ DEVLINK_PORT_TYPE_IB = 3,
|
|
+};
|
|
+
|
|
+enum devlink_sb_pool_type {
|
|
+ DEVLINK_SB_POOL_TYPE_INGRESS = 0,
|
|
+ DEVLINK_SB_POOL_TYPE_EGRESS = 1,
|
|
+};
|
|
+
|
|
+enum devlink_sb_threshold_type {
|
|
+ DEVLINK_SB_THRESHOLD_TYPE_STATIC = 0,
|
|
+ DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC = 1,
|
|
+};
|
|
+
|
|
+enum devlink_port_flavour {
|
|
+ DEVLINK_PORT_FLAVOUR_PHYSICAL = 0,
|
|
+ DEVLINK_PORT_FLAVOUR_CPU = 1,
|
|
+ DEVLINK_PORT_FLAVOUR_DSA = 2,
|
|
+};
|
|
+
|
|
+enum devlink_param_cmode {
|
|
+ DEVLINK_PARAM_CMODE_RUNTIME = 0,
|
|
+ DEVLINK_PARAM_CMODE_DRIVERINIT = 1,
|
|
+ DEVLINK_PARAM_CMODE_PERMANENT = 2,
|
|
+ __DEVLINK_PARAM_CMODE_MAX = 3,
|
|
+ DEVLINK_PARAM_CMODE_MAX = 2,
|
|
+};
|
|
+
|
|
+enum devlink_attr {
|
|
+ DEVLINK_ATTR_UNSPEC = 0,
|
|
+ DEVLINK_ATTR_BUS_NAME = 1,
|
|
+ DEVLINK_ATTR_DEV_NAME = 2,
|
|
+ DEVLINK_ATTR_PORT_INDEX = 3,
|
|
+ DEVLINK_ATTR_PORT_TYPE = 4,
|
|
+ DEVLINK_ATTR_PORT_DESIRED_TYPE = 5,
|
|
+ DEVLINK_ATTR_PORT_NETDEV_IFINDEX = 6,
|
|
+ DEVLINK_ATTR_PORT_NETDEV_NAME = 7,
|
|
+ DEVLINK_ATTR_PORT_IBDEV_NAME = 8,
|
|
+ DEVLINK_ATTR_PORT_SPLIT_COUNT = 9,
|
|
+ DEVLINK_ATTR_PORT_SPLIT_GROUP = 10,
|
|
+ DEVLINK_ATTR_SB_INDEX = 11,
|
|
+ DEVLINK_ATTR_SB_SIZE = 12,
|
|
+ DEVLINK_ATTR_SB_INGRESS_POOL_COUNT = 13,
|
|
+ DEVLINK_ATTR_SB_EGRESS_POOL_COUNT = 14,
|
|
+ DEVLINK_ATTR_SB_INGRESS_TC_COUNT = 15,
|
|
+ DEVLINK_ATTR_SB_EGRESS_TC_COUNT = 16,
|
|
+ DEVLINK_ATTR_SB_POOL_INDEX = 17,
|
|
+ DEVLINK_ATTR_SB_POOL_TYPE = 18,
|
|
+ DEVLINK_ATTR_SB_POOL_SIZE = 19,
|
|
+ DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE = 20,
|
|
+ DEVLINK_ATTR_SB_THRESHOLD = 21,
|
|
+ DEVLINK_ATTR_SB_TC_INDEX = 22,
|
|
+ DEVLINK_ATTR_SB_OCC_CUR = 23,
|
|
+ DEVLINK_ATTR_SB_OCC_MAX = 24,
|
|
+ DEVLINK_ATTR_ESWITCH_MODE = 25,
|
|
+ DEVLINK_ATTR_ESWITCH_INLINE_MODE = 26,
|
|
+ DEVLINK_ATTR_DPIPE_TABLES = 27,
|
|
+ DEVLINK_ATTR_DPIPE_TABLE = 28,
|
|
+ DEVLINK_ATTR_DPIPE_TABLE_NAME = 29,
|
|
+ DEVLINK_ATTR_DPIPE_TABLE_SIZE = 30,
|
|
+ DEVLINK_ATTR_DPIPE_TABLE_MATCHES = 31,
|
|
+ DEVLINK_ATTR_DPIPE_TABLE_ACTIONS = 32,
|
|
+ DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED = 33,
|
|
+ DEVLINK_ATTR_DPIPE_ENTRIES = 34,
|
|
+ DEVLINK_ATTR_DPIPE_ENTRY = 35,
|
|
+ DEVLINK_ATTR_DPIPE_ENTRY_INDEX = 36,
|
|
+ DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES = 37,
|
|
+ DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES = 38,
|
|
+ DEVLINK_ATTR_DPIPE_ENTRY_COUNTER = 39,
|
|
+ DEVLINK_ATTR_DPIPE_MATCH = 40,
|
|
+ DEVLINK_ATTR_DPIPE_MATCH_VALUE = 41,
|
|
+ DEVLINK_ATTR_DPIPE_MATCH_TYPE = 42,
|
|
+ DEVLINK_ATTR_DPIPE_ACTION = 43,
|
|
+ DEVLINK_ATTR_DPIPE_ACTION_VALUE = 44,
|
|
+ DEVLINK_ATTR_DPIPE_ACTION_TYPE = 45,
|
|
+ DEVLINK_ATTR_DPIPE_VALUE = 46,
|
|
+ DEVLINK_ATTR_DPIPE_VALUE_MASK = 47,
|
|
+ DEVLINK_ATTR_DPIPE_VALUE_MAPPING = 48,
|
|
+ DEVLINK_ATTR_DPIPE_HEADERS = 49,
|
|
+ DEVLINK_ATTR_DPIPE_HEADER = 50,
|
|
+ DEVLINK_ATTR_DPIPE_HEADER_NAME = 51,
|
|
+ DEVLINK_ATTR_DPIPE_HEADER_ID = 52,
|
|
+ DEVLINK_ATTR_DPIPE_HEADER_FIELDS = 53,
|
|
+ DEVLINK_ATTR_DPIPE_HEADER_GLOBAL = 54,
|
|
+ DEVLINK_ATTR_DPIPE_HEADER_INDEX = 55,
|
|
+ DEVLINK_ATTR_DPIPE_FIELD = 56,
|
|
+ DEVLINK_ATTR_DPIPE_FIELD_NAME = 57,
|
|
+ DEVLINK_ATTR_DPIPE_FIELD_ID = 58,
|
|
+ DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH = 59,
|
|
+ DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE = 60,
|
|
+ DEVLINK_ATTR_PAD = 61,
|
|
+ DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 62,
|
|
+ DEVLINK_ATTR_RESOURCE_LIST = 63,
|
|
+ DEVLINK_ATTR_RESOURCE = 64,
|
|
+ DEVLINK_ATTR_RESOURCE_NAME = 65,
|
|
+ DEVLINK_ATTR_RESOURCE_ID = 66,
|
|
+ DEVLINK_ATTR_RESOURCE_SIZE = 67,
|
|
+ DEVLINK_ATTR_RESOURCE_SIZE_NEW = 68,
|
|
+ DEVLINK_ATTR_RESOURCE_SIZE_VALID = 69,
|
|
+ DEVLINK_ATTR_RESOURCE_SIZE_MIN = 70,
|
|
+ DEVLINK_ATTR_RESOURCE_SIZE_MAX = 71,
|
|
+ DEVLINK_ATTR_RESOURCE_SIZE_GRAN = 72,
|
|
+ DEVLINK_ATTR_RESOURCE_UNIT = 73,
|
|
+ DEVLINK_ATTR_RESOURCE_OCC = 74,
|
|
+ DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID = 75,
|
|
+ DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS = 76,
|
|
+ DEVLINK_ATTR_PORT_FLAVOUR = 77,
|
|
+ DEVLINK_ATTR_PORT_NUMBER = 78,
|
|
+ DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER = 79,
|
|
+ DEVLINK_ATTR_PARAM = 80,
|
|
+ DEVLINK_ATTR_PARAM_NAME = 81,
|
|
+ DEVLINK_ATTR_PARAM_GENERIC = 82,
|
|
+ DEVLINK_ATTR_PARAM_TYPE = 83,
|
|
+ DEVLINK_ATTR_PARAM_VALUES_LIST = 84,
|
|
+ DEVLINK_ATTR_PARAM_VALUE = 85,
|
|
+ DEVLINK_ATTR_PARAM_VALUE_DATA = 86,
|
|
+ DEVLINK_ATTR_PARAM_VALUE_CMODE = 87,
|
|
+ DEVLINK_ATTR_REGION_NAME = 88,
|
|
+ DEVLINK_ATTR_REGION_SIZE = 89,
|
|
+ DEVLINK_ATTR_REGION_SNAPSHOTS = 90,
|
|
+ DEVLINK_ATTR_REGION_SNAPSHOT = 91,
|
|
+ DEVLINK_ATTR_REGION_SNAPSHOT_ID = 92,
|
|
+ DEVLINK_ATTR_REGION_CHUNKS = 93,
|
|
+ DEVLINK_ATTR_REGION_CHUNK = 94,
|
|
+ DEVLINK_ATTR_REGION_CHUNK_DATA = 95,
|
|
+ DEVLINK_ATTR_REGION_CHUNK_ADDR = 96,
|
|
+ DEVLINK_ATTR_REGION_CHUNK_LEN = 97,
|
|
+ __DEVLINK_ATTR_MAX = 98,
|
|
+ DEVLINK_ATTR_MAX = 97,
|
|
+};
|
|
+
|
|
+enum devlink_dpipe_match_type {
|
|
+ DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0,
|
|
+};
|
|
+
|
|
+enum devlink_dpipe_action_type {
|
|
+ DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY = 0,
|
|
+};
|
|
+
|
|
+enum devlink_dpipe_field_ethernet_id {
|
|
+ DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC = 0,
|
|
+};
|
|
+
|
|
+enum devlink_dpipe_field_ipv4_id {
|
|
+ DEVLINK_DPIPE_FIELD_IPV4_DST_IP = 0,
|
|
+};
|
|
+
|
|
+enum devlink_dpipe_field_ipv6_id {
|
|
+ DEVLINK_DPIPE_FIELD_IPV6_DST_IP = 0,
|
|
+};
|
|
+
|
|
+enum devlink_dpipe_header_id {
|
|
+ DEVLINK_DPIPE_HEADER_ETHERNET = 0,
|
|
+ DEVLINK_DPIPE_HEADER_IPV4 = 1,
|
|
+ DEVLINK_DPIPE_HEADER_IPV6 = 2,
|
|
+};
|
|
+
|
|
+enum devlink_resource_unit {
|
|
+ DEVLINK_RESOURCE_UNIT_ENTRY = 0,
|
|
+};
|
|
+
|
|
+struct devlink_dpipe_headers;
|
|
+
|
|
+struct devlink_ops;
|
|
+
|
|
+struct devlink {
|
|
+ struct list_head list;
|
|
+ struct list_head port_list;
|
|
+ struct list_head sb_list;
|
|
+ struct list_head dpipe_table_list;
|
|
+ struct list_head resource_list;
|
|
+ struct list_head param_list;
|
|
+ struct list_head region_list;
|
|
+ u32 snapshot_id;
|
|
+ struct devlink_dpipe_headers *dpipe_headers;
|
|
+ const struct devlink_ops *ops;
|
|
+ struct device *dev;
|
|
+ possible_net_t _net;
|
|
+ struct mutex lock;
|
|
+ long: 64;
|
|
+ char priv[0];
|
|
+};
|
|
+
|
|
+struct devlink_dpipe_headers {
|
|
+ struct devlink_dpipe_header **headers;
|
|
+ unsigned int headers_count;
|
|
+};
|
|
+
|
|
+struct devlink_port;
|
|
+
|
|
+struct devlink_sb_pool_info;
|
|
+
|
|
+struct devlink_ops {
|
|
+ int (*reload)(struct devlink *, struct netlink_ext_ack *);
|
|
+ int (*port_type_set)(struct devlink_port *, enum devlink_port_type);
|
|
+ int (*port_split)(struct devlink *, unsigned int, unsigned int, struct netlink_ext_ack *);
|
|
+ int (*port_unsplit)(struct devlink *, unsigned int, struct netlink_ext_ack *);
|
|
+ int (*sb_pool_get)(struct devlink *, unsigned int, u16, struct devlink_sb_pool_info *);
|
|
+ int (*sb_pool_set)(struct devlink *, unsigned int, u16, u32, enum devlink_sb_threshold_type);
|
|
+ int (*sb_port_pool_get)(struct devlink_port *, unsigned int, u16, u32 *);
|
|
+ int (*sb_port_pool_set)(struct devlink_port *, unsigned int, u16, u32);
|
|
+ int (*sb_tc_pool_bind_get)(struct devlink_port *, unsigned int, u16, enum devlink_sb_pool_type, u16 *, u32 *);
|
|
+ int (*sb_tc_pool_bind_set)(struct devlink_port *, unsigned int, u16, enum devlink_sb_pool_type, u16, u32);
|
|
+ int (*sb_occ_snapshot)(struct devlink *, unsigned int);
|
|
+ int (*sb_occ_max_clear)(struct devlink *, unsigned int);
|
|
+ int (*sb_occ_port_pool_get)(struct devlink_port *, unsigned int, u16, u32 *, u32 *);
|
|
+ int (*sb_occ_tc_port_bind_get)(struct devlink_port *, unsigned int, u16, enum devlink_sb_pool_type, u32 *, u32 *);
|
|
+ int (*eswitch_mode_get)(struct devlink *, u16 *);
|
|
+ int (*eswitch_mode_set)(struct devlink *, u16);
|
|
+ int (*eswitch_inline_mode_get)(struct devlink *, u8 *);
|
|
+ int (*eswitch_inline_mode_set)(struct devlink *, u8);
|
|
+ int (*eswitch_encap_mode_get)(struct devlink *, u8 *);
|
|
+ int (*eswitch_encap_mode_set)(struct devlink *, u8);
|
|
+};
|
|
+
|
|
+struct devlink_port_attrs {
|
|
+ bool set;
|
|
+ enum devlink_port_flavour flavour;
|
|
+ u32 port_number;
|
|
+ bool split;
|
|
+ u32 split_subport_number;
|
|
+};
|
|
+
|
|
+struct devlink_port {
|
|
+ struct list_head list;
|
|
+ struct devlink *devlink;
|
|
+ unsigned int index;
|
|
+ bool registered;
|
|
+ enum devlink_port_type type;
|
|
+ enum devlink_port_type desired_type;
|
|
+ void *type_dev;
|
|
+ struct devlink_port_attrs attrs;
|
|
+};
|
|
+
|
|
+struct devlink_sb_pool_info {
|
|
+ enum devlink_sb_pool_type pool_type;
|
|
+ u32 size;
|
|
+ enum devlink_sb_threshold_type threshold_type;
|
|
+};
|
|
+
|
|
+struct devlink_dpipe_match {
|
|
+ enum devlink_dpipe_match_type type;
|
|
+ unsigned int header_index;
|
|
+ struct devlink_dpipe_header *header;
|
|
+ unsigned int field_id;
|
|
+};
|
|
+
|
|
+struct devlink_dpipe_action {
|
|
+ enum devlink_dpipe_action_type type;
|
|
+ unsigned int header_index;
|
|
+ struct devlink_dpipe_header *header;
|
|
+ unsigned int field_id;
|
|
+};
|
|
+
|
|
+struct devlink_dpipe_value {
|
|
+ union {
|
|
+ struct devlink_dpipe_action *action;
|
|
+ struct devlink_dpipe_match *match;
|
|
+ };
|
|
+ unsigned int mapping_value;
|
|
+ bool mapping_valid;
|
|
+ unsigned int value_size;
|
|
+ void *value;
|
|
+ void *mask;
|
|
+};
|
|
+
|
|
+struct devlink_dpipe_entry {
|
|
+ u64 index;
|
|
+ struct devlink_dpipe_value *match_values;
|
|
+ unsigned int match_values_count;
|
|
+ struct devlink_dpipe_value *action_values;
|
|
+ unsigned int action_values_count;
|
|
+ u64 counter;
|
|
+ bool counter_valid;
|
|
+};
|
|
+
|
|
+struct devlink_dpipe_dump_ctx {
|
|
+ struct genl_info *info;
|
|
+ enum devlink_command cmd;
|
|
+ struct sk_buff *skb;
|
|
+ struct nlattr *nest;
|
|
+ void *hdr;
|
|
+};
|
|
+
|
|
+struct devlink_dpipe_table_ops;
|
|
+
|
|
+struct devlink_dpipe_table {
|
|
+ void *priv;
|
|
+ struct list_head list;
|
|
+ const char *name;
|
|
+ bool counters_enabled;
|
|
+ bool counter_control_extern;
|
|
+ bool resource_valid;
|
|
+ u64 resource_id;
|
|
+ u64 resource_units;
|
|
+ struct devlink_dpipe_table_ops *table_ops;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct devlink_dpipe_table_ops {
|
|
+ int (*actions_dump)(void *, struct sk_buff *);
|
|
+ int (*matches_dump)(void *, struct sk_buff *);
|
|
+ int (*entries_dump)(void *, bool, struct devlink_dpipe_dump_ctx *);
|
|
+ int (*counters_set_update)(void *, bool);
|
|
+ u64 (*size_get)(void *);
|
|
+};
|
|
+
|
|
+struct devlink_resource_size_params {
|
|
+ u64 size_min;
|
|
+ u64 size_max;
|
|
+ u64 size_granularity;
|
|
+ enum devlink_resource_unit unit;
|
|
+};
|
|
+
|
|
+typedef u64 devlink_resource_occ_get_t(void *);
|
|
+
|
|
+struct devlink_resource {
|
|
+ const char *name;
|
|
+ u64 id;
|
|
+ u64 size;
|
|
+ u64 size_new;
|
|
+ bool size_valid;
|
|
+ struct devlink_resource *parent;
|
|
+ struct devlink_resource_size_params size_params;
|
|
+ struct list_head list;
|
|
+ struct list_head resource_list;
|
|
+ devlink_resource_occ_get_t *occ_get;
|
|
+ void *occ_get_priv;
|
|
+};
|
|
+
|
|
+enum devlink_param_type {
|
|
+ DEVLINK_PARAM_TYPE_U8 = 0,
|
|
+ DEVLINK_PARAM_TYPE_U16 = 1,
|
|
+ DEVLINK_PARAM_TYPE_U32 = 2,
|
|
+ DEVLINK_PARAM_TYPE_STRING = 3,
|
|
+ DEVLINK_PARAM_TYPE_BOOL = 4,
|
|
+};
|
|
+
|
|
+union devlink_param_value {
|
|
+ u8 vu8;
|
|
+ u16 vu16;
|
|
+ u32 vu32;
|
|
+ char vstr[32];
|
|
+ bool vbool;
|
|
+};
|
|
+
|
|
+struct devlink_param_gset_ctx {
|
|
+ union devlink_param_value val;
|
|
+ enum devlink_param_cmode cmode;
|
|
+};
|
|
+
|
|
+struct devlink_param {
|
|
+ u32 id;
|
|
+ const char *name;
|
|
+ bool generic;
|
|
+ enum devlink_param_type type;
|
|
+ long unsigned int supported_cmodes;
|
|
+ int (*get)(struct devlink *, u32, struct devlink_param_gset_ctx *);
|
|
+ int (*set)(struct devlink *, u32, struct devlink_param_gset_ctx *);
|
|
+ int (*validate)(struct devlink *, u32, union devlink_param_value, struct netlink_ext_ack *);
|
|
+};
|
|
+
|
|
+struct devlink_param_item {
|
|
+ struct list_head list;
|
|
+ const struct devlink_param *param;
|
|
+ union devlink_param_value driverinit_value;
|
|
+ bool driverinit_value_valid;
|
|
+};
|
|
+
|
|
+enum devlink_param_generic_id {
|
|
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET = 0,
|
|
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS = 1,
|
|
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV = 2,
|
|
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT = 3,
|
|
+ __DEVLINK_PARAM_GENERIC_ID_MAX = 4,
|
|
+ DEVLINK_PARAM_GENERIC_ID_MAX = 3,
|
|
+};
|
|
+
|
|
+typedef void devlink_snapshot_data_dest_t(const void *);
|
|
+
|
|
+struct trace_event_raw_devlink_hwmsg {
|
|
+ struct trace_entry ent;
|
|
+ u32 __data_loc_bus_name;
|
|
+ u32 __data_loc_dev_name;
|
|
+ u32 __data_loc_driver_name;
|
|
+ bool incoming;
|
|
+ long unsigned int type;
|
|
+ u32 __data_loc_buf;
|
|
+ size_t len;
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_devlink_hwmsg {
|
|
+ u32 bus_name;
|
|
+ u32 dev_name;
|
|
+ u32 driver_name;
|
|
+ u32 buf;
|
|
+};
|
|
+
|
|
+struct devlink_sb {
|
|
+ struct list_head list;
|
|
+ unsigned int index;
|
|
+ u32 size;
|
|
+ u16 ingress_pools_count;
|
|
+ u16 egress_pools_count;
|
|
+ u16 ingress_tc_count;
|
|
+ u16 egress_tc_count;
|
|
+};
|
|
+
|
|
+struct devlink_region {
|
|
+ struct devlink *devlink;
|
|
+ struct list_head list;
|
|
+ const char *name;
|
|
+ struct list_head snapshot_list;
|
|
+ u32 max_snapshots;
|
|
+ u32 cur_snapshots;
|
|
+ u64 size;
|
|
+};
|
|
+
|
|
+struct devlink_snapshot {
|
|
+ struct list_head list;
|
|
+ struct devlink_region *region;
|
|
+ devlink_snapshot_data_dest_t *data_destructor;
|
|
+ u64 data_len;
|
|
+ u8 *data;
|
|
+ u32 id;
|
|
+};
|
|
+
|
|
+enum devlink_multicast_groups {
|
|
+ DEVLINK_MCGRP_CONFIG = 0,
|
|
+};
|
|
+
|
|
+struct gro_cell;
|
|
+
|
|
+struct gro_cells {
|
|
+ struct gro_cell *cells;
|
|
+};
|
|
+
|
|
+struct gro_cell {
|
|
+ struct sk_buff_head napi_skbs;
|
|
+ struct napi_struct napi;
|
|
+};
|
|
+
|
|
+struct group_req {
|
|
+ __u32 gr_interface;
|
|
+ int: 32;
|
|
+ struct __kernel_sockaddr_storage gr_group;
|
|
+};
|
|
+
|
|
+struct group_source_req {
|
|
+ __u32 gsr_interface;
|
|
+ int: 32;
|
|
+ struct __kernel_sockaddr_storage gsr_group;
|
|
+ struct __kernel_sockaddr_storage gsr_source;
|
|
+};
|
|
+
|
|
+struct group_filter {
|
|
+ __u32 gf_interface;
|
|
+ int: 32;
|
|
+ struct __kernel_sockaddr_storage gf_group;
|
|
+ __u32 gf_fmode;
|
|
+ __u32 gf_numsrc;
|
|
+ struct __kernel_sockaddr_storage gf_slist[1];
|
|
+};
|
|
+
|
|
+struct compat_cmsghdr {
|
|
+ compat_size_t cmsg_len;
|
|
+ compat_int_t cmsg_level;
|
|
+ compat_int_t cmsg_type;
|
|
+};
|
|
+
|
|
+struct compat_group_req {
|
|
+ __u32 gr_interface;
|
|
+ struct __kernel_sockaddr_storage gr_group;
|
|
+};
|
|
+
|
|
+struct compat_group_source_req {
|
|
+ __u32 gsr_interface;
|
|
+ struct __kernel_sockaddr_storage gsr_group;
|
|
+ struct __kernel_sockaddr_storage gsr_source;
|
|
+};
|
|
+
|
|
+struct compat_group_filter {
|
|
+ __u32 gf_interface;
|
|
+ struct __kernel_sockaddr_storage gf_group;
|
|
+ __u32 gf_fmode;
|
|
+ __u32 gf_numsrc;
|
|
+ struct __kernel_sockaddr_storage gf_slist[1];
|
|
+};
|
|
+
|
|
+typedef struct sk_buff * (*gro_receive_t)(struct list_head *, struct sk_buff *);
|
|
+
|
|
+struct fch_hdr {
|
|
+ __u8 daddr[6];
|
|
+ __u8 saddr[6];
|
|
+};
|
|
+
|
|
+struct fcllc {
|
|
+ __u8 dsap;
|
|
+ __u8 ssap;
|
|
+ __u8 llc;
|
|
+ __u8 protid[3];
|
|
+ __be16 ethertype;
|
|
+};
|
|
+
|
|
+enum macvlan_mode {
|
|
+ MACVLAN_MODE_PRIVATE = 1,
|
|
+ MACVLAN_MODE_VEPA = 2,
|
|
+ MACVLAN_MODE_BRIDGE = 4,
|
|
+ MACVLAN_MODE_PASSTHRU = 8,
|
|
+ MACVLAN_MODE_SOURCE = 16,
|
|
+};
|
|
+
|
|
+struct tc_ratespec {
|
|
+ unsigned char cell_log;
|
|
+ __u8 linklayer;
|
|
+ short unsigned int overhead;
|
|
+ short int cell_align;
|
|
+ short unsigned int mpu;
|
|
+ __u32 rate;
|
|
+};
|
|
+
|
|
+struct tc_prio_qopt {
|
|
+ int bands;
|
|
+ __u8 priomap[16];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCA_UNSPEC = 0,
|
|
+ TCA_KIND = 1,
|
|
+ TCA_OPTIONS = 2,
|
|
+ TCA_STATS = 3,
|
|
+ TCA_XSTATS = 4,
|
|
+ TCA_RATE = 5,
|
|
+ TCA_FCNT = 6,
|
|
+ TCA_STATS2 = 7,
|
|
+ TCA_STAB = 8,
|
|
+ TCA_PAD = 9,
|
|
+ TCA_DUMP_INVISIBLE = 10,
|
|
+ TCA_CHAIN = 11,
|
|
+ TCA_HW_OFFLOAD = 12,
|
|
+ TCA_INGRESS_BLOCK = 13,
|
|
+ TCA_EGRESS_BLOCK = 14,
|
|
+ __TCA_MAX = 15,
|
|
+};
|
|
+
|
|
+struct vlan_pcpu_stats {
|
|
+ u64 rx_packets;
|
|
+ u64 rx_bytes;
|
|
+ u64 rx_multicast;
|
|
+ u64 tx_packets;
|
|
+ u64 tx_bytes;
|
|
+ struct u64_stats_sync syncp;
|
|
+ u32 rx_errors;
|
|
+ u32 tx_dropped;
|
|
+};
|
|
+
|
|
+struct netpoll___2;
|
|
+
|
|
+struct skb_array {
|
|
+ struct ptr_ring ring;
|
|
+};
|
|
+
|
|
+struct macvlan_port;
|
|
+
|
|
+struct macvlan_dev {
|
|
+ struct net_device *dev;
|
|
+ struct list_head list;
|
|
+ struct hlist_node hlist;
|
|
+ struct macvlan_port *port;
|
|
+ struct net_device *lowerdev;
|
|
+ void *accel_priv;
|
|
+ struct vlan_pcpu_stats *pcpu_stats;
|
|
+ long unsigned int mc_filter[4];
|
|
+ netdev_features_t set_features;
|
|
+ enum macvlan_mode mode;
|
|
+ u16 flags;
|
|
+ int nest_level;
|
|
+ unsigned int macaddr_count;
|
|
+ struct netpoll___2 *netpoll;
|
|
+};
|
|
+
|
|
+struct psched_ratecfg {
|
|
+ u64 rate_bytes_ps;
|
|
+ u32 mult;
|
|
+ u16 overhead;
|
|
+ u8 linklayer;
|
|
+ u8 shift;
|
|
+};
|
|
+
|
|
+struct mini_Qdisc_pair {
|
|
+ struct mini_Qdisc miniq1;
|
|
+ struct mini_Qdisc miniq2;
|
|
+ struct mini_Qdisc **p_miniq;
|
|
+};
|
|
+
|
|
+struct pfifo_fast_priv {
|
|
+ struct skb_array q[3];
|
|
+};
|
|
+
|
|
+struct tc_qopt_offload_stats {
|
|
+ struct gnet_stats_basic_packed *bstats;
|
|
+ struct gnet_stats_queue *qstats;
|
|
+};
|
|
+
|
|
+enum tc_mq_command {
|
|
+ TC_MQ_CREATE = 0,
|
|
+ TC_MQ_DESTROY = 1,
|
|
+ TC_MQ_STATS = 2,
|
|
+};
|
|
+
|
|
+struct tc_mq_qopt_offload {
|
|
+ enum tc_mq_command command;
|
|
+ u32 handle;
|
|
+ struct tc_qopt_offload_stats stats;
|
|
+};
|
|
+
|
|
+struct mq_sched {
|
|
+ struct Qdisc **qdiscs;
|
|
+};
|
|
+
|
|
+enum tc_link_layer {
|
|
+ TC_LINKLAYER_UNAWARE = 0,
|
|
+ TC_LINKLAYER_ETHERNET = 1,
|
|
+ TC_LINKLAYER_ATM = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCA_STAB_UNSPEC = 0,
|
|
+ TCA_STAB_BASE = 1,
|
|
+ TCA_STAB_DATA = 2,
|
|
+ __TCA_STAB_MAX = 3,
|
|
+};
|
|
+
|
|
+struct qdisc_rate_table {
|
|
+ struct tc_ratespec rate;
|
|
+ u32 data[256];
|
|
+ struct qdisc_rate_table *next;
|
|
+ int refcnt;
|
|
+};
|
|
+
|
|
+struct Qdisc_class_common {
|
|
+ u32 classid;
|
|
+ struct hlist_node hnode;
|
|
+};
|
|
+
|
|
+struct Qdisc_class_hash {
|
|
+ struct hlist_head *hash;
|
|
+ unsigned int hashsize;
|
|
+ unsigned int hashmask;
|
|
+ unsigned int hashelems;
|
|
+};
|
|
+
|
|
+struct qdisc_watchdog {
|
|
+ u64 last_expires;
|
|
+ struct hrtimer timer;
|
|
+ struct Qdisc *qdisc;
|
|
+};
|
|
+
|
|
+struct check_loop_arg {
|
|
+ struct qdisc_walker w;
|
|
+ struct Qdisc *p;
|
|
+ int depth;
|
|
+};
|
|
+
|
|
+struct tcf_bind_args {
|
|
+ struct tcf_walker w;
|
|
+ u32 classid;
|
|
+ long unsigned int cl;
|
|
+};
|
|
+
|
|
+struct qdisc_dump_args {
|
|
+ struct qdisc_walker w;
|
|
+ struct sk_buff *skb;
|
|
+ struct netlink_callback *cb;
|
|
+};
|
|
+
|
|
+enum net_xmit_qdisc_t {
|
|
+ __NET_XMIT_STOLEN = 65536,
|
|
+ __NET_XMIT_BYPASS = 131072,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCA_ACT_UNSPEC = 0,
|
|
+ TCA_ACT_KIND = 1,
|
|
+ TCA_ACT_OPTIONS = 2,
|
|
+ TCA_ACT_INDEX = 3,
|
|
+ TCA_ACT_STATS = 4,
|
|
+ TCA_ACT_PAD = 5,
|
|
+ TCA_ACT_COOKIE = 6,
|
|
+ __TCA_ACT_MAX = 7,
|
|
+};
|
|
+
|
|
+struct tcf_t {
|
|
+ __u64 install;
|
|
+ __u64 lastuse;
|
|
+ __u64 expires;
|
|
+ __u64 firstuse;
|
|
+};
|
|
+
|
|
+typedef void tcf_chain_head_change_t(struct tcf_proto *, void *);
|
|
+
|
|
+struct tcf_idrinfo {
|
|
+ spinlock_t lock;
|
|
+ struct idr action_idr;
|
|
+ struct net *net;
|
|
+};
|
|
+
|
|
+struct tc_action_ops;
|
|
+
|
|
+struct tc_cookie;
|
|
+
|
|
+struct tc_action {
|
|
+ const struct tc_action_ops *ops;
|
|
+ __u32 type;
|
|
+ __u32 order;
|
|
+ struct tcf_idrinfo *idrinfo;
|
|
+ u32 tcfa_index;
|
|
+ refcount_t tcfa_refcnt;
|
|
+ atomic_t tcfa_bindcnt;
|
|
+ int tcfa_action;
|
|
+ struct tcf_t tcfa_tm;
|
|
+ struct gnet_stats_basic_packed tcfa_bstats;
|
|
+ struct gnet_stats_queue tcfa_qstats;
|
|
+ struct net_rate_estimator *tcfa_rate_est;
|
|
+ spinlock_t tcfa_lock;
|
|
+ struct gnet_stats_basic_cpu *cpu_bstats;
|
|
+ struct gnet_stats_queue *cpu_qstats;
|
|
+ struct tc_cookie *act_cookie;
|
|
+ struct tcf_chain *goto_chain;
|
|
+};
|
|
+
|
|
+struct tc_action_ops {
|
|
+ struct list_head head;
|
|
+ char kind[16];
|
|
+ __u32 type;
|
|
+ size_t size;
|
|
+ struct module *owner;
|
|
+ int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *);
|
|
+ int (*dump)(struct sk_buff *, struct tc_action *, int, int);
|
|
+ void (*cleanup)(struct tc_action *);
|
|
+ int (*lookup)(struct net *, struct tc_action **, u32, struct netlink_ext_ack *);
|
|
+ int (*init)(struct net *, struct nlattr *, struct nlattr *, struct tc_action **, int, int, bool, struct netlink_ext_ack *);
|
|
+ int (*walk)(struct net *, struct sk_buff *, struct netlink_callback *, int, const struct tc_action_ops *, struct netlink_ext_ack *);
|
|
+ void (*stats_update)(struct tc_action *, u64, u32, u64);
|
|
+ size_t (*get_fill_size)(const struct tc_action *);
|
|
+ struct net_device * (*get_dev)(const struct tc_action *);
|
|
+ void (*put_dev)(struct net_device *);
|
|
+};
|
|
+
|
|
+struct tc_cookie {
|
|
+ u8 *data;
|
|
+ u32 len;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+enum tcf_block_binder_type {
|
|
+ TCF_BLOCK_BINDER_TYPE_UNSPEC = 0,
|
|
+ TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS = 1,
|
|
+ TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS = 2,
|
|
+};
|
|
+
|
|
+struct tcf_block_ext_info {
|
|
+ enum tcf_block_binder_type binder_type;
|
|
+ tcf_chain_head_change_t *chain_head_change;
|
|
+ void *chain_head_change_priv;
|
|
+ u32 block_index;
|
|
+};
|
|
+
|
|
+struct tcf_exts {
|
|
+ __u32 type;
|
|
+ int nr_actions;
|
|
+ struct tc_action **actions;
|
|
+ struct net *net;
|
|
+ int action;
|
|
+ int police;
|
|
+};
|
|
+
|
|
+enum tc_block_command {
|
|
+ TC_BLOCK_BIND = 0,
|
|
+ TC_BLOCK_UNBIND = 1,
|
|
+};
|
|
+
|
|
+struct tc_block_offload {
|
|
+ enum tc_block_command command;
|
|
+ enum tcf_block_binder_type binder_type;
|
|
+ struct tcf_block *block;
|
|
+ struct netlink_ext_ack *extack;
|
|
+};
|
|
+
|
|
+struct tcf_filter_chain_list_item {
|
|
+ struct list_head list;
|
|
+ tcf_chain_head_change_t *chain_head_change;
|
|
+ void *chain_head_change_priv;
|
|
+};
|
|
+
|
|
+struct tcf_net {
|
|
+ struct idr idr;
|
|
+};
|
|
+
|
|
+struct tcf_block_owner_item {
|
|
+ struct list_head list;
|
|
+ struct Qdisc *q;
|
|
+ enum tcf_block_binder_type binder_type;
|
|
+};
|
|
+
|
|
+struct tcf_block_cb {
|
|
+ struct list_head list;
|
|
+ tc_setup_cb_t *cb;
|
|
+ void *cb_ident;
|
|
+ void *cb_priv;
|
|
+ unsigned int refcnt;
|
|
+};
|
|
+
|
|
+struct tcf_chain_info {
|
|
+ struct tcf_proto **pprev;
|
|
+ struct tcf_proto *next;
|
|
+};
|
|
+
|
|
+struct tcf_dump_args {
|
|
+ struct tcf_walker w;
|
|
+ struct sk_buff *skb;
|
|
+ struct netlink_callback *cb;
|
|
+ struct tcf_block *block;
|
|
+ struct Qdisc *q;
|
|
+ u32 parent;
|
|
+};
|
|
+
|
|
+struct tcamsg {
|
|
+ unsigned char tca_family;
|
|
+ unsigned char tca__pad1;
|
|
+ short unsigned int tca__pad2;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCA_ROOT_UNSPEC = 0,
|
|
+ TCA_ROOT_TAB = 1,
|
|
+ TCA_ROOT_FLAGS = 2,
|
|
+ TCA_ROOT_COUNT = 3,
|
|
+ TCA_ROOT_TIME_DELTA = 4,
|
|
+ __TCA_ROOT_MAX = 5,
|
|
+};
|
|
+
|
|
+struct tc_action_net {
|
|
+ struct tcf_idrinfo *idrinfo;
|
|
+ const struct tc_action_ops *ops;
|
|
+};
|
|
+
|
|
+struct tcf_action_net {
|
|
+ struct rhashtable egdev_ht;
|
|
+};
|
|
+
|
|
+struct tcf_action_egdev_cb {
|
|
+ struct list_head list;
|
|
+ tc_setup_cb_t *cb;
|
|
+ void *cb_priv;
|
|
+};
|
|
+
|
|
+struct tcf_action_egdev {
|
|
+ struct rhash_head ht_node;
|
|
+ const struct net_device *dev;
|
|
+ unsigned int refcnt;
|
|
+ struct list_head cb_list;
|
|
+};
|
|
+
|
|
+struct tc_fifo_qopt {
|
|
+ __u32 limit;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCA_FQ_CODEL_UNSPEC = 0,
|
|
+ TCA_FQ_CODEL_TARGET = 1,
|
|
+ TCA_FQ_CODEL_LIMIT = 2,
|
|
+ TCA_FQ_CODEL_INTERVAL = 3,
|
|
+ TCA_FQ_CODEL_ECN = 4,
|
|
+ TCA_FQ_CODEL_FLOWS = 5,
|
|
+ TCA_FQ_CODEL_QUANTUM = 6,
|
|
+ TCA_FQ_CODEL_CE_THRESHOLD = 7,
|
|
+ TCA_FQ_CODEL_DROP_BATCH_SIZE = 8,
|
|
+ TCA_FQ_CODEL_MEMORY_LIMIT = 9,
|
|
+ __TCA_FQ_CODEL_MAX = 10,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCA_FQ_CODEL_XSTATS_QDISC = 0,
|
|
+ TCA_FQ_CODEL_XSTATS_CLASS = 1,
|
|
+};
|
|
+
|
|
+struct tc_fq_codel_qd_stats {
|
|
+ __u32 maxpacket;
|
|
+ __u32 drop_overlimit;
|
|
+ __u32 ecn_mark;
|
|
+ __u32 new_flow_count;
|
|
+ __u32 new_flows_len;
|
|
+ __u32 old_flows_len;
|
|
+ __u32 ce_mark;
|
|
+ __u32 memory_usage;
|
|
+ __u32 drop_overmemory;
|
|
+};
|
|
+
|
|
+struct tc_fq_codel_cl_stats {
|
|
+ __s32 deficit;
|
|
+ __u32 ldelay;
|
|
+ __u32 count;
|
|
+ __u32 lastcount;
|
|
+ __u32 dropping;
|
|
+ __s32 drop_next;
|
|
+};
|
|
+
|
|
+struct tc_fq_codel_xstats {
|
|
+ __u32 type;
|
|
+ union {
|
|
+ struct tc_fq_codel_qd_stats qdisc_stats;
|
|
+ struct tc_fq_codel_cl_stats class_stats;
|
|
+ };
|
|
+};
|
|
+
|
|
+enum {
|
|
+ INET_ECN_NOT_ECT = 0,
|
|
+ INET_ECN_ECT_1 = 1,
|
|
+ INET_ECN_ECT_0 = 2,
|
|
+ INET_ECN_CE = 3,
|
|
+ INET_ECN_MASK = 3,
|
|
+};
|
|
+
|
|
+typedef u32 codel_time_t;
|
|
+
|
|
+typedef s32 codel_tdiff_t;
|
|
+
|
|
+struct codel_params {
|
|
+ codel_time_t target;
|
|
+ codel_time_t ce_threshold;
|
|
+ codel_time_t interval;
|
|
+ u32 mtu;
|
|
+ bool ecn;
|
|
+};
|
|
+
|
|
+struct codel_vars {
|
|
+ u32 count;
|
|
+ u32 lastcount;
|
|
+ bool dropping;
|
|
+ u16 rec_inv_sqrt;
|
|
+ codel_time_t first_above_time;
|
|
+ codel_time_t drop_next;
|
|
+ codel_time_t ldelay;
|
|
+};
|
|
+
|
|
+struct codel_stats {
|
|
+ u32 maxpacket;
|
|
+ u32 drop_count;
|
|
+ u32 drop_len;
|
|
+ u32 ecn_mark;
|
|
+ u32 ce_mark;
|
|
+};
|
|
+
|
|
+typedef u32 (*codel_skb_len_t)(const struct sk_buff *);
|
|
+
|
|
+typedef codel_time_t (*codel_skb_time_t)(const struct sk_buff *);
|
|
+
|
|
+typedef void (*codel_skb_drop_t)(struct sk_buff *, void *);
|
|
+
|
|
+typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *, void *);
|
|
+
|
|
+struct codel_skb_cb {
|
|
+ codel_time_t enqueue_time;
|
|
+ unsigned int mem_usage;
|
|
+};
|
|
+
|
|
+struct fq_codel_flow {
|
|
+ struct sk_buff *head;
|
|
+ struct sk_buff *tail;
|
|
+ struct list_head flowchain;
|
|
+ int deficit;
|
|
+ u32 dropped;
|
|
+ struct codel_vars cvars;
|
|
+};
|
|
+
|
|
+struct fq_codel_sched_data {
|
|
+ struct tcf_proto *filter_list;
|
|
+ struct tcf_block *block;
|
|
+ struct fq_codel_flow *flows;
|
|
+ u32 *backlogs;
|
|
+ u32 flows_cnt;
|
|
+ u32 quantum;
|
|
+ u32 drop_batch_size;
|
|
+ u32 memory_limit;
|
|
+ struct codel_params cparams;
|
|
+ struct codel_stats cstats;
|
|
+ u32 memory_usage;
|
|
+ u32 drop_overmemory;
|
|
+ u32 drop_overlimit;
|
|
+ u32 new_flow_count;
|
|
+ struct list_head new_flows;
|
|
+ struct list_head old_flows;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCA_CGROUP_UNSPEC = 0,
|
|
+ TCA_CGROUP_ACT = 1,
|
|
+ TCA_CGROUP_POLICE = 2,
|
|
+ TCA_CGROUP_EMATCHES = 3,
|
|
+ __TCA_CGROUP_MAX = 4,
|
|
+};
|
|
+
|
|
+struct tcf_ematch_tree_hdr {
|
|
+ __u16 nmatches;
|
|
+ __u16 progid;
|
|
+};
|
|
+
|
|
+struct tcf_pkt_info {
|
|
+ unsigned char *ptr;
|
|
+ int nexthdr;
|
|
+};
|
|
+
|
|
+struct tcf_ematch_ops;
|
|
+
|
|
+struct tcf_ematch {
|
|
+ struct tcf_ematch_ops *ops;
|
|
+ long unsigned int data;
|
|
+ unsigned int datalen;
|
|
+ u16 matchid;
|
|
+ u16 flags;
|
|
+ struct net *net;
|
|
+};
|
|
+
|
|
+struct tcf_ematch_ops {
|
|
+ int kind;
|
|
+ int datalen;
|
|
+ int (*change)(struct net *, void *, int, struct tcf_ematch *);
|
|
+ int (*match)(struct sk_buff *, struct tcf_ematch *, struct tcf_pkt_info *);
|
|
+ void (*destroy)(struct tcf_ematch *);
|
|
+ int (*dump)(struct sk_buff *, struct tcf_ematch *);
|
|
+ struct module *owner;
|
|
+ struct list_head link;
|
|
+};
|
|
+
|
|
+struct tcf_ematch_tree {
|
|
+ struct tcf_ematch_tree_hdr hdr;
|
|
+ struct tcf_ematch *matches;
|
|
+};
|
|
+
|
|
+struct cls_cgroup_head {
|
|
+ u32 handle;
|
|
+ struct tcf_exts exts;
|
|
+ struct tcf_ematch_tree ematches;
|
|
+ struct tcf_proto *tp;
|
|
+ struct rcu_work rwork;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCA_EMATCH_TREE_UNSPEC = 0,
|
|
+ TCA_EMATCH_TREE_HDR = 1,
|
|
+ TCA_EMATCH_TREE_LIST = 2,
|
|
+ __TCA_EMATCH_TREE_MAX = 3,
|
|
+};
|
|
+
|
|
+struct tcf_ematch_hdr {
|
|
+ __u16 matchid;
|
|
+ __u16 kind;
|
|
+ __u16 flags;
|
|
+ __u16 pad;
|
|
+};
|
|
+
|
|
+struct sockaddr_nl {
|
|
+ __kernel_sa_family_t nl_family;
|
|
+ short unsigned int nl_pad;
|
|
+ __u32 nl_pid;
|
|
+ __u32 nl_groups;
|
|
+};
|
|
+
|
|
+struct nlmsgerr {
|
|
+ int error;
|
|
+ struct nlmsghdr msg;
|
|
+};
|
|
+
|
|
+enum nlmsgerr_attrs {
|
|
+ NLMSGERR_ATTR_UNUSED = 0,
|
|
+ NLMSGERR_ATTR_MSG = 1,
|
|
+ NLMSGERR_ATTR_OFFS = 2,
|
|
+ NLMSGERR_ATTR_COOKIE = 3,
|
|
+ __NLMSGERR_ATTR_MAX = 4,
|
|
+ NLMSGERR_ATTR_MAX = 3,
|
|
+};
|
|
+
|
|
+struct nl_pktinfo {
|
|
+ __u32 group;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NETLINK_UNCONNECTED = 0,
|
|
+ NETLINK_CONNECTED = 1,
|
|
+};
|
|
+
|
|
+enum netlink_skb_flags {
|
|
+ NETLINK_SKB_DST = 8,
|
|
+};
|
|
+
|
|
+struct netlink_notify {
|
|
+ struct net *net;
|
|
+ u32 portid;
|
|
+ int protocol;
|
|
+};
|
|
+
|
|
+struct netlink_tap {
|
|
+ struct net_device *dev;
|
|
+ struct module *module;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct netlink_sock {
|
|
+ struct sock sk;
|
|
+ u32 portid;
|
|
+ u32 dst_portid;
|
|
+ u32 dst_group;
|
|
+ u32 flags;
|
|
+ u32 subscriptions;
|
|
+ u32 ngroups;
|
|
+ long unsigned int *groups;
|
|
+ long unsigned int state;
|
|
+ size_t max_recvmsg_len;
|
|
+ wait_queue_head_t wait;
|
|
+ bool bound;
|
|
+ bool cb_running;
|
|
+ int dump_done_errno;
|
|
+ struct netlink_callback cb;
|
|
+ struct mutex *cb_mutex;
|
|
+ struct mutex cb_def_mutex;
|
|
+ void (*netlink_rcv)(struct sk_buff *);
|
|
+ int (*netlink_bind)(struct net *, int);
|
|
+ void (*netlink_unbind)(struct net *, int);
|
|
+ struct module *module;
|
|
+ struct rhash_head node;
|
|
+ struct callback_head rcu;
|
|
+ struct work_struct work;
|
|
+};
|
|
+
|
|
+struct listeners;
|
|
+
|
|
+struct netlink_table {
|
|
+ struct rhashtable hash;
|
|
+ struct hlist_head mc_list;
|
|
+ struct listeners *listeners;
|
|
+ unsigned int flags;
|
|
+ unsigned int groups;
|
|
+ struct mutex *cb_mutex;
|
|
+ struct module *module;
|
|
+ int (*bind)(struct net *, int);
|
|
+ void (*unbind)(struct net *, int);
|
|
+ bool (*compare)(struct net *, struct sock *);
|
|
+ int registered;
|
|
+};
|
|
+
|
|
+struct listeners {
|
|
+ struct callback_head rcu;
|
|
+ long unsigned int masks[0];
|
|
+};
|
|
+
|
|
+struct netlink_tap_net {
|
|
+ struct list_head netlink_tap_all;
|
|
+ struct mutex netlink_tap_lock;
|
|
+};
|
|
+
|
|
+struct netlink_compare_arg {
|
|
+ possible_net_t pnet;
|
|
+ u32 portid;
|
|
+};
|
|
+
|
|
+struct netlink_broadcast_data {
|
|
+ struct sock *exclude_sk;
|
|
+ struct net *net;
|
|
+ u32 portid;
|
|
+ u32 group;
|
|
+ int failure;
|
|
+ int delivery_failure;
|
|
+ int congested;
|
|
+ int delivered;
|
|
+ gfp_t allocation;
|
|
+ struct sk_buff *skb;
|
|
+ struct sk_buff *skb2;
|
|
+ int (*tx_filter)(struct sock *, struct sk_buff *, void *);
|
|
+ void *tx_data;
|
|
+};
|
|
+
|
|
+struct netlink_set_err_data {
|
|
+ struct sock *exclude_sk;
|
|
+ u32 portid;
|
|
+ u32 group;
|
|
+ int code;
|
|
+};
|
|
+
|
|
+struct nl_seq_iter {
|
|
+ struct seq_net_private p;
|
|
+ struct rhashtable_iter hti;
|
|
+ int link;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CTRL_CMD_UNSPEC = 0,
|
|
+ CTRL_CMD_NEWFAMILY = 1,
|
|
+ CTRL_CMD_DELFAMILY = 2,
|
|
+ CTRL_CMD_GETFAMILY = 3,
|
|
+ CTRL_CMD_NEWOPS = 4,
|
|
+ CTRL_CMD_DELOPS = 5,
|
|
+ CTRL_CMD_GETOPS = 6,
|
|
+ CTRL_CMD_NEWMCAST_GRP = 7,
|
|
+ CTRL_CMD_DELMCAST_GRP = 8,
|
|
+ CTRL_CMD_GETMCAST_GRP = 9,
|
|
+ __CTRL_CMD_MAX = 10,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CTRL_ATTR_UNSPEC = 0,
|
|
+ CTRL_ATTR_FAMILY_ID = 1,
|
|
+ CTRL_ATTR_FAMILY_NAME = 2,
|
|
+ CTRL_ATTR_VERSION = 3,
|
|
+ CTRL_ATTR_HDRSIZE = 4,
|
|
+ CTRL_ATTR_MAXATTR = 5,
|
|
+ CTRL_ATTR_OPS = 6,
|
|
+ CTRL_ATTR_MCAST_GROUPS = 7,
|
|
+ __CTRL_ATTR_MAX = 8,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CTRL_ATTR_OP_UNSPEC = 0,
|
|
+ CTRL_ATTR_OP_ID = 1,
|
|
+ CTRL_ATTR_OP_FLAGS = 2,
|
|
+ __CTRL_ATTR_OP_MAX = 3,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ CTRL_ATTR_MCAST_GRP_UNSPEC = 0,
|
|
+ CTRL_ATTR_MCAST_GRP_NAME = 1,
|
|
+ CTRL_ATTR_MCAST_GRP_ID = 2,
|
|
+ __CTRL_ATTR_MCAST_GRP_MAX = 3,
|
|
+};
|
|
+
|
|
+struct nf_hook_entries_rcu_head {
|
|
+ struct callback_head head;
|
|
+ void *allocation;
|
|
+};
|
|
+
|
|
+struct nf_loginfo {
|
|
+ u_int8_t type;
|
|
+ union {
|
|
+ struct {
|
|
+ u_int32_t copy_len;
|
|
+ u_int16_t group;
|
|
+ u_int16_t qthreshold;
|
|
+ u_int16_t flags;
|
|
+ } ulog;
|
|
+ struct {
|
|
+ u_int8_t level;
|
|
+ u_int8_t logflags;
|
|
+ } log;
|
|
+ } u;
|
|
+};
|
|
+
|
|
+struct nf_log_buf {
|
|
+ unsigned int count;
|
|
+ char buf[1020];
|
|
+};
|
|
+
|
|
+struct ip_rt_info {
|
|
+ __be32 daddr;
|
|
+ __be32 saddr;
|
|
+ u_int8_t tos;
|
|
+ u_int32_t mark;
|
|
+};
|
|
+
|
|
+struct ip6_rt_info {
|
|
+ struct in6_addr daddr;
|
|
+ struct in6_addr saddr;
|
|
+ u_int32_t mark;
|
|
+};
|
|
+
|
|
+struct nf_sockopt_ops {
|
|
+ struct list_head list;
|
|
+ u_int8_t pf;
|
|
+ int set_optmin;
|
|
+ int set_optmax;
|
|
+ int (*set)(struct sock *, int, void *, unsigned int);
|
|
+ int (*compat_set)(struct sock *, int, void *, unsigned int);
|
|
+ int get_optmin;
|
|
+ int get_optmax;
|
|
+ int (*get)(struct sock *, int, void *, int *);
|
|
+ int (*compat_get)(struct sock *, int, void *, int *);
|
|
+ struct module *owner;
|
|
+};
|
|
+
|
|
+struct xt_table_info;
|
|
+
|
|
+struct xt_table {
|
|
+ struct list_head list;
|
|
+ unsigned int valid_hooks;
|
|
+ struct xt_table_info *private;
|
|
+ struct module *me;
|
|
+ u_int8_t af;
|
|
+ int priority;
|
|
+ int (*table_init)(struct net *);
|
|
+ const char name[32];
|
|
+};
|
|
+
|
|
+struct xt_action_param;
|
|
+
|
|
+struct xt_mtchk_param;
|
|
+
|
|
+struct xt_mtdtor_param;
|
|
+
|
|
+struct xt_match {
|
|
+ struct list_head list;
|
|
+ const char name[29];
|
|
+ u_int8_t revision;
|
|
+ bool (*match)(const struct sk_buff *, struct xt_action_param *);
|
|
+ int (*checkentry)(const struct xt_mtchk_param *);
|
|
+ void (*destroy)(const struct xt_mtdtor_param *);
|
|
+ void (*compat_from_user)(void *, const void *);
|
|
+ int (*compat_to_user)(void *, const void *);
|
|
+ struct module *me;
|
|
+ const char *table;
|
|
+ unsigned int matchsize;
|
|
+ unsigned int usersize;
|
|
+ unsigned int compatsize;
|
|
+ unsigned int hooks;
|
|
+ short unsigned int proto;
|
|
+ short unsigned int family;
|
|
+};
|
|
+
|
|
+struct xt_entry_match {
|
|
+ union {
|
|
+ struct {
|
|
+ __u16 match_size;
|
|
+ char name[29];
|
|
+ __u8 revision;
|
|
+ } user;
|
|
+ struct {
|
|
+ __u16 match_size;
|
|
+ struct xt_match *match;
|
|
+ } kernel;
|
|
+ __u16 match_size;
|
|
+ } u;
|
|
+ unsigned char data[0];
|
|
+};
|
|
+
|
|
+struct xt_tgchk_param;
|
|
+
|
|
+struct xt_tgdtor_param;
|
|
+
|
|
+struct xt_target {
|
|
+ struct list_head list;
|
|
+ const char name[29];
|
|
+ u_int8_t revision;
|
|
+ unsigned int (*target)(struct sk_buff *, const struct xt_action_param *);
|
|
+ int (*checkentry)(const struct xt_tgchk_param *);
|
|
+ void (*destroy)(const struct xt_tgdtor_param *);
|
|
+ void (*compat_from_user)(void *, const void *);
|
|
+ int (*compat_to_user)(void *, const void *);
|
|
+ struct module *me;
|
|
+ const char *table;
|
|
+ unsigned int targetsize;
|
|
+ unsigned int usersize;
|
|
+ unsigned int compatsize;
|
|
+ unsigned int hooks;
|
|
+ short unsigned int proto;
|
|
+ short unsigned int family;
|
|
+};
|
|
+
|
|
+struct xt_entry_target {
|
|
+ union {
|
|
+ struct {
|
|
+ __u16 target_size;
|
|
+ char name[29];
|
|
+ __u8 revision;
|
|
+ } user;
|
|
+ struct {
|
|
+ __u16 target_size;
|
|
+ struct xt_target *target;
|
|
+ } kernel;
|
|
+ __u16 target_size;
|
|
+ } u;
|
|
+ unsigned char data[0];
|
|
+};
|
|
+
|
|
+struct xt_standard_target {
|
|
+ struct xt_entry_target target;
|
|
+ int verdict;
|
|
+};
|
|
+
|
|
+struct xt_error_target {
|
|
+ struct xt_entry_target target;
|
|
+ char errorname[30];
|
|
+};
|
|
+
|
|
+struct xt_counters {
|
|
+ __u64 pcnt;
|
|
+ __u64 bcnt;
|
|
+};
|
|
+
|
|
+struct xt_counters_info {
|
|
+ char name[32];
|
|
+ unsigned int num_counters;
|
|
+ struct xt_counters counters[0];
|
|
+};
|
|
+
|
|
+struct xt_action_param {
|
|
+ union {
|
|
+ const struct xt_match *match;
|
|
+ const struct xt_target *target;
|
|
+ };
|
|
+ union {
|
|
+ const void *matchinfo;
|
|
+ const void *targinfo;
|
|
+ };
|
|
+ const struct nf_hook_state *state;
|
|
+ int fragoff;
|
|
+ unsigned int thoff;
|
|
+ bool hotdrop;
|
|
+};
|
|
+
|
|
+struct xt_mtchk_param {
|
|
+ struct net *net;
|
|
+ const char *table;
|
|
+ const void *entryinfo;
|
|
+ const struct xt_match *match;
|
|
+ void *matchinfo;
|
|
+ unsigned int hook_mask;
|
|
+ u_int8_t family;
|
|
+ bool nft_compat;
|
|
+};
|
|
+
|
|
+struct xt_mtdtor_param {
|
|
+ struct net *net;
|
|
+ const struct xt_match *match;
|
|
+ void *matchinfo;
|
|
+ u_int8_t family;
|
|
+};
|
|
+
|
|
+struct xt_tgchk_param {
|
|
+ struct net *net;
|
|
+ const char *table;
|
|
+ const void *entryinfo;
|
|
+ const struct xt_target *target;
|
|
+ void *targinfo;
|
|
+ unsigned int hook_mask;
|
|
+ u_int8_t family;
|
|
+ bool nft_compat;
|
|
+};
|
|
+
|
|
+struct xt_tgdtor_param {
|
|
+ struct net *net;
|
|
+ const struct xt_target *target;
|
|
+ void *targinfo;
|
|
+ u_int8_t family;
|
|
+};
|
|
+
|
|
+struct xt_table_info {
|
|
+ unsigned int size;
|
|
+ unsigned int number;
|
|
+ unsigned int initial_entries;
|
|
+ unsigned int hook_entry[5];
|
|
+ unsigned int underflow[5];
|
|
+ unsigned int stacksize;
|
|
+ void ***jumpstack;
|
|
+ unsigned char entries[0];
|
|
+};
|
|
+
|
|
+struct xt_percpu_counter_alloc_state {
|
|
+ unsigned int off;
|
|
+ const char *mem;
|
|
+};
|
|
+
|
|
+struct compat_xt_entry_match {
|
|
+ union {
|
|
+ struct {
|
|
+ u_int16_t match_size;
|
|
+ char name[29];
|
|
+ u_int8_t revision;
|
|
+ } user;
|
|
+ struct {
|
|
+ u_int16_t match_size;
|
|
+ compat_uptr_t match;
|
|
+ } kernel;
|
|
+ u_int16_t match_size;
|
|
+ } u;
|
|
+ unsigned char data[0];
|
|
+};
|
|
+
|
|
+struct compat_xt_entry_target {
|
|
+ union {
|
|
+ struct {
|
|
+ u_int16_t target_size;
|
|
+ char name[29];
|
|
+ u_int8_t revision;
|
|
+ } user;
|
|
+ struct {
|
|
+ u_int16_t target_size;
|
|
+ compat_uptr_t target;
|
|
+ } kernel;
|
|
+ u_int16_t target_size;
|
|
+ } u;
|
|
+ unsigned char data[0];
|
|
+};
|
|
+
|
|
+struct compat_xt_counters {
|
|
+ compat_u64 pcnt;
|
|
+ compat_u64 bcnt;
|
|
+};
|
|
+
|
|
+struct compat_xt_counters_info {
|
|
+ char name[32];
|
|
+ compat_uint_t num_counters;
|
|
+ struct compat_xt_counters counters[0];
|
|
+} __attribute__((packed));
|
|
+
|
|
+struct compat_delta {
|
|
+ unsigned int offset;
|
|
+ int delta;
|
|
+};
|
|
+
|
|
+struct xt_af {
|
|
+ struct mutex mutex;
|
|
+ struct list_head match;
|
|
+ struct list_head target;
|
|
+ struct mutex compat_mutex;
|
|
+ struct compat_delta *compat_tab;
|
|
+ unsigned int number;
|
|
+ unsigned int cur;
|
|
+};
|
|
+
|
|
+struct compat_xt_standard_target {
|
|
+ struct compat_xt_entry_target t;
|
|
+ compat_uint_t verdict;
|
|
+};
|
|
+
|
|
+struct compat_xt_error_target {
|
|
+ struct compat_xt_entry_target t;
|
|
+ char errorname[30];
|
|
+};
|
|
+
|
|
+struct nf_mttg_trav {
|
|
+ struct list_head *head;
|
|
+ struct list_head *curr;
|
|
+ uint8_t class;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ MTTG_TRAV_INIT = 0,
|
|
+ MTTG_TRAV_NFP_UNSPEC = 1,
|
|
+ MTTG_TRAV_NFP_SPEC = 2,
|
|
+ MTTG_TRAV_DONE = 3,
|
|
+};
|
|
+
|
|
+struct xt_tcp {
|
|
+ __u16 spts[2];
|
|
+ __u16 dpts[2];
|
|
+ __u8 option;
|
|
+ __u8 flg_mask;
|
|
+ __u8 flg_cmp;
|
|
+ __u8 invflags;
|
|
+};
|
|
+
|
|
+struct xt_udp {
|
|
+ __u16 spts[2];
|
|
+ __u16 dpts[2];
|
|
+ __u8 invflags;
|
|
+};
|
|
+
|
|
+struct ip_mreqn {
|
|
+ struct in_addr imr_multiaddr;
|
|
+ struct in_addr imr_address;
|
|
+ int imr_ifindex;
|
|
+};
|
|
+
|
|
+struct rtmsg {
|
|
+ unsigned char rtm_family;
|
|
+ unsigned char rtm_dst_len;
|
|
+ unsigned char rtm_src_len;
|
|
+ unsigned char rtm_tos;
|
|
+ unsigned char rtm_table;
|
|
+ unsigned char rtm_protocol;
|
|
+ unsigned char rtm_scope;
|
|
+ unsigned char rtm_type;
|
|
+ unsigned int rtm_flags;
|
|
+};
|
|
+
|
|
+struct ip_sf_list;
|
|
+
|
|
+struct ip_mc_list {
|
|
+ struct in_device *interface;
|
|
+ __be32 multiaddr;
|
|
+ unsigned int sfmode;
|
|
+ struct ip_sf_list *sources;
|
|
+ struct ip_sf_list *tomb;
|
|
+ long unsigned int sfcount[2];
|
|
+ union {
|
|
+ struct ip_mc_list *next;
|
|
+ struct ip_mc_list *next_rcu;
|
|
+ };
|
|
+ struct ip_mc_list *next_hash;
|
|
+ struct timer_list timer;
|
|
+ int users;
|
|
+ refcount_t refcnt;
|
|
+ spinlock_t lock;
|
|
+ char tm_running;
|
|
+ char reporter;
|
|
+ char unsolicit_count;
|
|
+ char loaded;
|
|
+ unsigned char gsquery;
|
|
+ unsigned char crcount;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct ip_sf_socklist {
|
|
+ unsigned int sl_max;
|
|
+ unsigned int sl_count;
|
|
+ struct callback_head rcu;
|
|
+ __be32 sl_addr[0];
|
|
+};
|
|
+
|
|
+struct ip_mc_socklist {
|
|
+ struct ip_mc_socklist *next_rcu;
|
|
+ struct ip_mreqn multi;
|
|
+ unsigned int sfmode;
|
|
+ struct ip_sf_socklist *sflist;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct ip_sf_list {
|
|
+ struct ip_sf_list *sf_next;
|
|
+ __be32 sf_inaddr;
|
|
+ long unsigned int sf_count[2];
|
|
+ unsigned char sf_gsresp;
|
|
+ unsigned char sf_oldin;
|
|
+ unsigned char sf_crcount;
|
|
+};
|
|
+
|
|
+struct ipv4_addr_key {
|
|
+ __be32 addr;
|
|
+ int vif;
|
|
+};
|
|
+
|
|
+struct inetpeer_addr {
|
|
+ union {
|
|
+ struct ipv4_addr_key a4;
|
|
+ struct in6_addr a6;
|
|
+ u32 key[4];
|
|
+ };
|
|
+ __u16 family;
|
|
+};
|
|
+
|
|
+struct inet_peer {
|
|
+ struct rb_node rb_node;
|
|
+ struct inetpeer_addr daddr;
|
|
+ u32 metrics[17];
|
|
+ u32 rate_tokens;
|
|
+ u32 n_redirects;
|
|
+ long unsigned int rate_last;
|
|
+ union {
|
|
+ struct {
|
|
+ atomic_t rid;
|
|
+ };
|
|
+ struct callback_head rcu;
|
|
+ };
|
|
+ __u32 dtime;
|
|
+ refcount_t refcnt;
|
|
+};
|
|
+
|
|
+struct uncached_list {
|
|
+ spinlock_t lock;
|
|
+ struct list_head head;
|
|
+};
|
|
+
|
|
+struct rt_cache_stat {
|
|
+ unsigned int in_slow_tot;
|
|
+ unsigned int in_slow_mc;
|
|
+ unsigned int in_no_route;
|
|
+ unsigned int in_brd;
|
|
+ unsigned int in_martian_dst;
|
|
+ unsigned int in_martian_src;
|
|
+ unsigned int out_slow_tot;
|
|
+ unsigned int out_slow_mc;
|
|
+};
|
|
+
|
|
+struct icmphdr {
|
|
+ __u8 type;
|
|
+ __u8 code;
|
|
+ __sum16 checksum;
|
|
+ union {
|
|
+ struct {
|
|
+ __be16 id;
|
|
+ __be16 sequence;
|
|
+ } echo;
|
|
+ __be32 gateway;
|
|
+ struct {
|
|
+ __be16 __unused;
|
|
+ __be16 mtu;
|
|
+ } frag;
|
|
+ __u8 reserved[4];
|
|
+ } un;
|
|
+};
|
|
+
|
|
+struct fib_prop {
|
|
+ int error;
|
|
+ u8 scope;
|
|
+};
|
|
+
|
|
+struct raw_hashinfo {
|
|
+ rwlock_t lock;
|
|
+ struct hlist_head ht[256];
|
|
+};
|
|
+
|
|
+enum ip_defrag_users {
|
|
+ IP_DEFRAG_LOCAL_DELIVER = 0,
|
|
+ IP_DEFRAG_CALL_RA_CHAIN = 1,
|
|
+ IP_DEFRAG_CONNTRACK_IN = 2,
|
|
+ __IP_DEFRAG_CONNTRACK_IN_END = 65537,
|
|
+ IP_DEFRAG_CONNTRACK_OUT = 65538,
|
|
+ __IP_DEFRAG_CONNTRACK_OUT_END = 131073,
|
|
+ IP_DEFRAG_CONNTRACK_BRIDGE_IN = 131074,
|
|
+ __IP_DEFRAG_CONNTRACK_BRIDGE_IN = 196609,
|
|
+ IP_DEFRAG_VS_IN = 196610,
|
|
+ IP_DEFRAG_VS_OUT = 196611,
|
|
+ IP_DEFRAG_VS_FWD = 196612,
|
|
+ IP_DEFRAG_AF_PACKET = 196613,
|
|
+ IP_DEFRAG_MACVLAN = 196614,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ INET_FRAG_FIRST_IN = 1,
|
|
+ INET_FRAG_LAST_IN = 2,
|
|
+ INET_FRAG_COMPLETE = 4,
|
|
+};
|
|
+
|
|
+struct ipq {
|
|
+ struct inet_frag_queue q;
|
|
+ u8 ecn;
|
|
+ u16 max_df_size;
|
|
+ int iif;
|
|
+ unsigned int rid;
|
|
+ struct inet_peer *peer;
|
|
+};
|
|
+
|
|
+struct ip_options_data {
|
|
+ struct ip_options_rcu opt;
|
|
+ char data[40];
|
|
+};
|
|
+
|
|
+struct ipcm_cookie {
|
|
+ struct sockcm_cookie sockc;
|
|
+ __be32 addr;
|
|
+ int oif;
|
|
+ struct ip_options_rcu *opt;
|
|
+ __u8 ttl;
|
|
+ __s16 tos;
|
|
+ char priority;
|
|
+ __u16 gso_size;
|
|
+};
|
|
+
|
|
+struct ip_reply_arg {
|
|
+ struct kvec iov[1];
|
|
+ int flags;
|
|
+ __wsum csum;
|
|
+ int csumoffset;
|
|
+ int bound_dev_if;
|
|
+ u8 tos;
|
|
+ kuid_t uid;
|
|
+};
|
|
+
|
|
+struct ip_mreq_source {
|
|
+ __be32 imr_multiaddr;
|
|
+ __be32 imr_interface;
|
|
+ __be32 imr_sourceaddr;
|
|
+};
|
|
+
|
|
+struct ip_msfilter {
|
|
+ __be32 imsf_multiaddr;
|
|
+ __be32 imsf_interface;
|
|
+ __u32 imsf_fmode;
|
|
+ __u32 imsf_numsrc;
|
|
+ __be32 imsf_slist[1];
|
|
+};
|
|
+
|
|
+struct in_pktinfo {
|
|
+ int ipi_ifindex;
|
|
+ struct in_addr ipi_spec_dst;
|
|
+ struct in_addr ipi_addr;
|
|
+};
|
|
+
|
|
+struct inet_timewait_sock {
|
|
+ struct sock_common __tw_common;
|
|
+ __u32 tw_mark;
|
|
+ volatile unsigned char tw_substate;
|
|
+ unsigned char tw_rcv_wscale;
|
|
+ __be16 tw_sport;
|
|
+ unsigned int tw_kill: 1;
|
|
+ unsigned int tw_transparent: 1;
|
|
+ unsigned int tw_flowlabel: 20;
|
|
+ unsigned int tw_pad: 2;
|
|
+ unsigned int tw_tos: 8;
|
|
+ struct timer_list tw_timer;
|
|
+ struct inet_bind_bucket *tw_tb;
|
|
+};
|
|
+
|
|
+struct tcpvegas_info {
|
|
+ __u32 tcpv_enabled;
|
|
+ __u32 tcpv_rttcnt;
|
|
+ __u32 tcpv_rtt;
|
|
+ __u32 tcpv_minrtt;
|
|
+};
|
|
+
|
|
+struct tcp_dctcp_info {
|
|
+ __u16 dctcp_enabled;
|
|
+ __u16 dctcp_ce_state;
|
|
+ __u32 dctcp_alpha;
|
|
+ __u32 dctcp_ab_ecn;
|
|
+ __u32 dctcp_ab_tot;
|
|
+};
|
|
+
|
|
+struct tcp_bbr_info {
|
|
+ __u32 bbr_bw_lo;
|
|
+ __u32 bbr_bw_hi;
|
|
+ __u32 bbr_min_rtt;
|
|
+ __u32 bbr_pacing_gain;
|
|
+ __u32 bbr_cwnd_gain;
|
|
+};
|
|
+
|
|
+union tcp_cc_info {
|
|
+ struct tcpvegas_info vegas;
|
|
+ struct tcp_dctcp_info dctcp;
|
|
+ struct tcp_bbr_info bbr;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ BPF_TCP_ESTABLISHED = 1,
|
|
+ BPF_TCP_SYN_SENT = 2,
|
|
+ BPF_TCP_SYN_RECV = 3,
|
|
+ BPF_TCP_FIN_WAIT1 = 4,
|
|
+ BPF_TCP_FIN_WAIT2 = 5,
|
|
+ BPF_TCP_TIME_WAIT = 6,
|
|
+ BPF_TCP_CLOSE = 7,
|
|
+ BPF_TCP_CLOSE_WAIT = 8,
|
|
+ BPF_TCP_LAST_ACK = 9,
|
|
+ BPF_TCP_LISTEN = 10,
|
|
+ BPF_TCP_CLOSING = 11,
|
|
+ BPF_TCP_NEW_SYN_RECV = 12,
|
|
+ BPF_TCP_MAX_STATES = 13,
|
|
+};
|
|
+
|
|
+enum inet_csk_ack_state_t {
|
|
+ ICSK_ACK_SCHED = 1,
|
|
+ ICSK_ACK_TIMER = 2,
|
|
+ ICSK_ACK_PUSHED = 4,
|
|
+ ICSK_ACK_PUSHED2 = 8,
|
|
+ ICSK_ACK_NOW = 16,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCP_FLAG_CWR = 32768,
|
|
+ TCP_FLAG_ECE = 16384,
|
|
+ TCP_FLAG_URG = 8192,
|
|
+ TCP_FLAG_ACK = 4096,
|
|
+ TCP_FLAG_PSH = 2048,
|
|
+ TCP_FLAG_RST = 1024,
|
|
+ TCP_FLAG_SYN = 512,
|
|
+ TCP_FLAG_FIN = 256,
|
|
+ TCP_RESERVED_BITS = 15,
|
|
+ TCP_DATA_OFFSET = 240,
|
|
+};
|
|
+
|
|
+struct tcp_repair_opt {
|
|
+ __u32 opt_code;
|
|
+ __u32 opt_val;
|
|
+};
|
|
+
|
|
+struct tcp_repair_window {
|
|
+ __u32 snd_wl1;
|
|
+ __u32 snd_wnd;
|
|
+ __u32 max_window;
|
|
+ __u32 rcv_wnd;
|
|
+ __u32 rcv_wup;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCP_NO_QUEUE = 0,
|
|
+ TCP_RECV_QUEUE = 1,
|
|
+ TCP_SEND_QUEUE = 2,
|
|
+ TCP_QUEUES_NR = 3,
|
|
+};
|
|
+
|
|
+struct tcp_info {
|
|
+ __u8 tcpi_state;
|
|
+ __u8 tcpi_ca_state;
|
|
+ __u8 tcpi_retransmits;
|
|
+ __u8 tcpi_probes;
|
|
+ __u8 tcpi_backoff;
|
|
+ __u8 tcpi_options;
|
|
+ __u8 tcpi_snd_wscale: 4;
|
|
+ __u8 tcpi_rcv_wscale: 4;
|
|
+ __u8 tcpi_delivery_rate_app_limited: 1;
|
|
+ __u32 tcpi_rto;
|
|
+ __u32 tcpi_ato;
|
|
+ __u32 tcpi_snd_mss;
|
|
+ __u32 tcpi_rcv_mss;
|
|
+ __u32 tcpi_unacked;
|
|
+ __u32 tcpi_sacked;
|
|
+ __u32 tcpi_lost;
|
|
+ __u32 tcpi_retrans;
|
|
+ __u32 tcpi_fackets;
|
|
+ __u32 tcpi_last_data_sent;
|
|
+ __u32 tcpi_last_ack_sent;
|
|
+ __u32 tcpi_last_data_recv;
|
|
+ __u32 tcpi_last_ack_recv;
|
|
+ __u32 tcpi_pmtu;
|
|
+ __u32 tcpi_rcv_ssthresh;
|
|
+ __u32 tcpi_rtt;
|
|
+ __u32 tcpi_rttvar;
|
|
+ __u32 tcpi_snd_ssthresh;
|
|
+ __u32 tcpi_snd_cwnd;
|
|
+ __u32 tcpi_advmss;
|
|
+ __u32 tcpi_reordering;
|
|
+ __u32 tcpi_rcv_rtt;
|
|
+ __u32 tcpi_rcv_space;
|
|
+ __u32 tcpi_total_retrans;
|
|
+ __u64 tcpi_pacing_rate;
|
|
+ __u64 tcpi_max_pacing_rate;
|
|
+ __u64 tcpi_bytes_acked;
|
|
+ __u64 tcpi_bytes_received;
|
|
+ __u32 tcpi_segs_out;
|
|
+ __u32 tcpi_segs_in;
|
|
+ __u32 tcpi_notsent_bytes;
|
|
+ __u32 tcpi_min_rtt;
|
|
+ __u32 tcpi_data_segs_in;
|
|
+ __u32 tcpi_data_segs_out;
|
|
+ __u64 tcpi_delivery_rate;
|
|
+ __u64 tcpi_busy_time;
|
|
+ __u64 tcpi_rwnd_limited;
|
|
+ __u64 tcpi_sndbuf_limited;
|
|
+ __u32 tcpi_delivered;
|
|
+ __u32 tcpi_delivered_ce;
|
|
+ __u64 tcpi_bytes_sent;
|
|
+ __u64 tcpi_bytes_retrans;
|
|
+ __u32 tcpi_dsack_dups;
|
|
+ __u32 tcpi_reord_seen;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCP_NLA_PAD = 0,
|
|
+ TCP_NLA_BUSY = 1,
|
|
+ TCP_NLA_RWND_LIMITED = 2,
|
|
+ TCP_NLA_SNDBUF_LIMITED = 3,
|
|
+ TCP_NLA_DATA_SEGS_OUT = 4,
|
|
+ TCP_NLA_TOTAL_RETRANS = 5,
|
|
+ TCP_NLA_PACING_RATE = 6,
|
|
+ TCP_NLA_DELIVERY_RATE = 7,
|
|
+ TCP_NLA_SND_CWND = 8,
|
|
+ TCP_NLA_REORDERING = 9,
|
|
+ TCP_NLA_MIN_RTT = 10,
|
|
+ TCP_NLA_RECUR_RETRANS = 11,
|
|
+ TCP_NLA_DELIVERY_RATE_APP_LMT = 12,
|
|
+ TCP_NLA_SNDQ_SIZE = 13,
|
|
+ TCP_NLA_CA_STATE = 14,
|
|
+ TCP_NLA_SND_SSTHRESH = 15,
|
|
+ TCP_NLA_DELIVERED = 16,
|
|
+ TCP_NLA_DELIVERED_CE = 17,
|
|
+ TCP_NLA_BYTES_SENT = 18,
|
|
+ TCP_NLA_BYTES_RETRANS = 19,
|
|
+ TCP_NLA_DSACK_DUPS = 20,
|
|
+ TCP_NLA_REORD_SEEN = 21,
|
|
+};
|
|
+
|
|
+struct tcp_zerocopy_receive {
|
|
+ __u64 address;
|
|
+ __u32 length;
|
|
+ __u32 recv_skip_hint;
|
|
+};
|
|
+
|
|
+struct tcp_md5sig_pool {
|
|
+ struct ahash_request *md5_req;
|
|
+ void *scratch;
|
|
+};
|
|
+
|
|
+enum tcp_chrono {
|
|
+ TCP_CHRONO_UNSPEC = 0,
|
|
+ TCP_CHRONO_BUSY = 1,
|
|
+ TCP_CHRONO_RWND_LIMITED = 2,
|
|
+ TCP_CHRONO_SNDBUF_LIMITED = 3,
|
|
+ __TCP_CHRONO_MAX = 4,
|
|
+};
|
|
+
|
|
+struct tcp_splice_state {
|
|
+ struct pipe_inode_info *pipe;
|
|
+ size_t len;
|
|
+ unsigned int flags;
|
|
+};
|
|
+
|
|
+struct tcp_sack_block_wire {
|
|
+ __be32 start_seq;
|
|
+ __be32 end_seq;
|
|
+};
|
|
+
|
|
+enum tcp_queue {
|
|
+ TCP_FRAG_IN_WRITE_QUEUE = 0,
|
|
+ TCP_FRAG_IN_RTX_QUEUE = 1,
|
|
+};
|
|
+
|
|
+enum tcp_ca_ack_event_flags {
|
|
+ CA_ACK_SLOWPATH = 1,
|
|
+ CA_ACK_WIN_UPDATE = 2,
|
|
+ CA_ACK_ECE = 4,
|
|
+};
|
|
+
|
|
+struct tcp_sacktag_state {
|
|
+ u32 reord;
|
|
+ u64 first_sackt;
|
|
+ u64 last_sackt;
|
|
+ struct rate_sample *rate;
|
|
+ int flag;
|
|
+ unsigned int mss_now;
|
|
+};
|
|
+
|
|
+enum pkt_hash_types {
|
|
+ PKT_HASH_TYPE_NONE = 0,
|
|
+ PKT_HASH_TYPE_L2 = 1,
|
|
+ PKT_HASH_TYPE_L3 = 2,
|
|
+ PKT_HASH_TYPE_L4 = 3,
|
|
+};
|
|
+
|
|
+enum tsq_flags {
|
|
+ TSQF_THROTTLED = 1,
|
|
+ TSQF_QUEUED = 2,
|
|
+ TCPF_TSQ_DEFERRED = 4,
|
|
+ TCPF_WRITE_TIMER_DEFERRED = 8,
|
|
+ TCPF_DELACK_TIMER_DEFERRED = 16,
|
|
+ TCPF_MTU_REDUCED_DEFERRED = 32,
|
|
+};
|
|
+
|
|
+struct tcp_out_options {
|
|
+ u16 options;
|
|
+ u16 mss;
|
|
+ u8 ws;
|
|
+ u8 num_sack_blocks;
|
|
+ u8 hash_size;
|
|
+ __u8 *hash_location;
|
|
+ __u32 tsval;
|
|
+ __u32 tsecr;
|
|
+ struct tcp_fastopen_cookie *fastopen_cookie;
|
|
+};
|
|
+
|
|
+struct tsq_tasklet {
|
|
+ struct tasklet_struct tasklet;
|
|
+ struct list_head head;
|
|
+};
|
|
+
|
|
+struct tcp_md5sig {
|
|
+ struct __kernel_sockaddr_storage tcpm_addr;
|
|
+ __u8 tcpm_flags;
|
|
+ __u8 tcpm_prefixlen;
|
|
+ __u16 tcpm_keylen;
|
|
+ __u32 __tcpm_pad;
|
|
+ __u8 tcpm_key[80];
|
|
+};
|
|
+
|
|
+struct tcp_timewait_sock {
|
|
+ struct inet_timewait_sock tw_sk;
|
|
+ u32 tw_rcv_wnd;
|
|
+ u32 tw_ts_offset;
|
|
+ u32 tw_ts_recent;
|
|
+ u32 tw_last_oow_ack_time;
|
|
+ int tw_ts_recent_stamp;
|
|
+ struct tcp_md5sig_key *tw_md5_key;
|
|
+};
|
|
+
|
|
+enum tcp_tw_status {
|
|
+ TCP_TW_SUCCESS = 0,
|
|
+ TCP_TW_RST = 1,
|
|
+ TCP_TW_ACK = 2,
|
|
+ TCP_TW_SYN = 3,
|
|
+};
|
|
+
|
|
+struct tcp4_pseudohdr {
|
|
+ __be32 saddr;
|
|
+ __be32 daddr;
|
|
+ __u8 pad;
|
|
+ __u8 protocol;
|
|
+ __be16 len;
|
|
+};
|
|
+
|
|
+enum tcp_seq_states {
|
|
+ TCP_SEQ_STATE_LISTENING = 0,
|
|
+ TCP_SEQ_STATE_ESTABLISHED = 1,
|
|
+};
|
|
+
|
|
+struct tcp_seq_afinfo {
|
|
+ sa_family_t family;
|
|
+};
|
|
+
|
|
+struct tcp_iter_state {
|
|
+ struct seq_net_private p;
|
|
+ enum tcp_seq_states state;
|
|
+ struct sock *syn_wait_sk;
|
|
+ int bucket;
|
|
+ int offset;
|
|
+ int sbucket;
|
|
+ int num;
|
|
+ loff_t last_pos;
|
|
+};
|
|
+
|
|
+enum tcp_metric_index {
|
|
+ TCP_METRIC_RTT = 0,
|
|
+ TCP_METRIC_RTTVAR = 1,
|
|
+ TCP_METRIC_SSTHRESH = 2,
|
|
+ TCP_METRIC_CWND = 3,
|
|
+ TCP_METRIC_REORDERING = 4,
|
|
+ TCP_METRIC_RTT_US = 5,
|
|
+ TCP_METRIC_RTTVAR_US = 6,
|
|
+ __TCP_METRIC_MAX = 7,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCP_METRICS_ATTR_UNSPEC = 0,
|
|
+ TCP_METRICS_ATTR_ADDR_IPV4 = 1,
|
|
+ TCP_METRICS_ATTR_ADDR_IPV6 = 2,
|
|
+ TCP_METRICS_ATTR_AGE = 3,
|
|
+ TCP_METRICS_ATTR_TW_TSVAL = 4,
|
|
+ TCP_METRICS_ATTR_TW_TS_STAMP = 5,
|
|
+ TCP_METRICS_ATTR_VALS = 6,
|
|
+ TCP_METRICS_ATTR_FOPEN_MSS = 7,
|
|
+ TCP_METRICS_ATTR_FOPEN_SYN_DROPS = 8,
|
|
+ TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS = 9,
|
|
+ TCP_METRICS_ATTR_FOPEN_COOKIE = 10,
|
|
+ TCP_METRICS_ATTR_SADDR_IPV4 = 11,
|
|
+ TCP_METRICS_ATTR_SADDR_IPV6 = 12,
|
|
+ TCP_METRICS_ATTR_PAD = 13,
|
|
+ __TCP_METRICS_ATTR_MAX = 14,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TCP_METRICS_CMD_UNSPEC = 0,
|
|
+ TCP_METRICS_CMD_GET = 1,
|
|
+ TCP_METRICS_CMD_DEL = 2,
|
|
+ __TCP_METRICS_CMD_MAX = 3,
|
|
+};
|
|
+
|
|
+struct tcp_fastopen_metrics {
|
|
+ u16 mss;
|
|
+ u16 syn_loss: 10;
|
|
+ u16 try_exp: 2;
|
|
+ long unsigned int last_syn_loss;
|
|
+ struct tcp_fastopen_cookie cookie;
|
|
+};
|
|
+
|
|
+struct tcp_metrics_block {
|
|
+ struct tcp_metrics_block *tcpm_next;
|
|
+ possible_net_t tcpm_net;
|
|
+ struct inetpeer_addr tcpm_saddr;
|
|
+ struct inetpeer_addr tcpm_daddr;
|
|
+ long unsigned int tcpm_stamp;
|
|
+ u32 tcpm_lock;
|
|
+ u32 tcpm_vals[5];
|
|
+ struct tcp_fastopen_metrics tcpm_fastopen;
|
|
+ struct callback_head callback_head;
|
|
+};
|
|
+
|
|
+struct tcpm_hash_bucket {
|
|
+ struct tcp_metrics_block *chain;
|
|
+};
|
|
+
|
|
+struct icmp_filter {
|
|
+ __u32 data;
|
|
+};
|
|
+
|
|
+struct raw_iter_state {
|
|
+ struct seq_net_private p;
|
|
+ int bucket;
|
|
+};
|
|
+
|
|
+struct raw_sock {
|
|
+ struct inet_sock inet;
|
|
+ struct icmp_filter filter;
|
|
+ u32 ipmr_table;
|
|
+};
|
|
+
|
|
+struct raw_frag_vec {
|
|
+ struct msghdr *msg;
|
|
+ union {
|
|
+ struct icmphdr icmph;
|
|
+ char c[1];
|
|
+ } hdr;
|
|
+ int hlen;
|
|
+};
|
|
+
|
|
+struct udp_sock {
|
|
+ struct inet_sock inet;
|
|
+ int pending;
|
|
+ unsigned int corkflag;
|
|
+ __u8 encap_type;
|
|
+ unsigned char no_check6_tx: 1;
|
|
+ unsigned char no_check6_rx: 1;
|
|
+ __u16 len;
|
|
+ __u16 gso_size;
|
|
+ __u16 pcslen;
|
|
+ __u16 pcrlen;
|
|
+ __u8 pcflag;
|
|
+ __u8 unused[3];
|
|
+ int (*encap_rcv)(struct sock *, struct sk_buff *);
|
|
+ void (*encap_destroy)(struct sock *);
|
|
+ struct sk_buff * (*gro_receive)(struct sock *, struct list_head *, struct sk_buff *);
|
|
+ int (*gro_complete)(struct sock *, struct sk_buff *, int);
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct sk_buff_head reader_queue;
|
|
+ int forward_deficit;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct udp_skb_cb {
|
|
+ union {
|
|
+ struct inet_skb_parm h4;
|
|
+ struct inet6_skb_parm h6;
|
|
+ } header;
|
|
+ __u16 cscov;
|
|
+ __u8 partial_cov;
|
|
+};
|
|
+
|
|
+struct udp_dev_scratch {
|
|
+ u32 _tsize_state;
|
|
+ u16 len;
|
|
+ bool is_linear;
|
|
+ bool csum_unnecessary;
|
|
+};
|
|
+
|
|
+struct udp_seq_afinfo {
|
|
+ sa_family_t family;
|
|
+ struct udp_table *udp_table;
|
|
+};
|
|
+
|
|
+struct udp_iter_state {
|
|
+ struct seq_net_private p;
|
|
+ int bucket;
|
|
+};
|
|
+
|
|
+struct inet_protosw {
|
|
+ struct list_head list;
|
|
+ short unsigned int type;
|
|
+ short unsigned int protocol;
|
|
+ struct proto *prot;
|
|
+ const struct proto_ops *ops;
|
|
+ unsigned char flags;
|
|
+};
|
|
+
|
|
+typedef struct sk_buff * (*gro_receive_sk_t)(struct sock *, struct list_head *, struct sk_buff *);
|
|
+
|
|
+typedef struct sock * (*udp_lookup_t)(struct sk_buff *, __be16, __be16);
|
|
+
|
|
+struct arpreq {
|
|
+ struct sockaddr arp_pa;
|
|
+ struct sockaddr arp_ha;
|
|
+ int arp_flags;
|
|
+ struct sockaddr arp_netmask;
|
|
+ char arp_dev[16];
|
|
+};
|
|
+
|
|
+typedef struct {
|
|
+ char ax25_call[7];
|
|
+} ax25_address;
|
|
+
|
|
+enum {
|
|
+ AX25_VALUES_IPDEFMODE = 0,
|
|
+ AX25_VALUES_AXDEFMODE = 1,
|
|
+ AX25_VALUES_BACKOFF = 2,
|
|
+ AX25_VALUES_CONMODE = 3,
|
|
+ AX25_VALUES_WINDOW = 4,
|
|
+ AX25_VALUES_EWINDOW = 5,
|
|
+ AX25_VALUES_T1 = 6,
|
|
+ AX25_VALUES_T2 = 7,
|
|
+ AX25_VALUES_T3 = 8,
|
|
+ AX25_VALUES_IDLE = 9,
|
|
+ AX25_VALUES_N2 = 10,
|
|
+ AX25_VALUES_PACLEN = 11,
|
|
+ AX25_VALUES_PROTOCOL = 12,
|
|
+ AX25_VALUES_DS_TIMEOUT = 13,
|
|
+ AX25_MAX_VALUES = 14,
|
|
+};
|
|
+
|
|
+struct ax25_dev {
|
|
+ struct ax25_dev *next;
|
|
+ struct net_device *dev;
|
|
+ struct net_device *forward;
|
|
+ struct ctl_table_header *sysheader;
|
|
+ int values[14];
|
|
+};
|
|
+
|
|
+typedef struct ax25_dev ax25_dev;
|
|
+
|
|
+enum ip_conntrack_status {
|
|
+ IPS_EXPECTED_BIT = 0,
|
|
+ IPS_EXPECTED = 1,
|
|
+ IPS_SEEN_REPLY_BIT = 1,
|
|
+ IPS_SEEN_REPLY = 2,
|
|
+ IPS_ASSURED_BIT = 2,
|
|
+ IPS_ASSURED = 4,
|
|
+ IPS_CONFIRMED_BIT = 3,
|
|
+ IPS_CONFIRMED = 8,
|
|
+ IPS_SRC_NAT_BIT = 4,
|
|
+ IPS_SRC_NAT = 16,
|
|
+ IPS_DST_NAT_BIT = 5,
|
|
+ IPS_DST_NAT = 32,
|
|
+ IPS_NAT_MASK = 48,
|
|
+ IPS_SEQ_ADJUST_BIT = 6,
|
|
+ IPS_SEQ_ADJUST = 64,
|
|
+ IPS_SRC_NAT_DONE_BIT = 7,
|
|
+ IPS_SRC_NAT_DONE = 128,
|
|
+ IPS_DST_NAT_DONE_BIT = 8,
|
|
+ IPS_DST_NAT_DONE = 256,
|
|
+ IPS_NAT_DONE_MASK = 384,
|
|
+ IPS_DYING_BIT = 9,
|
|
+ IPS_DYING = 512,
|
|
+ IPS_FIXED_TIMEOUT_BIT = 10,
|
|
+ IPS_FIXED_TIMEOUT = 1024,
|
|
+ IPS_TEMPLATE_BIT = 11,
|
|
+ IPS_TEMPLATE = 2048,
|
|
+ IPS_UNTRACKED_BIT = 12,
|
|
+ IPS_UNTRACKED = 4096,
|
|
+ IPS_HELPER_BIT = 13,
|
|
+ IPS_HELPER = 8192,
|
|
+ IPS_OFFLOAD_BIT = 14,
|
|
+ IPS_OFFLOAD = 16384,
|
|
+ IPS_UNCHANGEABLE_MASK = 19449,
|
|
+ __IPS_MAX_BIT = 15,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ XFRM_LOOKUP_ICMP = 1,
|
|
+ XFRM_LOOKUP_QUEUE = 2,
|
|
+ XFRM_LOOKUP_KEEP_DST_REF = 4,
|
|
+};
|
|
+
|
|
+struct pingv6_ops {
|
|
+ int (*ipv6_recv_error)(struct sock *, struct msghdr *, int, int *);
|
|
+ void (*ip6_datagram_recv_common_ctl)(struct sock *, struct msghdr *, struct sk_buff *);
|
|
+ void (*ip6_datagram_recv_specific_ctl)(struct sock *, struct msghdr *, struct sk_buff *);
|
|
+ int (*icmpv6_err_convert)(u8, u8, int *);
|
|
+ void (*ipv6_icmp_error)(struct sock *, struct sk_buff *, int, __be16, u32, u8 *);
|
|
+ int (*ipv6_chk_addr)(struct net *, const struct in6_addr *, const struct net_device *, int);
|
|
+};
|
|
+
|
|
+struct icmp_bxm {
|
|
+ struct sk_buff *skb;
|
|
+ int offset;
|
|
+ int data_len;
|
|
+ struct {
|
|
+ struct icmphdr icmph;
|
|
+ __be32 times[3];
|
|
+ } data;
|
|
+ int head_len;
|
|
+ struct ip_options_data replyopts;
|
|
+};
|
|
+
|
|
+struct icmp_control {
|
|
+ bool (*handler)(struct sk_buff *);
|
|
+ short int error;
|
|
+};
|
|
+
|
|
+enum grep_conntrack {
|
|
+ GRE_CT_UNREPLIED = 0,
|
|
+ GRE_CT_REPLIED = 1,
|
|
+ GRE_CT_MAX = 2,
|
|
+};
|
|
+
|
|
+struct nf_conntrack_l4proto;
|
|
+
|
|
+struct nf_conntrack_l3proto;
|
|
+
|
|
+struct ifaddrmsg {
|
|
+ __u8 ifa_family;
|
|
+ __u8 ifa_prefixlen;
|
|
+ __u8 ifa_flags;
|
|
+ __u8 ifa_scope;
|
|
+ __u32 ifa_index;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IFA_UNSPEC = 0,
|
|
+ IFA_ADDRESS = 1,
|
|
+ IFA_LOCAL = 2,
|
|
+ IFA_LABEL = 3,
|
|
+ IFA_BROADCAST = 4,
|
|
+ IFA_ANYCAST = 5,
|
|
+ IFA_CACHEINFO = 6,
|
|
+ IFA_MULTICAST = 7,
|
|
+ IFA_FLAGS = 8,
|
|
+ IFA_RT_PRIORITY = 9,
|
|
+ __IFA_MAX = 10,
|
|
+};
|
|
+
|
|
+struct ifa_cacheinfo {
|
|
+ __u32 ifa_prefered;
|
|
+ __u32 ifa_valid;
|
|
+ __u32 cstamp;
|
|
+ __u32 tstamp;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IFLA_INET_UNSPEC = 0,
|
|
+ IFLA_INET_CONF = 1,
|
|
+ __IFLA_INET_MAX = 2,
|
|
+};
|
|
+
|
|
+struct in_validator_info {
|
|
+ __be32 ivi_addr;
|
|
+ struct in_device *ivi_dev;
|
|
+ struct netlink_ext_ack *extack;
|
|
+};
|
|
+
|
|
+struct netconfmsg {
|
|
+ __u8 ncm_family;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NETCONFA_UNSPEC = 0,
|
|
+ NETCONFA_IFINDEX = 1,
|
|
+ NETCONFA_FORWARDING = 2,
|
|
+ NETCONFA_RP_FILTER = 3,
|
|
+ NETCONFA_MC_FORWARDING = 4,
|
|
+ NETCONFA_PROXY_NEIGH = 5,
|
|
+ NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN = 6,
|
|
+ NETCONFA_INPUT = 7,
|
|
+ NETCONFA_BC_FORWARDING = 8,
|
|
+ __NETCONFA_MAX = 9,
|
|
+};
|
|
+
|
|
+struct devinet_sysctl_table {
|
|
+ struct ctl_table_header *sysctl_header;
|
|
+ struct ctl_table devinet_vars[33];
|
|
+};
|
|
+
|
|
+struct igmphdr {
|
|
+ __u8 type;
|
|
+ __u8 code;
|
|
+ __sum16 csum;
|
|
+ __be32 group;
|
|
+};
|
|
+
|
|
+struct igmpv3_grec {
|
|
+ __u8 grec_type;
|
|
+ __u8 grec_auxwords;
|
|
+ __be16 grec_nsrcs;
|
|
+ __be32 grec_mca;
|
|
+ __be32 grec_src[0];
|
|
+};
|
|
+
|
|
+struct igmpv3_report {
|
|
+ __u8 type;
|
|
+ __u8 resv1;
|
|
+ __sum16 csum;
|
|
+ __be16 resv2;
|
|
+ __be16 ngrec;
|
|
+ struct igmpv3_grec grec[0];
|
|
+};
|
|
+
|
|
+struct igmpv3_query {
|
|
+ __u8 type;
|
|
+ __u8 code;
|
|
+ __sum16 csum;
|
|
+ __be32 group;
|
|
+ __u8 qrv: 3;
|
|
+ __u8 suppress: 1;
|
|
+ __u8 resv: 4;
|
|
+ __u8 qqic;
|
|
+ __be16 nsrcs;
|
|
+ __be32 srcs[0];
|
|
+};
|
|
+
|
|
+struct igmp_mc_iter_state {
|
|
+ struct seq_net_private p;
|
|
+ struct net_device *dev;
|
|
+ struct in_device *in_dev;
|
|
+};
|
|
+
|
|
+struct igmp_mcf_iter_state {
|
|
+ struct seq_net_private p;
|
|
+ struct net_device *dev;
|
|
+ struct in_device *idev;
|
|
+ struct ip_mc_list *im;
|
|
+};
|
|
+
|
|
+struct nl_info {
|
|
+ struct nlmsghdr *nlh;
|
|
+ struct net *nl_net;
|
|
+ u32 portid;
|
|
+ bool skip_notify;
|
|
+};
|
|
+
|
|
+struct fib_config {
|
|
+ u8 fc_dst_len;
|
|
+ u8 fc_tos;
|
|
+ u8 fc_protocol;
|
|
+ u8 fc_scope;
|
|
+ u8 fc_type;
|
|
+ u32 fc_table;
|
|
+ __be32 fc_dst;
|
|
+ __be32 fc_gw;
|
|
+ int fc_oif;
|
|
+ u32 fc_flags;
|
|
+ u32 fc_priority;
|
|
+ __be32 fc_prefsrc;
|
|
+ struct nlattr *fc_mx;
|
|
+ struct rtnexthop *fc_mp;
|
|
+ int fc_mx_len;
|
|
+ int fc_mp_len;
|
|
+ u32 fc_flow;
|
|
+ u32 fc_nlflags;
|
|
+ struct nl_info fc_nlinfo;
|
|
+ struct nlattr *fc_encap;
|
|
+ u16 fc_encap_type;
|
|
+};
|
|
+
|
|
+struct fib_result_nl {
|
|
+ __be32 fl_addr;
|
|
+ u32 fl_mark;
|
|
+ unsigned char fl_tos;
|
|
+ unsigned char fl_scope;
|
|
+ unsigned char tb_id_in;
|
|
+ unsigned char tb_id;
|
|
+ unsigned char prefixlen;
|
|
+ unsigned char nh_sel;
|
|
+ unsigned char type;
|
|
+ unsigned char scope;
|
|
+ int err;
|
|
+};
|
|
+
|
|
+struct fib_nh_notifier_info {
|
|
+ struct fib_notifier_info info;
|
|
+ struct fib_nh *fib_nh;
|
|
+};
|
|
+
|
|
+struct fib_alias {
|
|
+ struct hlist_node fa_list;
|
|
+ struct fib_info *fa_info;
|
|
+ u8 fa_tos;
|
|
+ u8 fa_type;
|
|
+ u8 fa_state;
|
|
+ u8 fa_slen;
|
|
+ u32 tb_id;
|
|
+ s16 fa_default;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct fib_entry_notifier_info {
|
|
+ struct fib_notifier_info info;
|
|
+ u32 dst;
|
|
+ int dst_len;
|
|
+ struct fib_info *fi;
|
|
+ u8 tos;
|
|
+ u8 type;
|
|
+ u32 tb_id;
|
|
+};
|
|
+
|
|
+typedef unsigned int t_key;
|
|
+
|
|
+struct key_vector {
|
|
+ t_key key;
|
|
+ unsigned char pos;
|
|
+ unsigned char bits;
|
|
+ unsigned char slen;
|
|
+ union {
|
|
+ struct hlist_head leaf;
|
|
+ struct key_vector *tnode[0];
|
|
+ };
|
|
+};
|
|
+
|
|
+struct tnode {
|
|
+ struct callback_head rcu;
|
|
+ t_key empty_children;
|
|
+ t_key full_children;
|
|
+ struct key_vector *parent;
|
|
+ struct key_vector kv[1];
|
|
+};
|
|
+
|
|
+struct trie_use_stats {
|
|
+ unsigned int gets;
|
|
+ unsigned int backtrack;
|
|
+ unsigned int semantic_match_passed;
|
|
+ unsigned int semantic_match_miss;
|
|
+ unsigned int null_node_hit;
|
|
+ unsigned int resize_node_skipped;
|
|
+};
|
|
+
|
|
+struct trie_stat {
|
|
+ unsigned int totdepth;
|
|
+ unsigned int maxdepth;
|
|
+ unsigned int tnodes;
|
|
+ unsigned int leaves;
|
|
+ unsigned int nullpointers;
|
|
+ unsigned int prefixes;
|
|
+ unsigned int nodesizes[32];
|
|
+};
|
|
+
|
|
+struct trie {
|
|
+ struct key_vector kv[1];
|
|
+ struct trie_use_stats *stats;
|
|
+};
|
|
+
|
|
+struct fib_trie_iter {
|
|
+ struct seq_net_private p;
|
|
+ struct fib_table *tb;
|
|
+ struct key_vector *tnode;
|
|
+ unsigned int index;
|
|
+ unsigned int depth;
|
|
+};
|
|
+
|
|
+struct fib_route_iter {
|
|
+ struct seq_net_private p;
|
|
+ struct fib_table *main_tb;
|
|
+ struct key_vector *tnode;
|
|
+ loff_t pos;
|
|
+ t_key key;
|
|
+};
|
|
+
|
|
+struct ipfrag_skb_cb {
|
|
+ union {
|
|
+ struct inet_skb_parm h4;
|
|
+ struct inet6_skb_parm h6;
|
|
+ };
|
|
+ struct sk_buff *next_frag;
|
|
+ int frag_run_len;
|
|
+};
|
|
+
|
|
+struct icmpv6_echo {
|
|
+ __be16 identifier;
|
|
+ __be16 sequence;
|
|
+};
|
|
+
|
|
+struct icmpv6_nd_advt {
|
|
+ __u32 reserved: 5;
|
|
+ __u32 override: 1;
|
|
+ __u32 solicited: 1;
|
|
+ __u32 router: 1;
|
|
+ __u32 reserved2: 24;
|
|
+};
|
|
+
|
|
+struct icmpv6_nd_ra {
|
|
+ __u8 hop_limit;
|
|
+ __u8 reserved: 3;
|
|
+ __u8 router_pref: 2;
|
|
+ __u8 home_agent: 1;
|
|
+ __u8 other: 1;
|
|
+ __u8 managed: 1;
|
|
+ __be16 rt_lifetime;
|
|
+};
|
|
+
|
|
+struct icmp6hdr {
|
|
+ __u8 icmp6_type;
|
|
+ __u8 icmp6_code;
|
|
+ __sum16 icmp6_cksum;
|
|
+ union {
|
|
+ __be32 un_data32[1];
|
|
+ __be16 un_data16[2];
|
|
+ __u8 un_data8[4];
|
|
+ struct icmpv6_echo u_echo;
|
|
+ struct icmpv6_nd_advt u_nd_advt;
|
|
+ struct icmpv6_nd_ra u_nd_ra;
|
|
+ } icmp6_dataun;
|
|
+};
|
|
+
|
|
+struct ping_iter_state {
|
|
+ struct seq_net_private p;
|
|
+ int bucket;
|
|
+ sa_family_t family;
|
|
+};
|
|
+
|
|
+struct pingfakehdr {
|
|
+ struct icmphdr icmph;
|
|
+ struct msghdr *msg;
|
|
+ sa_family_t family;
|
|
+ __wsum wcheck;
|
|
+};
|
|
+
|
|
+struct ping_table {
|
|
+ struct hlist_nulls_head hash[64];
|
|
+ rwlock_t lock;
|
|
+};
|
|
+
|
|
+enum lwtunnel_ip_t {
|
|
+ LWTUNNEL_IP_UNSPEC = 0,
|
|
+ LWTUNNEL_IP_ID = 1,
|
|
+ LWTUNNEL_IP_DST = 2,
|
|
+ LWTUNNEL_IP_SRC = 3,
|
|
+ LWTUNNEL_IP_TTL = 4,
|
|
+ LWTUNNEL_IP_TOS = 5,
|
|
+ LWTUNNEL_IP_FLAGS = 6,
|
|
+ LWTUNNEL_IP_PAD = 7,
|
|
+ __LWTUNNEL_IP_MAX = 8,
|
|
+};
|
|
+
|
|
+enum lwtunnel_ip6_t {
|
|
+ LWTUNNEL_IP6_UNSPEC = 0,
|
|
+ LWTUNNEL_IP6_ID = 1,
|
|
+ LWTUNNEL_IP6_DST = 2,
|
|
+ LWTUNNEL_IP6_SRC = 3,
|
|
+ LWTUNNEL_IP6_HOPLIMIT = 4,
|
|
+ LWTUNNEL_IP6_TC = 5,
|
|
+ LWTUNNEL_IP6_FLAGS = 6,
|
|
+ LWTUNNEL_IP6_PAD = 7,
|
|
+ __LWTUNNEL_IP6_MAX = 8,
|
|
+};
|
|
+
|
|
+struct ip6_tnl_encap_ops {
|
|
+ size_t (*encap_hlen)(struct ip_tunnel_encap *);
|
|
+ int (*build_header)(struct sk_buff *, struct ip_tunnel_encap *, u8 *, struct flowi6 *);
|
|
+};
|
|
+
|
|
+struct snmp_mib {
|
|
+ const char *name;
|
|
+ int entry;
|
|
+};
|
|
+
|
|
+struct fib4_rule {
|
|
+ struct fib_rule common;
|
|
+ u8 dst_len;
|
|
+ u8 src_len;
|
|
+ u8 tos;
|
|
+ __be32 src;
|
|
+ __be32 srcmask;
|
|
+ __be32 dst;
|
|
+ __be32 dstmask;
|
|
+ u32 tclassid;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ PIM_TYPE_HELLO = 0,
|
|
+ PIM_TYPE_REGISTER = 1,
|
|
+ PIM_TYPE_REGISTER_STOP = 2,
|
|
+ PIM_TYPE_JOIN_PRUNE = 3,
|
|
+ PIM_TYPE_BOOTSTRAP = 4,
|
|
+ PIM_TYPE_ASSERT = 5,
|
|
+ PIM_TYPE_GRAFT = 6,
|
|
+ PIM_TYPE_GRAFT_ACK = 7,
|
|
+ PIM_TYPE_CANDIDATE_RP_ADV = 8,
|
|
+};
|
|
+
|
|
+struct pimreghdr {
|
|
+ __u8 type;
|
|
+ __u8 reserved;
|
|
+ __be16 csum;
|
|
+ __be32 flags;
|
|
+};
|
|
+
|
|
+typedef short unsigned int vifi_t;
|
|
+
|
|
+struct vifctl {
|
|
+ vifi_t vifc_vifi;
|
|
+ unsigned char vifc_flags;
|
|
+ unsigned char vifc_threshold;
|
|
+ unsigned int vifc_rate_limit;
|
|
+ union {
|
|
+ struct in_addr vifc_lcl_addr;
|
|
+ int vifc_lcl_ifindex;
|
|
+ };
|
|
+ struct in_addr vifc_rmt_addr;
|
|
+};
|
|
+
|
|
+struct mfcctl {
|
|
+ struct in_addr mfcc_origin;
|
|
+ struct in_addr mfcc_mcastgrp;
|
|
+ vifi_t mfcc_parent;
|
|
+ unsigned char mfcc_ttls[32];
|
|
+ unsigned int mfcc_pkt_cnt;
|
|
+ unsigned int mfcc_byte_cnt;
|
|
+ unsigned int mfcc_wrong_if;
|
|
+ int mfcc_expire;
|
|
+};
|
|
+
|
|
+struct sioc_sg_req {
|
|
+ struct in_addr src;
|
|
+ struct in_addr grp;
|
|
+ long unsigned int pktcnt;
|
|
+ long unsigned int bytecnt;
|
|
+ long unsigned int wrong_if;
|
|
+};
|
|
+
|
|
+struct sioc_vif_req {
|
|
+ vifi_t vifi;
|
|
+ long unsigned int icount;
|
|
+ long unsigned int ocount;
|
|
+ long unsigned int ibytes;
|
|
+ long unsigned int obytes;
|
|
+};
|
|
+
|
|
+struct igmpmsg {
|
|
+ __u32 unused1;
|
|
+ __u32 unused2;
|
|
+ unsigned char im_msgtype;
|
|
+ unsigned char im_mbz;
|
|
+ unsigned char im_vif;
|
|
+ unsigned char unused3;
|
|
+ struct in_addr im_src;
|
|
+ struct in_addr im_dst;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IPMRA_TABLE_UNSPEC = 0,
|
|
+ IPMRA_TABLE_ID = 1,
|
|
+ IPMRA_TABLE_CACHE_RES_QUEUE_LEN = 2,
|
|
+ IPMRA_TABLE_MROUTE_REG_VIF_NUM = 3,
|
|
+ IPMRA_TABLE_MROUTE_DO_ASSERT = 4,
|
|
+ IPMRA_TABLE_MROUTE_DO_PIM = 5,
|
|
+ IPMRA_TABLE_VIFS = 6,
|
|
+ IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE = 7,
|
|
+ __IPMRA_TABLE_MAX = 8,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IPMRA_VIF_UNSPEC = 0,
|
|
+ IPMRA_VIF = 1,
|
|
+ __IPMRA_VIF_MAX = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IPMRA_VIFA_UNSPEC = 0,
|
|
+ IPMRA_VIFA_IFINDEX = 1,
|
|
+ IPMRA_VIFA_VIF_ID = 2,
|
|
+ IPMRA_VIFA_FLAGS = 3,
|
|
+ IPMRA_VIFA_BYTES_IN = 4,
|
|
+ IPMRA_VIFA_BYTES_OUT = 5,
|
|
+ IPMRA_VIFA_PACKETS_IN = 6,
|
|
+ IPMRA_VIFA_PACKETS_OUT = 7,
|
|
+ IPMRA_VIFA_LOCAL_ADDR = 8,
|
|
+ IPMRA_VIFA_REMOTE_ADDR = 9,
|
|
+ IPMRA_VIFA_PAD = 10,
|
|
+ __IPMRA_VIFA_MAX = 11,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IPMRA_CREPORT_UNSPEC = 0,
|
|
+ IPMRA_CREPORT_MSGTYPE = 1,
|
|
+ IPMRA_CREPORT_VIF_ID = 2,
|
|
+ IPMRA_CREPORT_SRC_ADDR = 3,
|
|
+ IPMRA_CREPORT_DST_ADDR = 4,
|
|
+ IPMRA_CREPORT_PKT = 5,
|
|
+ __IPMRA_CREPORT_MAX = 6,
|
|
+};
|
|
+
|
|
+struct vif_device {
|
|
+ struct net_device *dev;
|
|
+ long unsigned int bytes_in;
|
|
+ long unsigned int bytes_out;
|
|
+ long unsigned int pkt_in;
|
|
+ long unsigned int pkt_out;
|
|
+ long unsigned int rate_limit;
|
|
+ unsigned char threshold;
|
|
+ short unsigned int flags;
|
|
+ int link;
|
|
+ struct netdev_phys_item_id dev_parent_id;
|
|
+ __be32 local;
|
|
+ __be32 remote;
|
|
+};
|
|
+
|
|
+struct vif_entry_notifier_info {
|
|
+ struct fib_notifier_info info;
|
|
+ struct net_device *dev;
|
|
+ short unsigned int vif_index;
|
|
+ short unsigned int vif_flags;
|
|
+ u32 tb_id;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ MFC_STATIC = 1,
|
|
+ MFC_OFFLOAD = 2,
|
|
+};
|
|
+
|
|
+struct mr_mfc {
|
|
+ struct rhlist_head mnode;
|
|
+ short unsigned int mfc_parent;
|
|
+ int mfc_flags;
|
|
+ union {
|
|
+ struct {
|
|
+ long unsigned int expires;
|
|
+ struct sk_buff_head unresolved;
|
|
+ } unres;
|
|
+ struct {
|
|
+ long unsigned int last_assert;
|
|
+ int minvif;
|
|
+ int maxvif;
|
|
+ long unsigned int bytes;
|
|
+ long unsigned int pkt;
|
|
+ long unsigned int wrong_if;
|
|
+ long unsigned int lastuse;
|
|
+ unsigned char ttls[32];
|
|
+ refcount_t refcount;
|
|
+ } res;
|
|
+ } mfc_un;
|
|
+ struct list_head list;
|
|
+ struct callback_head rcu;
|
|
+ void (*free)(struct callback_head *);
|
|
+};
|
|
+
|
|
+struct mfc_entry_notifier_info {
|
|
+ struct fib_notifier_info info;
|
|
+ struct mr_mfc *mfc;
|
|
+ u32 tb_id;
|
|
+};
|
|
+
|
|
+struct mr_table_ops {
|
|
+ const struct rhashtable_params *rht_params;
|
|
+ void *cmparg_any;
|
|
+};
|
|
+
|
|
+struct mr_table {
|
|
+ struct list_head list;
|
|
+ possible_net_t net;
|
|
+ struct mr_table_ops ops;
|
|
+ u32 id;
|
|
+ struct sock *mroute_sk;
|
|
+ struct timer_list ipmr_expire_timer;
|
|
+ struct list_head mfc_unres_queue;
|
|
+ struct vif_device vif_table[32];
|
|
+ struct rhltable mfc_hash;
|
|
+ struct list_head mfc_cache_list;
|
|
+ int maxvif;
|
|
+ atomic_t cache_resolve_queue_len;
|
|
+ bool mroute_do_assert;
|
|
+ bool mroute_do_pim;
|
|
+ bool mroute_do_wrvifwhole;
|
|
+ int mroute_reg_vif_num;
|
|
+};
|
|
+
|
|
+struct mr_vif_iter {
|
|
+ struct seq_net_private p;
|
|
+ struct mr_table *mrt;
|
|
+ int ct;
|
|
+};
|
|
+
|
|
+struct mr_mfc_iter {
|
|
+ struct seq_net_private p;
|
|
+ struct mr_table *mrt;
|
|
+ struct list_head *cache;
|
|
+ spinlock_t *lock;
|
|
+};
|
|
+
|
|
+struct mfc_cache_cmp_arg {
|
|
+ __be32 mfc_mcastgrp;
|
|
+ __be32 mfc_origin;
|
|
+};
|
|
+
|
|
+struct mfc_cache {
|
|
+ struct mr_mfc _c;
|
|
+ union {
|
|
+ struct {
|
|
+ __be32 mfc_mcastgrp;
|
|
+ __be32 mfc_origin;
|
|
+ };
|
|
+ struct mfc_cache_cmp_arg cmparg;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct ip_tunnel_parm {
|
|
+ char name[16];
|
|
+ int link;
|
|
+ __be16 i_flags;
|
|
+ __be16 o_flags;
|
|
+ __be32 i_key;
|
|
+ __be32 o_key;
|
|
+ struct iphdr iph;
|
|
+};
|
|
+
|
|
+struct ipmr_result {
|
|
+ struct mr_table *mrt;
|
|
+};
|
|
+
|
|
+struct compat_sioc_sg_req {
|
|
+ struct in_addr src;
|
|
+ struct in_addr grp;
|
|
+ compat_ulong_t pktcnt;
|
|
+ compat_ulong_t bytecnt;
|
|
+ compat_ulong_t wrong_if;
|
|
+};
|
|
+
|
|
+struct compat_sioc_vif_req {
|
|
+ vifi_t vifi;
|
|
+ compat_ulong_t icount;
|
|
+ compat_ulong_t ocount;
|
|
+ compat_ulong_t ibytes;
|
|
+ compat_ulong_t obytes;
|
|
+};
|
|
+
|
|
+struct rta_mfc_stats {
|
|
+ __u64 mfcs_packets;
|
|
+ __u64 mfcs_bytes;
|
|
+ __u64 mfcs_wrong_if;
|
|
+};
|
|
+
|
|
+struct bictcp {
|
|
+ u32 cnt;
|
|
+ u32 last_max_cwnd;
|
|
+ u32 last_cwnd;
|
|
+ u32 last_time;
|
|
+ u32 bic_origin_point;
|
|
+ u32 bic_K;
|
|
+ u32 delay_min;
|
|
+ u32 epoch_start;
|
|
+ u32 ack_cnt;
|
|
+ u32 tcp_cwnd;
|
|
+ u16 unused;
|
|
+ u8 sample_cnt;
|
|
+ u8 found;
|
|
+ u32 round_start;
|
|
+ u32 end_seq;
|
|
+ u32 last_ack;
|
|
+ u32 curr_rtt;
|
|
+};
|
|
+
|
|
+struct netlbl_audit {
|
|
+ u32 secid;
|
|
+ kuid_t loginuid;
|
|
+ unsigned int sessionid;
|
|
+};
|
|
+
|
|
+struct cipso_v4_std_map_tbl {
|
|
+ struct {
|
|
+ u32 *cipso;
|
|
+ u32 *local;
|
|
+ u32 cipso_size;
|
|
+ u32 local_size;
|
|
+ } lvl;
|
|
+ struct {
|
|
+ u32 *cipso;
|
|
+ u32 *local;
|
|
+ u32 cipso_size;
|
|
+ u32 local_size;
|
|
+ } cat;
|
|
+};
|
|
+
|
|
+struct cipso_v4_doi {
|
|
+ u32 doi;
|
|
+ u32 type;
|
|
+ union {
|
|
+ struct cipso_v4_std_map_tbl *std;
|
|
+ } map;
|
|
+ u8 tags[5];
|
|
+ refcount_t refcount;
|
|
+ struct list_head list;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct cipso_v4_map_cache_bkt {
|
|
+ spinlock_t lock;
|
|
+ u32 size;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct cipso_v4_map_cache_entry {
|
|
+ u32 hash;
|
|
+ unsigned char *key;
|
|
+ size_t key_len;
|
|
+ struct netlbl_lsm_cache *lsm_data;
|
|
+ u32 activity;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct strp_msg {
|
|
+ int full_len;
|
|
+ int offset;
|
|
+};
|
|
+
|
|
+struct ZSTD_CStream_s___2;
|
|
+
|
|
+typedef struct ZSTD_CStream_s___2 ZSTD_CStream___2;
|
|
+
|
|
+struct ZSTD_DStream_s___2;
|
|
+
|
|
+typedef struct ZSTD_DStream_s___2 ZSTD_DStream___2;
|
|
+
|
|
+struct tcp_comp_context_tx {
|
|
+ ZSTD_CStream___2 *cstream;
|
|
+ void *cworkspace;
|
|
+ void *plaintext_data;
|
|
+ void *compressed_data;
|
|
+ struct scatterlist sg_data[17];
|
|
+ unsigned int sg_size;
|
|
+ int sg_num;
|
|
+ struct scatterlist *partially_send;
|
|
+ bool in_tcp_sendpages;
|
|
+};
|
|
+
|
|
+struct tcp_comp_context_rx {
|
|
+ ZSTD_DStream___2 *dstream;
|
|
+ void *dworkspace;
|
|
+ void *plaintext_data;
|
|
+ struct strparser strp;
|
|
+ void (*saved_data_ready)(struct sock *);
|
|
+ struct sk_buff *pkt;
|
|
+ struct sk_buff *dpkt;
|
|
+};
|
|
+
|
|
+struct tcp_comp_context {
|
|
+ struct callback_head rcu;
|
|
+ struct proto *sk_proto;
|
|
+ void (*sk_write_space)(struct sock *);
|
|
+ struct tcp_comp_context_tx tx;
|
|
+ struct tcp_comp_context_rx rx;
|
|
+ long unsigned int flags;
|
|
+};
|
|
+
|
|
+struct xfrm_policy_afinfo {
|
|
+ struct dst_ops *dst_ops;
|
|
+ struct dst_entry * (*dst_lookup)(struct net *, int, int, const xfrm_address_t *, const xfrm_address_t *, u32);
|
|
+ int (*get_saddr)(struct net *, int, xfrm_address_t *, xfrm_address_t *, u32);
|
|
+ void (*decode_session)(struct sk_buff *, struct flowi *, int);
|
|
+ int (*get_tos)(const struct flowi *);
|
|
+ int (*init_path)(struct xfrm_dst *, struct dst_entry *, int);
|
|
+ int (*fill_dst)(struct xfrm_dst *, struct net_device *, const struct flowi *);
|
|
+ struct dst_entry * (*blackhole_route)(struct net *, struct dst_entry *);
|
|
+};
|
|
+
|
|
+struct ip_tunnel;
|
|
+
|
|
+struct ip6_tnl;
|
|
+
|
|
+struct xfrm_tunnel_skb_cb {
|
|
+ union {
|
|
+ struct inet_skb_parm h4;
|
|
+ struct inet6_skb_parm h6;
|
|
+ } header;
|
|
+ union {
|
|
+ struct ip_tunnel *ip4;
|
|
+ struct ip6_tnl *ip6;
|
|
+ } tunnel;
|
|
+};
|
|
+
|
|
+struct xfrm_mode_skb_cb {
|
|
+ struct xfrm_tunnel_skb_cb header;
|
|
+ __be16 id;
|
|
+ __be16 frag_off;
|
|
+ u8 ihl;
|
|
+ u8 tos;
|
|
+ u8 ttl;
|
|
+ u8 protocol;
|
|
+ u8 optlen;
|
|
+ u8 flow_lbl[3];
|
|
+};
|
|
+
|
|
+struct xfrm_spi_skb_cb {
|
|
+ struct xfrm_tunnel_skb_cb header;
|
|
+ unsigned int daddroff;
|
|
+ unsigned int family;
|
|
+ __be32 seq;
|
|
+};
|
|
+
|
|
+struct xfrm_input_afinfo {
|
|
+ unsigned int family;
|
|
+ int (*callback)(struct sk_buff *, u8, int);
|
|
+};
|
|
+
|
|
+struct xfrm4_protocol {
|
|
+ int (*handler)(struct sk_buff *);
|
|
+ int (*input_handler)(struct sk_buff *, int, __be32, int);
|
|
+ int (*cb_handler)(struct sk_buff *, int);
|
|
+ int (*err_handler)(struct sk_buff *, u32);
|
|
+ struct xfrm4_protocol *next;
|
|
+ int priority;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ XFRM_STATE_VOID = 0,
|
|
+ XFRM_STATE_ACQ = 1,
|
|
+ XFRM_STATE_VALID = 2,
|
|
+ XFRM_STATE_ERROR = 3,
|
|
+ XFRM_STATE_EXPIRED = 4,
|
|
+ XFRM_STATE_DEAD = 5,
|
|
+};
|
|
+
|
|
+struct xfrm_if;
|
|
+
|
|
+struct xfrm_if_cb {
|
|
+ struct xfrm_if * (*decode_session)(struct sk_buff *, short unsigned int);
|
|
+};
|
|
+
|
|
+struct xfrm_if_parms {
|
|
+ int link;
|
|
+ u32 if_id;
|
|
+};
|
|
+
|
|
+struct xfrm_if {
|
|
+ struct xfrm_if *next;
|
|
+ struct net_device *dev;
|
|
+ struct net *net;
|
|
+ struct xfrm_if_parms p;
|
|
+ struct gro_cells gro_cells;
|
|
+};
|
|
+
|
|
+struct xfrm_policy_walk {
|
|
+ struct xfrm_policy_walk_entry walk;
|
|
+ u8 type;
|
|
+ u32 seq;
|
|
+};
|
|
+
|
|
+struct xfrm_kmaddress {
|
|
+ xfrm_address_t local;
|
|
+ xfrm_address_t remote;
|
|
+ u32 reserved;
|
|
+ u16 family;
|
|
+};
|
|
+
|
|
+struct xfrm_migrate {
|
|
+ xfrm_address_t old_daddr;
|
|
+ xfrm_address_t old_saddr;
|
|
+ xfrm_address_t new_daddr;
|
|
+ xfrm_address_t new_saddr;
|
|
+ u8 proto;
|
|
+ u8 mode;
|
|
+ u16 reserved;
|
|
+ u32 reqid;
|
|
+ u16 old_family;
|
|
+ u16 new_family;
|
|
+};
|
|
+
|
|
+struct xfrmk_spdinfo {
|
|
+ u32 incnt;
|
|
+ u32 outcnt;
|
|
+ u32 fwdcnt;
|
|
+ u32 inscnt;
|
|
+ u32 outscnt;
|
|
+ u32 fwdscnt;
|
|
+ u32 spdhcnt;
|
|
+ u32 spdhmcnt;
|
|
+};
|
|
+
|
|
+struct xfrm_flo {
|
|
+ struct dst_entry *dst_orig;
|
|
+ u8 flags;
|
|
+};
|
|
+
|
|
+enum xfrm_ae_ftype_t {
|
|
+ XFRM_AE_UNSPEC = 0,
|
|
+ XFRM_AE_RTHR = 1,
|
|
+ XFRM_AE_RVAL = 2,
|
|
+ XFRM_AE_LVAL = 4,
|
|
+ XFRM_AE_ETHR = 8,
|
|
+ XFRM_AE_CR = 16,
|
|
+ XFRM_AE_CE = 32,
|
|
+ XFRM_AE_CU = 64,
|
|
+ __XFRM_AE_MAX = 65,
|
|
+};
|
|
+
|
|
+enum xfrm_attr_type_t {
|
|
+ XFRMA_UNSPEC = 0,
|
|
+ XFRMA_ALG_AUTH = 1,
|
|
+ XFRMA_ALG_CRYPT = 2,
|
|
+ XFRMA_ALG_COMP = 3,
|
|
+ XFRMA_ENCAP = 4,
|
|
+ XFRMA_TMPL = 5,
|
|
+ XFRMA_SA = 6,
|
|
+ XFRMA_POLICY = 7,
|
|
+ XFRMA_SEC_CTX = 8,
|
|
+ XFRMA_LTIME_VAL = 9,
|
|
+ XFRMA_REPLAY_VAL = 10,
|
|
+ XFRMA_REPLAY_THRESH = 11,
|
|
+ XFRMA_ETIMER_THRESH = 12,
|
|
+ XFRMA_SRCADDR = 13,
|
|
+ XFRMA_COADDR = 14,
|
|
+ XFRMA_LASTUSED = 15,
|
|
+ XFRMA_POLICY_TYPE = 16,
|
|
+ XFRMA_MIGRATE = 17,
|
|
+ XFRMA_ALG_AEAD = 18,
|
|
+ XFRMA_KMADDRESS = 19,
|
|
+ XFRMA_ALG_AUTH_TRUNC = 20,
|
|
+ XFRMA_MARK = 21,
|
|
+ XFRMA_TFCPAD = 22,
|
|
+ XFRMA_REPLAY_ESN_VAL = 23,
|
|
+ XFRMA_SA_EXTRA_FLAGS = 24,
|
|
+ XFRMA_PROTO = 25,
|
|
+ XFRMA_ADDRESS_FILTER = 26,
|
|
+ XFRMA_PAD = 27,
|
|
+ XFRMA_OFFLOAD_DEV = 28,
|
|
+ XFRMA_SET_MARK = 29,
|
|
+ XFRMA_SET_MARK_MASK = 30,
|
|
+ XFRMA_IF_ID = 31,
|
|
+ __XFRMA_MAX = 32,
|
|
+};
|
|
+
|
|
+enum xfrm_nlgroups {
|
|
+ XFRMNLGRP_NONE = 0,
|
|
+ XFRMNLGRP_ACQUIRE = 1,
|
|
+ XFRMNLGRP_EXPIRE = 2,
|
|
+ XFRMNLGRP_SA = 3,
|
|
+ XFRMNLGRP_POLICY = 4,
|
|
+ XFRMNLGRP_AEVENTS = 5,
|
|
+ XFRMNLGRP_REPORT = 6,
|
|
+ XFRMNLGRP_MIGRATE = 7,
|
|
+ XFRMNLGRP_MAPPING = 8,
|
|
+ __XFRMNLGRP_MAX = 9,
|
|
+};
|
|
+
|
|
+struct km_event {
|
|
+ union {
|
|
+ u32 hard;
|
|
+ u32 proto;
|
|
+ u32 byid;
|
|
+ u32 aevent;
|
|
+ u32 type;
|
|
+ } data;
|
|
+ u32 seq;
|
|
+ u32 portid;
|
|
+ u32 event;
|
|
+ struct net *net;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ XFRM_MODE_FLAG_TUNNEL = 1,
|
|
+};
|
|
+
|
|
+struct xfrm_mgr {
|
|
+ struct list_head list;
|
|
+ int (*notify)(struct xfrm_state *, const struct km_event *);
|
|
+ int (*acquire)(struct xfrm_state *, struct xfrm_tmpl *, struct xfrm_policy *);
|
|
+ struct xfrm_policy * (*compile_policy)(struct sock *, int, u8 *, int, int *);
|
|
+ int (*new_mapping)(struct xfrm_state *, xfrm_address_t *, __be16);
|
|
+ int (*notify_policy)(struct xfrm_policy *, int, const struct km_event *);
|
|
+ int (*report)(struct net *, u8, struct xfrm_selector *, xfrm_address_t *);
|
|
+ int (*migrate)(const struct xfrm_selector *, u8, u8, const struct xfrm_migrate *, int, const struct xfrm_kmaddress *, const struct xfrm_encap_tmpl *);
|
|
+ bool (*is_alive)(const struct km_event *);
|
|
+};
|
|
+
|
|
+struct xfrmk_sadinfo {
|
|
+ u32 sadhcnt;
|
|
+ u32 sadhmcnt;
|
|
+ u32 sadcnt;
|
|
+};
|
|
+
|
|
+struct ip_tunnel_6rd_parm {
|
|
+ struct in6_addr prefix;
|
|
+ __be32 relay_prefix;
|
|
+ u16 prefixlen;
|
|
+ u16 relay_prefixlen;
|
|
+};
|
|
+
|
|
+struct ip_tunnel_prl_entry;
|
|
+
|
|
+struct ip_tunnel {
|
|
+ struct ip_tunnel *next;
|
|
+ struct hlist_node hash_node;
|
|
+ struct net_device *dev;
|
|
+ struct net *net;
|
|
+ long unsigned int err_time;
|
|
+ int err_count;
|
|
+ u32 i_seqno;
|
|
+ u32 o_seqno;
|
|
+ int tun_hlen;
|
|
+ u32 index;
|
|
+ u8 erspan_ver;
|
|
+ u8 dir;
|
|
+ u16 hwid;
|
|
+ struct dst_cache dst_cache;
|
|
+ struct ip_tunnel_parm parms;
|
|
+ int mlink;
|
|
+ int encap_hlen;
|
|
+ int hlen;
|
|
+ struct ip_tunnel_encap encap;
|
|
+ struct ip_tunnel_6rd_parm ip6rd;
|
|
+ struct ip_tunnel_prl_entry *prl;
|
|
+ unsigned int prl_count;
|
|
+ unsigned int ip_tnl_net_id;
|
|
+ struct gro_cells gro_cells;
|
|
+ __u32 fwmark;
|
|
+ bool collect_md;
|
|
+ bool ignore_df;
|
|
+};
|
|
+
|
|
+struct __ip6_tnl_parm {
|
|
+ char name[16];
|
|
+ int link;
|
|
+ __u8 proto;
|
|
+ __u8 encap_limit;
|
|
+ __u8 hop_limit;
|
|
+ bool collect_md;
|
|
+ __be32 flowinfo;
|
|
+ __u32 flags;
|
|
+ struct in6_addr laddr;
|
|
+ struct in6_addr raddr;
|
|
+ __be16 i_flags;
|
|
+ __be16 o_flags;
|
|
+ __be32 i_key;
|
|
+ __be32 o_key;
|
|
+ __u32 fwmark;
|
|
+ __u32 index;
|
|
+ __u8 erspan_ver;
|
|
+ __u8 dir;
|
|
+ __u16 hwid;
|
|
+};
|
|
+
|
|
+struct ip6_tnl {
|
|
+ struct ip6_tnl *next;
|
|
+ struct net_device *dev;
|
|
+ struct net *net;
|
|
+ struct __ip6_tnl_parm parms;
|
|
+ struct flowi fl;
|
|
+ struct dst_cache dst_cache;
|
|
+ struct gro_cells gro_cells;
|
|
+ int err_count;
|
|
+ long unsigned int err_time;
|
|
+ __u32 i_seqno;
|
|
+ __u32 o_seqno;
|
|
+ int hlen;
|
|
+ int tun_hlen;
|
|
+ int encap_hlen;
|
|
+ struct ip_tunnel_encap encap;
|
|
+ int mlink;
|
|
+};
|
|
+
|
|
+struct xfrm_skb_cb {
|
|
+ struct xfrm_tunnel_skb_cb header;
|
|
+ union {
|
|
+ struct {
|
|
+ __u32 low;
|
|
+ __u32 hi;
|
|
+ } output;
|
|
+ struct {
|
|
+ __be32 low;
|
|
+ __be32 hi;
|
|
+ } input;
|
|
+ } seq;
|
|
+};
|
|
+
|
|
+struct ip_tunnel_prl_entry {
|
|
+ struct ip_tunnel_prl_entry *next;
|
|
+ __be32 addr;
|
|
+ u16 flags;
|
|
+ struct callback_head callback_head;
|
|
+};
|
|
+
|
|
+struct xfrm_trans_tasklet {
|
|
+ struct tasklet_struct tasklet;
|
|
+ struct sk_buff_head queue;
|
|
+};
|
|
+
|
|
+struct xfrm_trans_cb {
|
|
+ union {
|
|
+ struct inet_skb_parm h4;
|
|
+ struct inet6_skb_parm h6;
|
|
+ } header;
|
|
+ int (*finish)(struct net *, struct sock *, struct sk_buff *);
|
|
+};
|
|
+
|
|
+struct xfrm_user_offload {
|
|
+ int ifindex;
|
|
+ __u8 flags;
|
|
+};
|
|
+
|
|
+struct sadb_alg {
|
|
+ __u8 sadb_alg_id;
|
|
+ __u8 sadb_alg_ivlen;
|
|
+ __u16 sadb_alg_minbits;
|
|
+ __u16 sadb_alg_maxbits;
|
|
+ __u16 sadb_alg_reserved;
|
|
+};
|
|
+
|
|
+struct xfrm_algo_aead_info {
|
|
+ char *geniv;
|
|
+ u16 icv_truncbits;
|
|
+};
|
|
+
|
|
+struct xfrm_algo_auth_info {
|
|
+ u16 icv_truncbits;
|
|
+ u16 icv_fullbits;
|
|
+};
|
|
+
|
|
+struct xfrm_algo_encr_info {
|
|
+ char *geniv;
|
|
+ u16 blockbits;
|
|
+ u16 defkeybits;
|
|
+};
|
|
+
|
|
+struct xfrm_algo_comp_info {
|
|
+ u16 threshold;
|
|
+};
|
|
+
|
|
+struct xfrm_algo_desc {
|
|
+ char *name;
|
|
+ char *compat;
|
|
+ u8 available: 1;
|
|
+ u8 pfkey_supported: 1;
|
|
+ union {
|
|
+ struct xfrm_algo_aead_info aead;
|
|
+ struct xfrm_algo_auth_info auth;
|
|
+ struct xfrm_algo_encr_info encr;
|
|
+ struct xfrm_algo_comp_info comp;
|
|
+ } uinfo;
|
|
+ struct sadb_alg desc;
|
|
+};
|
|
+
|
|
+struct xfrm_algo_list {
|
|
+ struct xfrm_algo_desc *algs;
|
|
+ int entries;
|
|
+ u32 type;
|
|
+ u32 mask;
|
|
+};
|
|
+
|
|
+struct xfrm_aead_name {
|
|
+ const char *name;
|
|
+ int icvbits;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ XFRM_SHARE_ANY = 0,
|
|
+ XFRM_SHARE_SESSION = 1,
|
|
+ XFRM_SHARE_USER = 2,
|
|
+ XFRM_SHARE_UNIQUE = 3,
|
|
+};
|
|
+
|
|
+struct xfrm_user_tmpl {
|
|
+ struct xfrm_id id;
|
|
+ __u16 family;
|
|
+ xfrm_address_t saddr;
|
|
+ __u32 reqid;
|
|
+ __u8 mode;
|
|
+ __u8 share;
|
|
+ __u8 optional;
|
|
+ __u32 aalgos;
|
|
+ __u32 ealgos;
|
|
+ __u32 calgos;
|
|
+};
|
|
+
|
|
+struct xfrm_userpolicy_type {
|
|
+ __u8 type;
|
|
+ __u16 reserved1;
|
|
+ __u8 reserved2;
|
|
+};
|
|
+
|
|
+enum xfrm_sadattr_type_t {
|
|
+ XFRMA_SAD_UNSPEC = 0,
|
|
+ XFRMA_SAD_CNT = 1,
|
|
+ XFRMA_SAD_HINFO = 2,
|
|
+ __XFRMA_SAD_MAX = 3,
|
|
+};
|
|
+
|
|
+struct xfrmu_sadhinfo {
|
|
+ __u32 sadhcnt;
|
|
+ __u32 sadhmcnt;
|
|
+};
|
|
+
|
|
+enum xfrm_spdattr_type_t {
|
|
+ XFRMA_SPD_UNSPEC = 0,
|
|
+ XFRMA_SPD_INFO = 1,
|
|
+ XFRMA_SPD_HINFO = 2,
|
|
+ XFRMA_SPD_IPV4_HTHRESH = 3,
|
|
+ XFRMA_SPD_IPV6_HTHRESH = 4,
|
|
+ __XFRMA_SPD_MAX = 5,
|
|
+};
|
|
+
|
|
+struct xfrmu_spdinfo {
|
|
+ __u32 incnt;
|
|
+ __u32 outcnt;
|
|
+ __u32 fwdcnt;
|
|
+ __u32 inscnt;
|
|
+ __u32 outscnt;
|
|
+ __u32 fwdscnt;
|
|
+};
|
|
+
|
|
+struct xfrmu_spdhinfo {
|
|
+ __u32 spdhcnt;
|
|
+ __u32 spdhmcnt;
|
|
+};
|
|
+
|
|
+struct xfrmu_spdhthresh {
|
|
+ __u8 lbits;
|
|
+ __u8 rbits;
|
|
+};
|
|
+
|
|
+struct xfrm_usersa_info {
|
|
+ struct xfrm_selector sel;
|
|
+ struct xfrm_id id;
|
|
+ xfrm_address_t saddr;
|
|
+ struct xfrm_lifetime_cfg lft;
|
|
+ struct xfrm_lifetime_cur curlft;
|
|
+ struct xfrm_stats stats;
|
|
+ __u32 seq;
|
|
+ __u32 reqid;
|
|
+ __u16 family;
|
|
+ __u8 mode;
|
|
+ __u8 replay_window;
|
|
+ __u8 flags;
|
|
+};
|
|
+
|
|
+struct xfrm_usersa_id {
|
|
+ xfrm_address_t daddr;
|
|
+ __be32 spi;
|
|
+ __u16 family;
|
|
+ __u8 proto;
|
|
+};
|
|
+
|
|
+struct xfrm_aevent_id {
|
|
+ struct xfrm_usersa_id sa_id;
|
|
+ xfrm_address_t saddr;
|
|
+ __u32 flags;
|
|
+ __u32 reqid;
|
|
+};
|
|
+
|
|
+struct xfrm_userspi_info {
|
|
+ struct xfrm_usersa_info info;
|
|
+ __u32 min;
|
|
+ __u32 max;
|
|
+};
|
|
+
|
|
+struct xfrm_userpolicy_info {
|
|
+ struct xfrm_selector sel;
|
|
+ struct xfrm_lifetime_cfg lft;
|
|
+ struct xfrm_lifetime_cur curlft;
|
|
+ __u32 priority;
|
|
+ __u32 index;
|
|
+ __u8 dir;
|
|
+ __u8 action;
|
|
+ __u8 flags;
|
|
+ __u8 share;
|
|
+};
|
|
+
|
|
+struct xfrm_userpolicy_id {
|
|
+ struct xfrm_selector sel;
|
|
+ __u32 index;
|
|
+ __u8 dir;
|
|
+};
|
|
+
|
|
+struct xfrm_user_acquire {
|
|
+ struct xfrm_id id;
|
|
+ xfrm_address_t saddr;
|
|
+ struct xfrm_selector sel;
|
|
+ struct xfrm_userpolicy_info policy;
|
|
+ __u32 aalgos;
|
|
+ __u32 ealgos;
|
|
+ __u32 calgos;
|
|
+ __u32 seq;
|
|
+};
|
|
+
|
|
+struct xfrm_user_expire {
|
|
+ struct xfrm_usersa_info state;
|
|
+ __u8 hard;
|
|
+};
|
|
+
|
|
+struct xfrm_user_polexpire {
|
|
+ struct xfrm_userpolicy_info pol;
|
|
+ __u8 hard;
|
|
+};
|
|
+
|
|
+struct xfrm_usersa_flush {
|
|
+ __u8 proto;
|
|
+};
|
|
+
|
|
+struct xfrm_user_report {
|
|
+ __u8 proto;
|
|
+ struct xfrm_selector sel;
|
|
+};
|
|
+
|
|
+struct xfrm_user_kmaddress {
|
|
+ xfrm_address_t local;
|
|
+ xfrm_address_t remote;
|
|
+ __u32 reserved;
|
|
+ __u16 family;
|
|
+};
|
|
+
|
|
+struct xfrm_user_migrate {
|
|
+ xfrm_address_t old_daddr;
|
|
+ xfrm_address_t old_saddr;
|
|
+ xfrm_address_t new_daddr;
|
|
+ xfrm_address_t new_saddr;
|
|
+ __u8 proto;
|
|
+ __u8 mode;
|
|
+ __u16 reserved;
|
|
+ __u32 reqid;
|
|
+ __u16 old_family;
|
|
+ __u16 new_family;
|
|
+};
|
|
+
|
|
+struct xfrm_user_mapping {
|
|
+ struct xfrm_usersa_id id;
|
|
+ __u32 reqid;
|
|
+ xfrm_address_t old_saddr;
|
|
+ xfrm_address_t new_saddr;
|
|
+ __be16 old_sport;
|
|
+ __be16 new_sport;
|
|
+};
|
|
+
|
|
+struct xfrm_dump_info {
|
|
+ struct sk_buff *in_skb;
|
|
+ struct sk_buff *out_skb;
|
|
+ u32 nlmsg_seq;
|
|
+ u16 nlmsg_flags;
|
|
+};
|
|
+
|
|
+struct xfrm_link {
|
|
+ int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
|
|
+ int (*start)(struct netlink_callback *);
|
|
+ int (*dump)(struct sk_buff *, struct netlink_callback *);
|
|
+ int (*done)(struct netlink_callback *);
|
|
+ const struct nla_policy *nla_pol;
|
|
+ int nla_max;
|
|
+};
|
|
+
|
|
+struct unix_stream_read_state {
|
|
+ int (*recv_actor)(struct sk_buff *, int, int, struct unix_stream_read_state *);
|
|
+ struct socket *socket;
|
|
+ struct msghdr *msg;
|
|
+ struct pipe_inode_info *pipe;
|
|
+ size_t size;
|
|
+ int flags;
|
|
+ unsigned int splice_flags;
|
|
+};
|
|
+
|
|
+struct ac6_iter_state {
|
|
+ struct seq_net_private p;
|
|
+ struct net_device *dev;
|
|
+ struct inet6_dev *idev;
|
|
+};
|
|
+
|
|
+struct ipcm6_cookie {
|
|
+ struct sockcm_cookie sockc;
|
|
+ __s16 hlimit;
|
|
+ __s16 tclass;
|
|
+ __s8 dontfrag;
|
|
+ struct ipv6_txoptions *opt;
|
|
+ __u16 gso_size;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IFLA_INET6_UNSPEC = 0,
|
|
+ IFLA_INET6_FLAGS = 1,
|
|
+ IFLA_INET6_CONF = 2,
|
|
+ IFLA_INET6_STATS = 3,
|
|
+ IFLA_INET6_MCAST = 4,
|
|
+ IFLA_INET6_CACHEINFO = 5,
|
|
+ IFLA_INET6_ICMP6STATS = 6,
|
|
+ IFLA_INET6_TOKEN = 7,
|
|
+ IFLA_INET6_ADDR_GEN_MODE = 8,
|
|
+ __IFLA_INET6_MAX = 9,
|
|
+};
|
|
+
|
|
+enum in6_addr_gen_mode {
|
|
+ IN6_ADDR_GEN_MODE_EUI64 = 0,
|
|
+ IN6_ADDR_GEN_MODE_NONE = 1,
|
|
+ IN6_ADDR_GEN_MODE_STABLE_PRIVACY = 2,
|
|
+ IN6_ADDR_GEN_MODE_RANDOM = 3,
|
|
+};
|
|
+
|
|
+struct ifla_cacheinfo {
|
|
+ __u32 max_reasm_len;
|
|
+ __u32 tstamp;
|
|
+ __u32 reachable_time;
|
|
+ __u32 retrans_time;
|
|
+};
|
|
+
|
|
+struct wpan_phy;
|
|
+
|
|
+struct wpan_dev_header_ops;
|
|
+
|
|
+struct wpan_dev {
|
|
+ struct wpan_phy *wpan_phy;
|
|
+ int iftype;
|
|
+ struct list_head list;
|
|
+ struct net_device *netdev;
|
|
+ const struct wpan_dev_header_ops *header_ops;
|
|
+ struct net_device *lowpan_dev;
|
|
+ u32 identifier;
|
|
+ __le16 pan_id;
|
|
+ __le16 short_addr;
|
|
+ __le64 extended_addr;
|
|
+ atomic_t bsn;
|
|
+ atomic_t dsn;
|
|
+ u8 min_be;
|
|
+ u8 max_be;
|
|
+ u8 csma_retries;
|
|
+ s8 frame_retries;
|
|
+ bool lbt;
|
|
+ bool promiscuous_mode;
|
|
+ bool ackreq;
|
|
+};
|
|
+
|
|
+struct prefixmsg {
|
|
+ unsigned char prefix_family;
|
|
+ unsigned char prefix_pad1;
|
|
+ short unsigned int prefix_pad2;
|
|
+ int prefix_ifindex;
|
|
+ unsigned char prefix_type;
|
|
+ unsigned char prefix_len;
|
|
+ unsigned char prefix_flags;
|
|
+ unsigned char prefix_pad3;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ PREFIX_UNSPEC = 0,
|
|
+ PREFIX_ADDRESS = 1,
|
|
+ PREFIX_CACHEINFO = 2,
|
|
+ __PREFIX_MAX = 3,
|
|
+};
|
|
+
|
|
+struct prefix_cacheinfo {
|
|
+ __u32 preferred_time;
|
|
+ __u32 valid_time;
|
|
+};
|
|
+
|
|
+struct in6_ifreq {
|
|
+ struct in6_addr ifr6_addr;
|
|
+ __u32 ifr6_prefixlen;
|
|
+ int ifr6_ifindex;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ DEVCONF_FORWARDING = 0,
|
|
+ DEVCONF_HOPLIMIT = 1,
|
|
+ DEVCONF_MTU6 = 2,
|
|
+ DEVCONF_ACCEPT_RA = 3,
|
|
+ DEVCONF_ACCEPT_REDIRECTS = 4,
|
|
+ DEVCONF_AUTOCONF = 5,
|
|
+ DEVCONF_DAD_TRANSMITS = 6,
|
|
+ DEVCONF_RTR_SOLICITS = 7,
|
|
+ DEVCONF_RTR_SOLICIT_INTERVAL = 8,
|
|
+ DEVCONF_RTR_SOLICIT_DELAY = 9,
|
|
+ DEVCONF_USE_TEMPADDR = 10,
|
|
+ DEVCONF_TEMP_VALID_LFT = 11,
|
|
+ DEVCONF_TEMP_PREFERED_LFT = 12,
|
|
+ DEVCONF_REGEN_MAX_RETRY = 13,
|
|
+ DEVCONF_MAX_DESYNC_FACTOR = 14,
|
|
+ DEVCONF_MAX_ADDRESSES = 15,
|
|
+ DEVCONF_FORCE_MLD_VERSION = 16,
|
|
+ DEVCONF_ACCEPT_RA_DEFRTR = 17,
|
|
+ DEVCONF_ACCEPT_RA_PINFO = 18,
|
|
+ DEVCONF_ACCEPT_RA_RTR_PREF = 19,
|
|
+ DEVCONF_RTR_PROBE_INTERVAL = 20,
|
|
+ DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN = 21,
|
|
+ DEVCONF_PROXY_NDP = 22,
|
|
+ DEVCONF_OPTIMISTIC_DAD = 23,
|
|
+ DEVCONF_ACCEPT_SOURCE_ROUTE = 24,
|
|
+ DEVCONF_MC_FORWARDING = 25,
|
|
+ DEVCONF_DISABLE_IPV6 = 26,
|
|
+ DEVCONF_ACCEPT_DAD = 27,
|
|
+ DEVCONF_FORCE_TLLAO = 28,
|
|
+ DEVCONF_NDISC_NOTIFY = 29,
|
|
+ DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL = 30,
|
|
+ DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL = 31,
|
|
+ DEVCONF_SUPPRESS_FRAG_NDISC = 32,
|
|
+ DEVCONF_ACCEPT_RA_FROM_LOCAL = 33,
|
|
+ DEVCONF_USE_OPTIMISTIC = 34,
|
|
+ DEVCONF_ACCEPT_RA_MTU = 35,
|
|
+ DEVCONF_STABLE_SECRET = 36,
|
|
+ DEVCONF_USE_OIF_ADDRS_ONLY = 37,
|
|
+ DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT = 38,
|
|
+ DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN = 39,
|
|
+ DEVCONF_DROP_UNICAST_IN_L2_MULTICAST = 40,
|
|
+ DEVCONF_DROP_UNSOLICITED_NA = 41,
|
|
+ DEVCONF_KEEP_ADDR_ON_DOWN = 42,
|
|
+ DEVCONF_RTR_SOLICIT_MAX_INTERVAL = 43,
|
|
+ DEVCONF_SEG6_ENABLED = 44,
|
|
+ DEVCONF_SEG6_REQUIRE_HMAC = 45,
|
|
+ DEVCONF_ENHANCED_DAD = 46,
|
|
+ DEVCONF_ADDR_GEN_MODE = 47,
|
|
+ DEVCONF_DISABLE_POLICY = 48,
|
|
+ DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN = 49,
|
|
+ DEVCONF_NDISC_TCLASS = 50,
|
|
+ DEVCONF_MAX = 51,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ INET6_IFADDR_STATE_PREDAD = 0,
|
|
+ INET6_IFADDR_STATE_DAD = 1,
|
|
+ INET6_IFADDR_STATE_POSTDAD = 2,
|
|
+ INET6_IFADDR_STATE_ERRDAD = 3,
|
|
+ INET6_IFADDR_STATE_DEAD = 4,
|
|
+};
|
|
+
|
|
+enum nl802154_cca_modes {
|
|
+ __NL802154_CCA_INVALID = 0,
|
|
+ NL802154_CCA_ENERGY = 1,
|
|
+ NL802154_CCA_CARRIER = 2,
|
|
+ NL802154_CCA_ENERGY_CARRIER = 3,
|
|
+ NL802154_CCA_ALOHA = 4,
|
|
+ NL802154_CCA_UWB_SHR = 5,
|
|
+ NL802154_CCA_UWB_MULTIPLEXED = 6,
|
|
+ __NL802154_CCA_ATTR_AFTER_LAST = 7,
|
|
+ NL802154_CCA_ATTR_MAX = 6,
|
|
+};
|
|
+
|
|
+enum nl802154_cca_opts {
|
|
+ NL802154_CCA_OPT_ENERGY_CARRIER_AND = 0,
|
|
+ NL802154_CCA_OPT_ENERGY_CARRIER_OR = 1,
|
|
+ __NL802154_CCA_OPT_ATTR_AFTER_LAST = 2,
|
|
+ NL802154_CCA_OPT_ATTR_MAX = 1,
|
|
+};
|
|
+
|
|
+enum nl802154_supported_bool_states {
|
|
+ NL802154_SUPPORTED_BOOL_FALSE = 0,
|
|
+ NL802154_SUPPORTED_BOOL_TRUE = 1,
|
|
+ __NL802154_SUPPORTED_BOOL_INVALD = 2,
|
|
+ NL802154_SUPPORTED_BOOL_BOTH = 3,
|
|
+ __NL802154_SUPPORTED_BOOL_AFTER_LAST = 4,
|
|
+ NL802154_SUPPORTED_BOOL_MAX = 3,
|
|
+};
|
|
+
|
|
+struct wpan_phy_supported {
|
|
+ u32 channels[32];
|
|
+ u32 cca_modes;
|
|
+ u32 cca_opts;
|
|
+ u32 iftypes;
|
|
+ enum nl802154_supported_bool_states lbt;
|
|
+ u8 min_minbe;
|
|
+ u8 max_minbe;
|
|
+ u8 min_maxbe;
|
|
+ u8 max_maxbe;
|
|
+ u8 min_csma_backoffs;
|
|
+ u8 max_csma_backoffs;
|
|
+ s8 min_frame_retries;
|
|
+ s8 max_frame_retries;
|
|
+ size_t tx_powers_size;
|
|
+ size_t cca_ed_levels_size;
|
|
+ const s32 *tx_powers;
|
|
+ const s32 *cca_ed_levels;
|
|
+};
|
|
+
|
|
+struct wpan_phy_cca {
|
|
+ enum nl802154_cca_modes mode;
|
|
+ enum nl802154_cca_opts opt;
|
|
+};
|
|
+
|
|
+struct wpan_phy {
|
|
+ const void *privid;
|
|
+ u32 flags;
|
|
+ u8 current_channel;
|
|
+ u8 current_page;
|
|
+ struct wpan_phy_supported supported;
|
|
+ s32 transmit_power;
|
|
+ struct wpan_phy_cca cca;
|
|
+ __le64 perm_extended_addr;
|
|
+ s32 cca_ed_level;
|
|
+ u8 symbol_duration;
|
|
+ u16 lifs_period;
|
|
+ u16 sifs_period;
|
|
+ struct device dev;
|
|
+ possible_net_t _net;
|
|
+ char priv[0];
|
|
+};
|
|
+
|
|
+struct ieee802154_addr {
|
|
+ u8 mode;
|
|
+ __le16 pan_id;
|
|
+ union {
|
|
+ __le16 short_addr;
|
|
+ __le64 extended_addr;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct wpan_dev_header_ops {
|
|
+ int (*create)(struct sk_buff *, struct net_device *, const struct ieee802154_addr *, const struct ieee802154_addr *, unsigned int);
|
|
+};
|
|
+
|
|
+union fwnet_hwaddr {
|
|
+ u8 u[16];
|
|
+ struct {
|
|
+ __be64 uniq_id;
|
|
+ u8 max_rec;
|
|
+ u8 sspd;
|
|
+ __be16 fifo_hi;
|
|
+ __be32 fifo_lo;
|
|
+ } uc;
|
|
+};
|
|
+
|
|
+struct in6_validator_info {
|
|
+ struct in6_addr i6vi_addr;
|
|
+ struct inet6_dev *i6vi_dev;
|
|
+ struct netlink_ext_ack *extack;
|
|
+};
|
|
+
|
|
+struct ifa6_config {
|
|
+ const struct in6_addr *pfx;
|
|
+ unsigned int plen;
|
|
+ const struct in6_addr *peer_pfx;
|
|
+ u32 rt_priority;
|
|
+ u32 ifa_flags;
|
|
+ u32 preferred_lft;
|
|
+ u32 valid_lft;
|
|
+ u16 scope;
|
|
+};
|
|
+
|
|
+struct fib6_config {
|
|
+ u32 fc_table;
|
|
+ u32 fc_metric;
|
|
+ int fc_dst_len;
|
|
+ int fc_src_len;
|
|
+ int fc_ifindex;
|
|
+ u32 fc_flags;
|
|
+ u32 fc_protocol;
|
|
+ u16 fc_type;
|
|
+ u16 fc_delete_all_nh: 1;
|
|
+ u16 __unused: 15;
|
|
+ struct in6_addr fc_dst;
|
|
+ struct in6_addr fc_src;
|
|
+ struct in6_addr fc_prefsrc;
|
|
+ struct in6_addr fc_gateway;
|
|
+ long unsigned int fc_expires;
|
|
+ struct nlattr *fc_mx;
|
|
+ int fc_mx_len;
|
|
+ int fc_mp_len;
|
|
+ struct nlattr *fc_mp;
|
|
+ struct nl_info fc_nlinfo;
|
|
+ struct nlattr *fc_encap;
|
|
+ u16 fc_encap_type;
|
|
+};
|
|
+
|
|
+enum cleanup_prefix_rt_t {
|
|
+ CLEANUP_PREFIX_RT_NOP = 0,
|
|
+ CLEANUP_PREFIX_RT_DEL = 1,
|
|
+ CLEANUP_PREFIX_RT_EXPIRE = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IPV6_SADDR_RULE_INIT = 0,
|
|
+ IPV6_SADDR_RULE_LOCAL = 1,
|
|
+ IPV6_SADDR_RULE_SCOPE = 2,
|
|
+ IPV6_SADDR_RULE_PREFERRED = 3,
|
|
+ IPV6_SADDR_RULE_OIF = 4,
|
|
+ IPV6_SADDR_RULE_LABEL = 5,
|
|
+ IPV6_SADDR_RULE_PRIVACY = 6,
|
|
+ IPV6_SADDR_RULE_ORCHID = 7,
|
|
+ IPV6_SADDR_RULE_PREFIX = 8,
|
|
+ IPV6_SADDR_RULE_NOT_OPTIMISTIC = 9,
|
|
+ IPV6_SADDR_RULE_MAX = 10,
|
|
+};
|
|
+
|
|
+struct ipv6_saddr_score {
|
|
+ int rule;
|
|
+ int addr_type;
|
|
+ struct inet6_ifaddr *ifa;
|
|
+ long unsigned int scorebits[1];
|
|
+ int scopedist;
|
|
+ int matchlen;
|
|
+};
|
|
+
|
|
+struct ipv6_saddr_dst {
|
|
+ const struct in6_addr *addr;
|
|
+ int ifindex;
|
|
+ int scope;
|
|
+ int label;
|
|
+ unsigned int prefs;
|
|
+};
|
|
+
|
|
+struct if6_iter_state {
|
|
+ struct seq_net_private p;
|
|
+ int bucket;
|
|
+ int offset;
|
|
+};
|
|
+
|
|
+enum addr_type_t {
|
|
+ UNICAST_ADDR = 0,
|
|
+ MULTICAST_ADDR = 1,
|
|
+ ANYCAST_ADDR = 2,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ DAD_PROCESS = 0,
|
|
+ DAD_BEGIN = 1,
|
|
+ DAD_ABORT = 2,
|
|
+};
|
|
+
|
|
+struct ifaddrlblmsg {
|
|
+ __u8 ifal_family;
|
|
+ __u8 __ifal_reserved;
|
|
+ __u8 ifal_prefixlen;
|
|
+ __u8 ifal_flags;
|
|
+ __u32 ifal_index;
|
|
+ __u32 ifal_seq;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IFAL_ADDRESS = 1,
|
|
+ IFAL_LABEL = 2,
|
|
+ __IFAL_MAX = 3,
|
|
+};
|
|
+
|
|
+struct ip6addrlbl_entry {
|
|
+ struct in6_addr prefix;
|
|
+ int prefixlen;
|
|
+ int ifindex;
|
|
+ int addrtype;
|
|
+ u32 label;
|
|
+ struct hlist_node list;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct ip6addrlbl_init_table {
|
|
+ const struct in6_addr *prefix;
|
|
+ int prefixlen;
|
|
+ u32 label;
|
|
+};
|
|
+
|
|
+struct rd_msg {
|
|
+ struct icmp6hdr icmph;
|
|
+ struct in6_addr target;
|
|
+ struct in6_addr dest;
|
|
+ __u8 opt[0];
|
|
+};
|
|
+
|
|
+struct fib6_gc_args {
|
|
+ int timeout;
|
|
+ int more;
|
|
+};
|
|
+
|
|
+struct rt6_exception {
|
|
+ struct hlist_node hlist;
|
|
+ struct rt6_info *rt6i;
|
|
+ long unsigned int stamp;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct route_info {
|
|
+ __u8 type;
|
|
+ __u8 length;
|
|
+ __u8 prefix_len;
|
|
+ __u8 reserved_l: 3;
|
|
+ __u8 route_pref: 2;
|
|
+ __u8 reserved_h: 3;
|
|
+ __be32 lifetime;
|
|
+ __u8 prefix[0];
|
|
+};
|
|
+
|
|
+struct rt6_rtnl_dump_arg {
|
|
+ struct sk_buff *skb;
|
|
+ struct netlink_callback *cb;
|
|
+ struct net *net;
|
|
+};
|
|
+
|
|
+struct netevent_redirect {
|
|
+ struct dst_entry *old;
|
|
+ struct dst_entry *new;
|
|
+ struct neighbour *neigh;
|
|
+ const void *daddr;
|
|
+};
|
|
+
|
|
+struct trace_event_raw_fib6_table_lookup {
|
|
+ struct trace_entry ent;
|
|
+ u32 tb_id;
|
|
+ int err;
|
|
+ int oif;
|
|
+ int iif;
|
|
+ __u8 tos;
|
|
+ __u8 scope;
|
|
+ __u8 flags;
|
|
+ __u8 src[16];
|
|
+ __u8 dst[16];
|
|
+ u16 sport;
|
|
+ u16 dport;
|
|
+ u8 proto;
|
|
+ u8 rt_type;
|
|
+ u32 __data_loc_name;
|
|
+ __u8 gw[16];
|
|
+ char __data[0];
|
|
+};
|
|
+
|
|
+struct trace_event_data_offsets_fib6_table_lookup {
|
|
+ u32 name;
|
|
+};
|
|
+
|
|
+enum rt6_nud_state {
|
|
+ RT6_NUD_FAIL_HARD = -3,
|
|
+ RT6_NUD_FAIL_PROBE = -2,
|
|
+ RT6_NUD_FAIL_DO_RR = -1,
|
|
+ RT6_NUD_SUCCEED = 1,
|
|
+};
|
|
+
|
|
+struct __rt6_probe_work {
|
|
+ struct work_struct work;
|
|
+ struct in6_addr target;
|
|
+ struct net_device *dev;
|
|
+};
|
|
+
|
|
+struct ip6rd_flowi {
|
|
+ struct flowi6 fl6;
|
|
+ struct in6_addr gateway;
|
|
+};
|
|
+
|
|
+struct arg_dev_net_ip {
|
|
+ struct net_device *dev;
|
|
+ struct net *net;
|
|
+ struct in6_addr *addr;
|
|
+};
|
|
+
|
|
+struct arg_netdev_event {
|
|
+ const struct net_device *dev;
|
|
+ union {
|
|
+ unsigned int nh_flags;
|
|
+ long unsigned int event;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct rt6_mtu_change_arg {
|
|
+ struct net_device *dev;
|
|
+ unsigned int mtu;
|
|
+};
|
|
+
|
|
+struct rt6_nh {
|
|
+ struct fib6_info *fib6_info;
|
|
+ struct fib6_config r_cfg;
|
|
+ struct list_head next;
|
|
+};
|
|
+
|
|
+enum fib6_walk_state {
|
|
+ FWS_L = 0,
|
|
+ FWS_R = 1,
|
|
+ FWS_C = 2,
|
|
+ FWS_U = 3,
|
|
+};
|
|
+
|
|
+struct fib6_walker {
|
|
+ struct list_head lh;
|
|
+ struct fib6_node *root;
|
|
+ struct fib6_node *node;
|
|
+ struct fib6_info *leaf;
|
|
+ enum fib6_walk_state state;
|
|
+ unsigned int skip;
|
|
+ unsigned int count;
|
|
+ int (*func)(struct fib6_walker *);
|
|
+ void *args;
|
|
+};
|
|
+
|
|
+struct fib6_entry_notifier_info {
|
|
+ struct fib_notifier_info info;
|
|
+ struct fib6_info *rt;
|
|
+};
|
|
+
|
|
+struct ipv6_route_iter {
|
|
+ struct seq_net_private p;
|
|
+ struct fib6_walker w;
|
|
+ loff_t skip;
|
|
+ struct fib6_table *tbl;
|
|
+ int sernum;
|
|
+};
|
|
+
|
|
+struct fib6_cleaner {
|
|
+ struct fib6_walker w;
|
|
+ struct net *net;
|
|
+ int (*func)(struct fib6_info *, void *);
|
|
+ int sernum;
|
|
+ void *arg;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ FIB6_NO_SERNUM_CHANGE = 0,
|
|
+};
|
|
+
|
|
+struct fib6_dump_arg {
|
|
+ struct net *net;
|
|
+ struct notifier_block *nb;
|
|
+};
|
|
+
|
|
+struct lookup_args {
|
|
+ int offset;
|
|
+ const struct in6_addr *addr;
|
|
+};
|
|
+
|
|
+struct ipv6_mreq {
|
|
+ struct in6_addr ipv6mr_multiaddr;
|
|
+ int ipv6mr_ifindex;
|
|
+};
|
|
+
|
|
+struct in6_flowlabel_req {
|
|
+ struct in6_addr flr_dst;
|
|
+ __be32 flr_label;
|
|
+ __u8 flr_action;
|
|
+ __u8 flr_share;
|
|
+ __u16 flr_flags;
|
|
+ __u16 flr_expires;
|
|
+ __u16 flr_linger;
|
|
+ __u32 __flr_pad;
|
|
+};
|
|
+
|
|
+struct ip6_mtuinfo {
|
|
+ struct sockaddr_in6 ip6m_addr;
|
|
+ __u32 ip6m_mtu;
|
|
+};
|
|
+
|
|
+struct nduseroptmsg {
|
|
+ unsigned char nduseropt_family;
|
|
+ unsigned char nduseropt_pad1;
|
|
+ short unsigned int nduseropt_opts_len;
|
|
+ int nduseropt_ifindex;
|
|
+ __u8 nduseropt_icmp_type;
|
|
+ __u8 nduseropt_icmp_code;
|
|
+ short unsigned int nduseropt_pad2;
|
|
+ unsigned int nduseropt_pad3;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NDUSEROPT_UNSPEC = 0,
|
|
+ NDUSEROPT_SRCADDR = 1,
|
|
+ __NDUSEROPT_MAX = 2,
|
|
+};
|
|
+
|
|
+struct nd_msg {
|
|
+ struct icmp6hdr icmph;
|
|
+ struct in6_addr target;
|
|
+ __u8 opt[0];
|
|
+};
|
|
+
|
|
+struct rs_msg {
|
|
+ struct icmp6hdr icmph;
|
|
+ __u8 opt[0];
|
|
+};
|
|
+
|
|
+struct ra_msg {
|
|
+ struct icmp6hdr icmph;
|
|
+ __be32 reachable_time;
|
|
+ __be32 retrans_timer;
|
|
+};
|
|
+
|
|
+struct icmp6_filter {
|
|
+ __u32 data[8];
|
|
+};
|
|
+
|
|
+struct raw6_sock {
|
|
+ struct inet_sock inet;
|
|
+ __u32 checksum;
|
|
+ __u32 offset;
|
|
+ struct icmp6_filter filter;
|
|
+ __u32 ip6mr_table;
|
|
+ struct ipv6_pinfo inet6;
|
|
+};
|
|
+
|
|
+typedef int mh_filter_t(struct sock *, struct sk_buff *);
|
|
+
|
|
+struct raw6_frag_vec {
|
|
+ struct msghdr *msg;
|
|
+ int hlen;
|
|
+ char c[4];
|
|
+};
|
|
+
|
|
+struct ipv6_destopt_hao {
|
|
+ __u8 type;
|
|
+ __u8 length;
|
|
+ struct in6_addr addr;
|
|
+} __attribute__((packed));
|
|
+
|
|
+typedef void ip6_icmp_send_t(struct sk_buff *, u8, u8, __u32, const struct in6_addr *, const struct inet6_skb_parm *);
|
|
+
|
|
+struct icmpv6_msg {
|
|
+ struct sk_buff *skb;
|
|
+ int offset;
|
|
+ uint8_t type;
|
|
+};
|
|
+
|
|
+struct icmp6_err {
|
|
+ int err;
|
|
+ int fatal;
|
|
+};
|
|
+
|
|
+struct mld_msg {
|
|
+ struct icmp6hdr mld_hdr;
|
|
+ struct in6_addr mld_mca;
|
|
+};
|
|
+
|
|
+struct mld2_grec {
|
|
+ __u8 grec_type;
|
|
+ __u8 grec_auxwords;
|
|
+ __be16 grec_nsrcs;
|
|
+ struct in6_addr grec_mca;
|
|
+ struct in6_addr grec_src[0];
|
|
+};
|
|
+
|
|
+struct mld2_report {
|
|
+ struct icmp6hdr mld2r_hdr;
|
|
+ struct mld2_grec mld2r_grec[0];
|
|
+};
|
|
+
|
|
+struct mld2_query {
|
|
+ struct icmp6hdr mld2q_hdr;
|
|
+ struct in6_addr mld2q_mca;
|
|
+ __u8 mld2q_qrv: 3;
|
|
+ __u8 mld2q_suppress: 1;
|
|
+ __u8 mld2q_resv2: 4;
|
|
+ __u8 mld2q_qqic;
|
|
+ __be16 mld2q_nsrcs;
|
|
+ struct in6_addr mld2q_srcs[0];
|
|
+};
|
|
+
|
|
+struct igmp6_mc_iter_state {
|
|
+ struct seq_net_private p;
|
|
+ struct net_device *dev;
|
|
+ struct inet6_dev *idev;
|
|
+};
|
|
+
|
|
+struct igmp6_mcf_iter_state {
|
|
+ struct seq_net_private p;
|
|
+ struct net_device *dev;
|
|
+ struct inet6_dev *idev;
|
|
+ struct ifmcaddr6 *im;
|
|
+};
|
|
+
|
|
+enum ip6_defrag_users {
|
|
+ IP6_DEFRAG_LOCAL_DELIVER = 0,
|
|
+ IP6_DEFRAG_CONNTRACK_IN = 1,
|
|
+ __IP6_DEFRAG_CONNTRACK_IN = 65536,
|
|
+ IP6_DEFRAG_CONNTRACK_OUT = 65537,
|
|
+ __IP6_DEFRAG_CONNTRACK_OUT = 131072,
|
|
+ IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 131073,
|
|
+ __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = 196608,
|
|
+};
|
|
+
|
|
+struct frag_queue {
|
|
+ struct inet_frag_queue q;
|
|
+ int iif;
|
|
+ __u16 nhoffset;
|
|
+ u8 ecn;
|
|
+};
|
|
+
|
|
+struct tcp6_sock {
|
|
+ struct tcp_sock tcp;
|
|
+ struct ipv6_pinfo inet6;
|
|
+};
|
|
+
|
|
+struct tcp6_pseudohdr {
|
|
+ struct in6_addr saddr;
|
|
+ struct in6_addr daddr;
|
|
+ __be32 len;
|
|
+ __be32 protocol;
|
|
+};
|
|
+
|
|
+struct rt0_hdr {
|
|
+ struct ipv6_rt_hdr rt_hdr;
|
|
+ __u32 reserved;
|
|
+ struct in6_addr addr[0];
|
|
+};
|
|
+
|
|
+struct tlvtype_proc {
|
|
+ int type;
|
|
+ bool (*func)(struct sk_buff *, int);
|
|
+};
|
|
+
|
|
+struct ip6fl_iter_state {
|
|
+ struct seq_net_private p;
|
|
+ struct pid_namespace *pid_ns;
|
|
+ int bucket;
|
|
+};
|
|
+
|
|
+struct sr6_tlv {
|
|
+ __u8 type;
|
|
+ __u8 len;
|
|
+ __u8 data[0];
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SEG6_ATTR_UNSPEC = 0,
|
|
+ SEG6_ATTR_DST = 1,
|
|
+ SEG6_ATTR_DSTLEN = 2,
|
|
+ SEG6_ATTR_HMACKEYID = 3,
|
|
+ SEG6_ATTR_SECRET = 4,
|
|
+ SEG6_ATTR_SECRETLEN = 5,
|
|
+ SEG6_ATTR_ALGID = 6,
|
|
+ SEG6_ATTR_HMACINFO = 7,
|
|
+ __SEG6_ATTR_MAX = 8,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ SEG6_CMD_UNSPEC = 0,
|
|
+ SEG6_CMD_SETHMAC = 1,
|
|
+ SEG6_CMD_DUMPHMAC = 2,
|
|
+ SEG6_CMD_SET_TUNSRC = 3,
|
|
+ SEG6_CMD_GET_TUNSRC = 4,
|
|
+ __SEG6_CMD_MAX = 5,
|
|
+};
|
|
+
|
|
+typedef short unsigned int mifi_t;
|
|
+
|
|
+typedef __u32 if_mask;
|
|
+
|
|
+struct if_set {
|
|
+ if_mask ifs_bits[8];
|
|
+};
|
|
+
|
|
+struct mif6ctl {
|
|
+ mifi_t mif6c_mifi;
|
|
+ unsigned char mif6c_flags;
|
|
+ unsigned char vifc_threshold;
|
|
+ __u16 mif6c_pifi;
|
|
+ unsigned int vifc_rate_limit;
|
|
+};
|
|
+
|
|
+struct mf6cctl {
|
|
+ struct sockaddr_in6 mf6cc_origin;
|
|
+ struct sockaddr_in6 mf6cc_mcastgrp;
|
|
+ mifi_t mf6cc_parent;
|
|
+ struct if_set mf6cc_ifset;
|
|
+};
|
|
+
|
|
+struct sioc_sg_req6 {
|
|
+ struct sockaddr_in6 src;
|
|
+ struct sockaddr_in6 grp;
|
|
+ long unsigned int pktcnt;
|
|
+ long unsigned int bytecnt;
|
|
+ long unsigned int wrong_if;
|
|
+};
|
|
+
|
|
+struct sioc_mif_req6 {
|
|
+ mifi_t mifi;
|
|
+ long unsigned int icount;
|
|
+ long unsigned int ocount;
|
|
+ long unsigned int ibytes;
|
|
+ long unsigned int obytes;
|
|
+};
|
|
+
|
|
+struct mrt6msg {
|
|
+ __u8 im6_mbz;
|
|
+ __u8 im6_msgtype;
|
|
+ __u16 im6_mif;
|
|
+ __u32 im6_pad;
|
|
+ struct in6_addr im6_src;
|
|
+ struct in6_addr im6_dst;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IP6MRA_CREPORT_UNSPEC = 0,
|
|
+ IP6MRA_CREPORT_MSGTYPE = 1,
|
|
+ IP6MRA_CREPORT_MIF_ID = 2,
|
|
+ IP6MRA_CREPORT_SRC_ADDR = 3,
|
|
+ IP6MRA_CREPORT_DST_ADDR = 4,
|
|
+ IP6MRA_CREPORT_PKT = 5,
|
|
+ __IP6MRA_CREPORT_MAX = 6,
|
|
+};
|
|
+
|
|
+struct mfc6_cache_cmp_arg {
|
|
+ struct in6_addr mf6c_mcastgrp;
|
|
+ struct in6_addr mf6c_origin;
|
|
+};
|
|
+
|
|
+struct mfc6_cache {
|
|
+ struct mr_mfc _c;
|
|
+ union {
|
|
+ struct {
|
|
+ struct in6_addr mf6c_mcastgrp;
|
|
+ struct in6_addr mf6c_origin;
|
|
+ };
|
|
+ struct mfc6_cache_cmp_arg cmparg;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct ip6mr_result {
|
|
+ struct mr_table *mrt;
|
|
+};
|
|
+
|
|
+struct compat_sioc_sg_req6 {
|
|
+ struct sockaddr_in6 src;
|
|
+ struct sockaddr_in6 grp;
|
|
+ compat_ulong_t pktcnt;
|
|
+ compat_ulong_t bytecnt;
|
|
+ compat_ulong_t wrong_if;
|
|
+};
|
|
+
|
|
+struct compat_sioc_mif_req6 {
|
|
+ mifi_t mifi;
|
|
+ compat_ulong_t icount;
|
|
+ compat_ulong_t ocount;
|
|
+ compat_ulong_t ibytes;
|
|
+ compat_ulong_t obytes;
|
|
+};
|
|
+
|
|
+struct ip6_mh {
|
|
+ __u8 ip6mh_proto;
|
|
+ __u8 ip6mh_hdrlen;
|
|
+ __u8 ip6mh_type;
|
|
+ __u8 ip6mh_reserved;
|
|
+ __u16 ip6mh_cksum;
|
|
+ __u8 data[0];
|
|
+};
|
|
+
|
|
+struct xfrm6_protocol {
|
|
+ int (*handler)(struct sk_buff *);
|
|
+ int (*cb_handler)(struct sk_buff *, int);
|
|
+ int (*err_handler)(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int, __be32);
|
|
+ struct xfrm6_protocol *next;
|
|
+ int priority;
|
|
+};
|
|
+
|
|
+typedef struct rt6_info * (*pol_lookup_t)(struct net *, struct fib6_table *, struct flowi6 *, const struct sk_buff *, int);
|
|
+
|
|
+struct fib6_rule {
|
|
+ struct fib_rule common;
|
|
+ struct rt6key src;
|
|
+ struct rt6key dst;
|
|
+ u8 tclass;
|
|
+};
|
|
+
|
|
+struct calipso_doi;
|
|
+
|
|
+struct netlbl_calipso_ops {
|
|
+ int (*doi_add)(struct calipso_doi *, struct netlbl_audit *);
|
|
+ void (*doi_free)(struct calipso_doi *);
|
|
+ int (*doi_remove)(u32, struct netlbl_audit *);
|
|
+ struct calipso_doi * (*doi_getdef)(u32);
|
|
+ void (*doi_putdef)(struct calipso_doi *);
|
|
+ int (*doi_walk)(u32 *, int (*)(struct calipso_doi *, void *), void *);
|
|
+ int (*sock_getattr)(struct sock *, struct netlbl_lsm_secattr *);
|
|
+ int (*sock_setattr)(struct sock *, const struct calipso_doi *, const struct netlbl_lsm_secattr *);
|
|
+ void (*sock_delattr)(struct sock *);
|
|
+ int (*req_setattr)(struct request_sock *, const struct calipso_doi *, const struct netlbl_lsm_secattr *);
|
|
+ void (*req_delattr)(struct request_sock *);
|
|
+ int (*opt_getattr)(const unsigned char *, struct netlbl_lsm_secattr *);
|
|
+ unsigned char * (*skbuff_optptr)(const struct sk_buff *);
|
|
+ int (*skbuff_setattr)(struct sk_buff *, const struct calipso_doi *, const struct netlbl_lsm_secattr *);
|
|
+ int (*skbuff_delattr)(struct sk_buff *);
|
|
+ void (*cache_invalidate)();
|
|
+ int (*cache_add)(const unsigned char *, const struct netlbl_lsm_secattr *);
|
|
+};
|
|
+
|
|
+struct calipso_doi {
|
|
+ u32 doi;
|
|
+ u32 type;
|
|
+ refcount_t refcount;
|
|
+ struct list_head list;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct calipso_map_cache_bkt {
|
|
+ spinlock_t lock;
|
|
+ u32 size;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct calipso_map_cache_entry {
|
|
+ u32 hash;
|
|
+ unsigned char *key;
|
|
+ size_t key_len;
|
|
+ struct netlbl_lsm_cache *lsm_data;
|
|
+ u32 activity;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ IP6_FH_F_FRAG = 1,
|
|
+ IP6_FH_F_AUTH = 2,
|
|
+ IP6_FH_F_SKIP_RH = 4,
|
|
+};
|
|
+
|
|
+struct sockaddr_pkt {
|
|
+ short unsigned int spkt_family;
|
|
+ unsigned char spkt_device[14];
|
|
+ __be16 spkt_protocol;
|
|
+};
|
|
+
|
|
+struct sockaddr_ll {
|
|
+ short unsigned int sll_family;
|
|
+ __be16 sll_protocol;
|
|
+ int sll_ifindex;
|
|
+ short unsigned int sll_hatype;
|
|
+ unsigned char sll_pkttype;
|
|
+ unsigned char sll_halen;
|
|
+ unsigned char sll_addr[8];
|
|
+};
|
|
+
|
|
+struct tpacket_stats {
|
|
+ unsigned int tp_packets;
|
|
+ unsigned int tp_drops;
|
|
+};
|
|
+
|
|
+struct tpacket_stats_v3 {
|
|
+ unsigned int tp_packets;
|
|
+ unsigned int tp_drops;
|
|
+ unsigned int tp_freeze_q_cnt;
|
|
+};
|
|
+
|
|
+struct tpacket_rollover_stats {
|
|
+ __u64 tp_all;
|
|
+ __u64 tp_huge;
|
|
+ __u64 tp_failed;
|
|
+};
|
|
+
|
|
+union tpacket_stats_u {
|
|
+ struct tpacket_stats stats1;
|
|
+ struct tpacket_stats_v3 stats3;
|
|
+};
|
|
+
|
|
+struct tpacket_auxdata {
|
|
+ __u32 tp_status;
|
|
+ __u32 tp_len;
|
|
+ __u32 tp_snaplen;
|
|
+ __u16 tp_mac;
|
|
+ __u16 tp_net;
|
|
+ __u16 tp_vlan_tci;
|
|
+ __u16 tp_vlan_tpid;
|
|
+};
|
|
+
|
|
+struct tpacket_hdr {
|
|
+ long unsigned int tp_status;
|
|
+ unsigned int tp_len;
|
|
+ unsigned int tp_snaplen;
|
|
+ short unsigned int tp_mac;
|
|
+ short unsigned int tp_net;
|
|
+ unsigned int tp_sec;
|
|
+ unsigned int tp_usec;
|
|
+};
|
|
+
|
|
+struct tpacket2_hdr {
|
|
+ __u32 tp_status;
|
|
+ __u32 tp_len;
|
|
+ __u32 tp_snaplen;
|
|
+ __u16 tp_mac;
|
|
+ __u16 tp_net;
|
|
+ __u32 tp_sec;
|
|
+ __u32 tp_nsec;
|
|
+ __u16 tp_vlan_tci;
|
|
+ __u16 tp_vlan_tpid;
|
|
+ __u8 tp_padding[4];
|
|
+};
|
|
+
|
|
+struct tpacket_hdr_variant1 {
|
|
+ __u32 tp_rxhash;
|
|
+ __u32 tp_vlan_tci;
|
|
+ __u16 tp_vlan_tpid;
|
|
+ __u16 tp_padding;
|
|
+};
|
|
+
|
|
+struct tpacket3_hdr {
|
|
+ __u32 tp_next_offset;
|
|
+ __u32 tp_sec;
|
|
+ __u32 tp_nsec;
|
|
+ __u32 tp_snaplen;
|
|
+ __u32 tp_len;
|
|
+ __u32 tp_status;
|
|
+ __u16 tp_mac;
|
|
+ __u16 tp_net;
|
|
+ union {
|
|
+ struct tpacket_hdr_variant1 hv1;
|
|
+ };
|
|
+ __u8 tp_padding[8];
|
|
+};
|
|
+
|
|
+struct tpacket_bd_ts {
|
|
+ unsigned int ts_sec;
|
|
+ union {
|
|
+ unsigned int ts_usec;
|
|
+ unsigned int ts_nsec;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct tpacket_hdr_v1 {
|
|
+ __u32 block_status;
|
|
+ __u32 num_pkts;
|
|
+ __u32 offset_to_first_pkt;
|
|
+ __u32 blk_len;
|
|
+ __u64 seq_num;
|
|
+ struct tpacket_bd_ts ts_first_pkt;
|
|
+ struct tpacket_bd_ts ts_last_pkt;
|
|
+};
|
|
+
|
|
+union tpacket_bd_header_u {
|
|
+ struct tpacket_hdr_v1 bh1;
|
|
+};
|
|
+
|
|
+struct tpacket_block_desc {
|
|
+ __u32 version;
|
|
+ __u32 offset_to_priv;
|
|
+ union tpacket_bd_header_u hdr;
|
|
+};
|
|
+
|
|
+enum tpacket_versions {
|
|
+ TPACKET_V1 = 0,
|
|
+ TPACKET_V2 = 1,
|
|
+ TPACKET_V3 = 2,
|
|
+};
|
|
+
|
|
+struct tpacket_req {
|
|
+ unsigned int tp_block_size;
|
|
+ unsigned int tp_block_nr;
|
|
+ unsigned int tp_frame_size;
|
|
+ unsigned int tp_frame_nr;
|
|
+};
|
|
+
|
|
+struct tpacket_req3 {
|
|
+ unsigned int tp_block_size;
|
|
+ unsigned int tp_block_nr;
|
|
+ unsigned int tp_frame_size;
|
|
+ unsigned int tp_frame_nr;
|
|
+ unsigned int tp_retire_blk_tov;
|
|
+ unsigned int tp_sizeof_priv;
|
|
+ unsigned int tp_feature_req_word;
|
|
+};
|
|
+
|
|
+union tpacket_req_u {
|
|
+ struct tpacket_req req;
|
|
+ struct tpacket_req3 req3;
|
|
+};
|
|
+
|
|
+struct virtio_net_hdr {
|
|
+ __u8 flags;
|
|
+ __u8 gso_type;
|
|
+ __virtio16 hdr_len;
|
|
+ __virtio16 gso_size;
|
|
+ __virtio16 csum_start;
|
|
+ __virtio16 csum_offset;
|
|
+};
|
|
+
|
|
+struct packet_mclist {
|
|
+ struct packet_mclist *next;
|
|
+ int ifindex;
|
|
+ int count;
|
|
+ short unsigned int type;
|
|
+ short unsigned int alen;
|
|
+ unsigned char addr[32];
|
|
+};
|
|
+
|
|
+struct pgv;
|
|
+
|
|
+struct tpacket_kbdq_core {
|
|
+ struct pgv *pkbdq;
|
|
+ unsigned int feature_req_word;
|
|
+ unsigned int hdrlen;
|
|
+ unsigned char reset_pending_on_curr_blk;
|
|
+ unsigned char delete_blk_timer;
|
|
+ short unsigned int kactive_blk_num;
|
|
+ short unsigned int blk_sizeof_priv;
|
|
+ short unsigned int last_kactive_blk_num;
|
|
+ char *pkblk_start;
|
|
+ char *pkblk_end;
|
|
+ int kblk_size;
|
|
+ unsigned int max_frame_len;
|
|
+ unsigned int knum_blocks;
|
|
+ uint64_t knxt_seq_num;
|
|
+ char *prev;
|
|
+ char *nxt_offset;
|
|
+ struct sk_buff *skb;
|
|
+ atomic_t blk_fill_in_prog;
|
|
+ short unsigned int retire_blk_tov;
|
|
+ short unsigned int version;
|
|
+ long unsigned int tov_in_jiffies;
|
|
+ struct timer_list retire_blk_timer;
|
|
+};
|
|
+
|
|
+struct pgv {
|
|
+ char *buffer;
|
|
+};
|
|
+
|
|
+struct packet_ring_buffer {
|
|
+ struct pgv *pg_vec;
|
|
+ unsigned int head;
|
|
+ unsigned int frames_per_block;
|
|
+ unsigned int frame_size;
|
|
+ unsigned int frame_max;
|
|
+ unsigned int pg_vec_order;
|
|
+ unsigned int pg_vec_pages;
|
|
+ unsigned int pg_vec_len;
|
|
+ unsigned int *pending_refcnt;
|
|
+ union {
|
|
+ long unsigned int *rx_owner_map;
|
|
+ struct tpacket_kbdq_core prb_bdqc;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct packet_fanout {
|
|
+ possible_net_t net;
|
|
+ unsigned int num_members;
|
|
+ u16 id;
|
|
+ u8 type;
|
|
+ u8 flags;
|
|
+ union {
|
|
+ atomic_t rr_cur;
|
|
+ struct bpf_prog *bpf_prog;
|
|
+ };
|
|
+ struct list_head list;
|
|
+ struct sock *arr[256];
|
|
+ spinlock_t lock;
|
|
+ refcount_t sk_ref;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct packet_type prot_hook;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct packet_rollover {
|
|
+ int sock;
|
|
+ atomic_long_t num;
|
|
+ atomic_long_t num_huge;
|
|
+ atomic_long_t num_failed;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ u32 history[16];
|
|
+};
|
|
+
|
|
+struct packet_sock {
|
|
+ struct sock sk;
|
|
+ struct packet_fanout *fanout;
|
|
+ union tpacket_stats_u stats;
|
|
+ struct packet_ring_buffer rx_ring;
|
|
+ struct packet_ring_buffer tx_ring;
|
|
+ int copy_thresh;
|
|
+ spinlock_t bind_lock;
|
|
+ struct mutex pg_vec_lock;
|
|
+ unsigned int running;
|
|
+ unsigned int auxdata: 1;
|
|
+ unsigned int origdev: 1;
|
|
+ unsigned int has_vnet_hdr: 1;
|
|
+ unsigned int tp_loss: 1;
|
|
+ unsigned int tp_tx_has_off: 1;
|
|
+ int pressure;
|
|
+ int ifindex;
|
|
+ __be16 num;
|
|
+ struct packet_rollover *rollover;
|
|
+ struct packet_mclist *mclist;
|
|
+ atomic_t mapped;
|
|
+ enum tpacket_versions tp_version;
|
|
+ unsigned int tp_hdrlen;
|
|
+ unsigned int tp_reserve;
|
|
+ unsigned int tp_tstamp;
|
|
+ struct completion skb_completion;
|
|
+ struct net_device *cached_dev;
|
|
+ int (*xmit)(struct sk_buff *);
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ struct packet_type prot_hook;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ atomic_t tp_drops;
|
|
+ long: 32;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+ long: 64;
|
|
+};
|
|
+
|
|
+struct packet_mreq_max {
|
|
+ int mr_ifindex;
|
|
+ short unsigned int mr_type;
|
|
+ short unsigned int mr_alen;
|
|
+ unsigned char mr_address[32];
|
|
+};
|
|
+
|
|
+union tpacket_uhdr {
|
|
+ struct tpacket_hdr *h1;
|
|
+ struct tpacket2_hdr *h2;
|
|
+ struct tpacket3_hdr *h3;
|
|
+ void *raw;
|
|
+};
|
|
+
|
|
+struct packet_skb_cb {
|
|
+ union {
|
|
+ struct sockaddr_pkt pkt;
|
|
+ union {
|
|
+ unsigned int origlen;
|
|
+ struct sockaddr_ll ll;
|
|
+ };
|
|
+ } sa;
|
|
+};
|
|
+
|
|
+struct _strp_msg {
|
|
+ struct strp_msg strp;
|
|
+ int accum_len;
|
|
+};
|
|
+
|
|
+struct vlan_group {
|
|
+ unsigned int nr_vlan_devs;
|
|
+ struct hlist_node hlist;
|
|
+ struct net_device **vlan_devices_arrays[16];
|
|
+};
|
|
+
|
|
+struct vlan_info {
|
|
+ struct net_device *real_dev;
|
|
+ struct vlan_group grp;
|
|
+ struct list_head vid_list;
|
|
+ unsigned int nr_vids;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+enum vlan_flags {
|
|
+ VLAN_FLAG_REORDER_HDR = 1,
|
|
+ VLAN_FLAG_GVRP = 2,
|
|
+ VLAN_FLAG_LOOSE_BINDING = 4,
|
|
+ VLAN_FLAG_MVRP = 8,
|
|
+};
|
|
+
|
|
+struct vlan_priority_tci_mapping {
|
|
+ u32 priority;
|
|
+ u16 vlan_qos;
|
|
+ struct vlan_priority_tci_mapping *next;
|
|
+};
|
|
+
|
|
+struct vlan_dev_priv {
|
|
+ unsigned int nr_ingress_mappings;
|
|
+ u32 ingress_priority_map[8];
|
|
+ unsigned int nr_egress_mappings;
|
|
+ struct vlan_priority_tci_mapping *egress_priority_map[16];
|
|
+ __be16 vlan_proto;
|
|
+ u16 vlan_id;
|
|
+ u16 flags;
|
|
+ struct net_device *real_dev;
|
|
+ unsigned char real_dev_addr[6];
|
|
+ struct proc_dir_entry *dent;
|
|
+ struct vlan_pcpu_stats *vlan_pcpu_stats;
|
|
+ struct netpoll *netpoll;
|
|
+ unsigned int nest_level;
|
|
+};
|
|
+
|
|
+enum vlan_protos {
|
|
+ VLAN_PROTO_8021Q = 0,
|
|
+ VLAN_PROTO_8021AD = 1,
|
|
+ VLAN_PROTO_NUM = 2,
|
|
+};
|
|
+
|
|
+struct vlan_vid_info {
|
|
+ struct list_head list;
|
|
+ __be16 proto;
|
|
+ u16 vid;
|
|
+ int refcount;
|
|
+};
|
|
+
|
|
+struct netlbl_af4list {
|
|
+ __be32 addr;
|
|
+ __be32 mask;
|
|
+ u32 valid;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct netlbl_af6list {
|
|
+ struct in6_addr addr;
|
|
+ struct in6_addr mask;
|
|
+ u32 valid;
|
|
+ struct list_head list;
|
|
+};
|
|
+
|
|
+struct netlbl_domaddr_map {
|
|
+ struct list_head list4;
|
|
+ struct list_head list6;
|
|
+};
|
|
+
|
|
+struct netlbl_dommap_def {
|
|
+ u32 type;
|
|
+ union {
|
|
+ struct netlbl_domaddr_map *addrsel;
|
|
+ struct cipso_v4_doi *cipso;
|
|
+ struct calipso_doi *calipso;
|
|
+ };
|
|
+};
|
|
+
|
|
+struct netlbl_domaddr4_map {
|
|
+ struct netlbl_dommap_def def;
|
|
+ struct netlbl_af4list list;
|
|
+};
|
|
+
|
|
+struct netlbl_domaddr6_map {
|
|
+ struct netlbl_dommap_def def;
|
|
+ struct netlbl_af6list list;
|
|
+};
|
|
+
|
|
+struct netlbl_dom_map {
|
|
+ char *domain;
|
|
+ u16 family;
|
|
+ struct netlbl_dommap_def def;
|
|
+ u32 valid;
|
|
+ struct list_head list;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct netlbl_domhsh_tbl {
|
|
+ struct list_head *tbl;
|
|
+ u32 size;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NLBL_MGMT_C_UNSPEC = 0,
|
|
+ NLBL_MGMT_C_ADD = 1,
|
|
+ NLBL_MGMT_C_REMOVE = 2,
|
|
+ NLBL_MGMT_C_LISTALL = 3,
|
|
+ NLBL_MGMT_C_ADDDEF = 4,
|
|
+ NLBL_MGMT_C_REMOVEDEF = 5,
|
|
+ NLBL_MGMT_C_LISTDEF = 6,
|
|
+ NLBL_MGMT_C_PROTOCOLS = 7,
|
|
+ NLBL_MGMT_C_VERSION = 8,
|
|
+ __NLBL_MGMT_C_MAX = 9,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NLBL_MGMT_A_UNSPEC = 0,
|
|
+ NLBL_MGMT_A_DOMAIN = 1,
|
|
+ NLBL_MGMT_A_PROTOCOL = 2,
|
|
+ NLBL_MGMT_A_VERSION = 3,
|
|
+ NLBL_MGMT_A_CV4DOI = 4,
|
|
+ NLBL_MGMT_A_IPV6ADDR = 5,
|
|
+ NLBL_MGMT_A_IPV6MASK = 6,
|
|
+ NLBL_MGMT_A_IPV4ADDR = 7,
|
|
+ NLBL_MGMT_A_IPV4MASK = 8,
|
|
+ NLBL_MGMT_A_ADDRSELECTOR = 9,
|
|
+ NLBL_MGMT_A_SELECTORLIST = 10,
|
|
+ NLBL_MGMT_A_FAMILY = 11,
|
|
+ NLBL_MGMT_A_CLPDOI = 12,
|
|
+ __NLBL_MGMT_A_MAX = 13,
|
|
+};
|
|
+
|
|
+struct netlbl_domhsh_walk_arg {
|
|
+ struct netlink_callback *nl_cb;
|
|
+ struct sk_buff *skb;
|
|
+ u32 seq;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NLBL_UNLABEL_C_UNSPEC = 0,
|
|
+ NLBL_UNLABEL_C_ACCEPT = 1,
|
|
+ NLBL_UNLABEL_C_LIST = 2,
|
|
+ NLBL_UNLABEL_C_STATICADD = 3,
|
|
+ NLBL_UNLABEL_C_STATICREMOVE = 4,
|
|
+ NLBL_UNLABEL_C_STATICLIST = 5,
|
|
+ NLBL_UNLABEL_C_STATICADDDEF = 6,
|
|
+ NLBL_UNLABEL_C_STATICREMOVEDEF = 7,
|
|
+ NLBL_UNLABEL_C_STATICLISTDEF = 8,
|
|
+ __NLBL_UNLABEL_C_MAX = 9,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NLBL_UNLABEL_A_UNSPEC = 0,
|
|
+ NLBL_UNLABEL_A_ACPTFLG = 1,
|
|
+ NLBL_UNLABEL_A_IPV6ADDR = 2,
|
|
+ NLBL_UNLABEL_A_IPV6MASK = 3,
|
|
+ NLBL_UNLABEL_A_IPV4ADDR = 4,
|
|
+ NLBL_UNLABEL_A_IPV4MASK = 5,
|
|
+ NLBL_UNLABEL_A_IFACE = 6,
|
|
+ NLBL_UNLABEL_A_SECCTX = 7,
|
|
+ __NLBL_UNLABEL_A_MAX = 8,
|
|
+};
|
|
+
|
|
+struct netlbl_unlhsh_tbl {
|
|
+ struct list_head *tbl;
|
|
+ u32 size;
|
|
+};
|
|
+
|
|
+struct netlbl_unlhsh_addr4 {
|
|
+ u32 secid;
|
|
+ struct netlbl_af4list list;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct netlbl_unlhsh_addr6 {
|
|
+ u32 secid;
|
|
+ struct netlbl_af6list list;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct netlbl_unlhsh_iface {
|
|
+ int ifindex;
|
|
+ struct list_head addr4_list;
|
|
+ struct list_head addr6_list;
|
|
+ u32 valid;
|
|
+ struct list_head list;
|
|
+ struct callback_head rcu;
|
|
+};
|
|
+
|
|
+struct netlbl_unlhsh_walk_arg {
|
|
+ struct netlink_callback *nl_cb;
|
|
+ struct sk_buff *skb;
|
|
+ u32 seq;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NLBL_CIPSOV4_C_UNSPEC = 0,
|
|
+ NLBL_CIPSOV4_C_ADD = 1,
|
|
+ NLBL_CIPSOV4_C_REMOVE = 2,
|
|
+ NLBL_CIPSOV4_C_LIST = 3,
|
|
+ NLBL_CIPSOV4_C_LISTALL = 4,
|
|
+ __NLBL_CIPSOV4_C_MAX = 5,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NLBL_CIPSOV4_A_UNSPEC = 0,
|
|
+ NLBL_CIPSOV4_A_DOI = 1,
|
|
+ NLBL_CIPSOV4_A_MTYPE = 2,
|
|
+ NLBL_CIPSOV4_A_TAG = 3,
|
|
+ NLBL_CIPSOV4_A_TAGLST = 4,
|
|
+ NLBL_CIPSOV4_A_MLSLVLLOC = 5,
|
|
+ NLBL_CIPSOV4_A_MLSLVLREM = 6,
|
|
+ NLBL_CIPSOV4_A_MLSLVL = 7,
|
|
+ NLBL_CIPSOV4_A_MLSLVLLST = 8,
|
|
+ NLBL_CIPSOV4_A_MLSCATLOC = 9,
|
|
+ NLBL_CIPSOV4_A_MLSCATREM = 10,
|
|
+ NLBL_CIPSOV4_A_MLSCAT = 11,
|
|
+ NLBL_CIPSOV4_A_MLSCATLST = 12,
|
|
+ __NLBL_CIPSOV4_A_MAX = 13,
|
|
+};
|
|
+
|
|
+struct netlbl_cipsov4_doiwalk_arg {
|
|
+ struct netlink_callback *nl_cb;
|
|
+ struct sk_buff *skb;
|
|
+ u32 seq;
|
|
+};
|
|
+
|
|
+struct netlbl_domhsh_walk_arg___2 {
|
|
+ struct netlbl_audit *audit_info;
|
|
+ u32 doi;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NLBL_CALIPSO_C_UNSPEC = 0,
|
|
+ NLBL_CALIPSO_C_ADD = 1,
|
|
+ NLBL_CALIPSO_C_REMOVE = 2,
|
|
+ NLBL_CALIPSO_C_LIST = 3,
|
|
+ NLBL_CALIPSO_C_LISTALL = 4,
|
|
+ __NLBL_CALIPSO_C_MAX = 5,
|
|
+};
|
|
+
|
|
+enum {
|
|
+ NLBL_CALIPSO_A_UNSPEC = 0,
|
|
+ NLBL_CALIPSO_A_DOI = 1,
|
|
+ NLBL_CALIPSO_A_MTYPE = 2,
|
|
+ __NLBL_CALIPSO_A_MAX = 3,
|
|
+};
|
|
+
|
|
+struct netlbl_calipso_doiwalk_arg {
|
|
+ struct netlink_callback *nl_cb;
|
|
+ struct sk_buff *skb;
|
|
+ u32 seq;
|
|
+};
|
|
+
|
|
+struct dcbmsg {
|
|
+ __u8 dcb_family;
|
|
+ __u8 cmd;
|
|
+ __u16 dcb_pad;
|
|
+};
|
|
+
|
|
+enum dcbnl_commands {
|
|
+ DCB_CMD_UNDEFINED = 0,
|
|
+ DCB_CMD_GSTATE = 1,
|
|
+ DCB_CMD_SSTATE = 2,
|
|
+ DCB_CMD_PGTX_GCFG = 3,
|
|
+ DCB_CMD_PGTX_SCFG = 4,
|
|
+ DCB_CMD_PGRX_GCFG = 5,
|
|
+ DCB_CMD_PGRX_SCFG = 6,
|
|
+ DCB_CMD_PFC_GCFG = 7,
|
|
+ DCB_CMD_PFC_SCFG = 8,
|
|
+ DCB_CMD_SET_ALL = 9,
|
|
+ DCB_CMD_GPERM_HWADDR = 10,
|
|
+ DCB_CMD_GCAP = 11,
|
|
+ DCB_CMD_GNUMTCS = 12,
|
|
+ DCB_CMD_SNUMTCS = 13,
|
|
+ DCB_CMD_PFC_GSTATE = 14,
|
|
+ DCB_CMD_PFC_SSTATE = 15,
|
|
+ DCB_CMD_BCN_GCFG = 16,
|
|
+ DCB_CMD_BCN_SCFG = 17,
|
|
+ DCB_CMD_GAPP = 18,
|
|
+ DCB_CMD_SAPP = 19,
|
|
+ DCB_CMD_IEEE_SET = 20,
|
|
+ DCB_CMD_IEEE_GET = 21,
|
|
+ DCB_CMD_GDCBX = 22,
|
|
+ DCB_CMD_SDCBX = 23,
|
|
+ DCB_CMD_GFEATCFG = 24,
|
|
+ DCB_CMD_SFEATCFG = 25,
|
|
+ DCB_CMD_CEE_GET = 26,
|
|
+ DCB_CMD_IEEE_DEL = 27,
|
|
+ __DCB_CMD_ENUM_MAX = 28,
|
|
+ DCB_CMD_MAX = 27,
|
|
+};
|
|
+
|
|
+enum dcbnl_attrs {
|
|
+ DCB_ATTR_UNDEFINED = 0,
|
|
+ DCB_ATTR_IFNAME = 1,
|
|
+ DCB_ATTR_STATE = 2,
|
|
+ DCB_ATTR_PFC_STATE = 3,
|
|
+ DCB_ATTR_PFC_CFG = 4,
|
|
+ DCB_ATTR_NUM_TC = 5,
|
|
+ DCB_ATTR_PG_CFG = 6,
|
|
+ DCB_ATTR_SET_ALL = 7,
|
|
+ DCB_ATTR_PERM_HWADDR = 8,
|
|
+ DCB_ATTR_CAP = 9,
|
|
+ DCB_ATTR_NUMTCS = 10,
|
|
+ DCB_ATTR_BCN = 11,
|
|
+ DCB_ATTR_APP = 12,
|
|
+ DCB_ATTR_IEEE = 13,
|
|
+ DCB_ATTR_DCBX = 14,
|
|
+ DCB_ATTR_FEATCFG = 15,
|
|
+ DCB_ATTR_CEE = 16,
|
|
+ __DCB_ATTR_ENUM_MAX = 17,
|
|
+ DCB_ATTR_MAX = 16,
|
|
+};
|
|
+
|
|
+enum ieee_attrs {
|
|
+ DCB_ATTR_IEEE_UNSPEC = 0,
|
|
+ DCB_ATTR_IEEE_ETS = 1,
|
|
+ DCB_ATTR_IEEE_PFC = 2,
|
|
+ DCB_ATTR_IEEE_APP_TABLE = 3,
|
|
+ DCB_ATTR_IEEE_PEER_ETS = 4,
|
|
+ DCB_ATTR_IEEE_PEER_PFC = 5,
|
|
+ DCB_ATTR_IEEE_PEER_APP = 6,
|
|
+ DCB_ATTR_IEEE_MAXRATE = 7,
|
|
+ DCB_ATTR_IEEE_QCN = 8,
|
|
+ DCB_ATTR_IEEE_QCN_STATS = 9,
|
|
+ DCB_ATTR_DCB_BUFFER = 10,
|
|
+ __DCB_ATTR_IEEE_MAX = 11,
|
|
+};
|
|
+
|
|
+enum ieee_attrs_app {
|
|
+ DCB_ATTR_IEEE_APP_UNSPEC = 0,
|
|
+ DCB_ATTR_IEEE_APP = 1,
|
|
+ __DCB_ATTR_IEEE_APP_MAX = 2,
|
|
+};
|
|
+
|
|
+enum cee_attrs {
|
|
+ DCB_ATTR_CEE_UNSPEC = 0,
|
|
+ DCB_ATTR_CEE_PEER_PG = 1,
|
|
+ DCB_ATTR_CEE_PEER_PFC = 2,
|
|
+ DCB_ATTR_CEE_PEER_APP_TABLE = 3,
|
|
+ DCB_ATTR_CEE_TX_PG = 4,
|
|
+ DCB_ATTR_CEE_RX_PG = 5,
|
|
+ DCB_ATTR_CEE_PFC = 6,
|
|
+ DCB_ATTR_CEE_APP_TABLE = 7,
|
|
+ DCB_ATTR_CEE_FEAT = 8,
|
|
+ __DCB_ATTR_CEE_MAX = 9,
|
|
+};
|
|
+
|
|
+enum peer_app_attr {
|
|
+ DCB_ATTR_CEE_PEER_APP_UNSPEC = 0,
|
|
+ DCB_ATTR_CEE_PEER_APP_INFO = 1,
|
|
+ DCB_ATTR_CEE_PEER_APP = 2,
|
|
+ __DCB_ATTR_CEE_PEER_APP_MAX = 3,
|
|
+};
|
|
+
|
|
+enum dcbnl_pfc_up_attrs {
|
|
+ DCB_PFC_UP_ATTR_UNDEFINED = 0,
|
|
+ DCB_PFC_UP_ATTR_0 = 1,
|
|
+ DCB_PFC_UP_ATTR_1 = 2,
|
|
+ DCB_PFC_UP_ATTR_2 = 3,
|
|
+ DCB_PFC_UP_ATTR_3 = 4,
|
|
+ DCB_PFC_UP_ATTR_4 = 5,
|
|
+ DCB_PFC_UP_ATTR_5 = 6,
|
|
+ DCB_PFC_UP_ATTR_6 = 7,
|
|
+ DCB_PFC_UP_ATTR_7 = 8,
|
|
+ DCB_PFC_UP_ATTR_ALL = 9,
|
|
+ __DCB_PFC_UP_ATTR_ENUM_MAX = 10,
|
|
+ DCB_PFC_UP_ATTR_MAX = 9,
|
|
+};
|
|
+
|
|
+enum dcbnl_pg_attrs {
|
|
+ DCB_PG_ATTR_UNDEFINED = 0,
|
|
+ DCB_PG_ATTR_TC_0 = 1,
|
|
+ DCB_PG_ATTR_TC_1 = 2,
|
|
+ DCB_PG_ATTR_TC_2 = 3,
|
|
+ DCB_PG_ATTR_TC_3 = 4,
|
|
+ DCB_PG_ATTR_TC_4 = 5,
|
|
+ DCB_PG_ATTR_TC_5 = 6,
|
|
+ DCB_PG_ATTR_TC_6 = 7,
|
|
+ DCB_PG_ATTR_TC_7 = 8,
|
|
+ DCB_PG_ATTR_TC_MAX = 9,
|
|
+ DCB_PG_ATTR_TC_ALL = 10,
|
|
+ DCB_PG_ATTR_BW_ID_0 = 11,
|
|
+ DCB_PG_ATTR_BW_ID_1 = 12,
|
|
+ DCB_PG_ATTR_BW_ID_2 = 13,
|
|
+ DCB_PG_ATTR_BW_ID_3 = 14,
|
|
+ DCB_PG_ATTR_BW_ID_4 = 15,
|
|
+ DCB_PG_ATTR_BW_ID_5 = 16,
|
|
+ DCB_PG_ATTR_BW_ID_6 = 17,
|
|
+ DCB_PG_ATTR_BW_ID_7 = 18,
|
|
+ DCB_PG_ATTR_BW_ID_MAX = 19,
|
|
+ DCB_PG_ATTR_BW_ID_ALL = 20,
|
|
+ __DCB_PG_ATTR_ENUM_MAX = 21,
|
|
+ DCB_PG_ATTR_MAX = 20,
|
|
+};
|
|
+
|
|
+enum dcbnl_tc_attrs {
|
|
+ DCB_TC_ATTR_PARAM_UNDEFINED = 0,
|
+	DCB_TC_ATTR_PARAM_PGID = 1,
+	DCB_TC_ATTR_PARAM_UP_MAPPING = 2,
+	DCB_TC_ATTR_PARAM_STRICT_PRIO = 3,
+	DCB_TC_ATTR_PARAM_BW_PCT = 4,
+	DCB_TC_ATTR_PARAM_ALL = 5,
+	__DCB_TC_ATTR_PARAM_ENUM_MAX = 6,
+	DCB_TC_ATTR_PARAM_MAX = 5,
+};
+
+enum dcbnl_cap_attrs {
+	DCB_CAP_ATTR_UNDEFINED = 0,
+	DCB_CAP_ATTR_ALL = 1,
+	DCB_CAP_ATTR_PG = 2,
+	DCB_CAP_ATTR_PFC = 3,
+	DCB_CAP_ATTR_UP2TC = 4,
+	DCB_CAP_ATTR_PG_TCS = 5,
+	DCB_CAP_ATTR_PFC_TCS = 6,
+	DCB_CAP_ATTR_GSP = 7,
+	DCB_CAP_ATTR_BCN = 8,
+	DCB_CAP_ATTR_DCBX = 9,
+	__DCB_CAP_ATTR_ENUM_MAX = 10,
+	DCB_CAP_ATTR_MAX = 9,
+};
+
+enum dcbnl_numtcs_attrs {
+	DCB_NUMTCS_ATTR_UNDEFINED = 0,
+	DCB_NUMTCS_ATTR_ALL = 1,
+	DCB_NUMTCS_ATTR_PG = 2,
+	DCB_NUMTCS_ATTR_PFC = 3,
+	__DCB_NUMTCS_ATTR_ENUM_MAX = 4,
+	DCB_NUMTCS_ATTR_MAX = 3,
+};
+
+enum dcbnl_bcn_attrs {
+	DCB_BCN_ATTR_UNDEFINED = 0,
+	DCB_BCN_ATTR_RP_0 = 1,
+	DCB_BCN_ATTR_RP_1 = 2,
+	DCB_BCN_ATTR_RP_2 = 3,
+	DCB_BCN_ATTR_RP_3 = 4,
+	DCB_BCN_ATTR_RP_4 = 5,
+	DCB_BCN_ATTR_RP_5 = 6,
+	DCB_BCN_ATTR_RP_6 = 7,
+	DCB_BCN_ATTR_RP_7 = 8,
+	DCB_BCN_ATTR_RP_ALL = 9,
+	DCB_BCN_ATTR_BCNA_0 = 10,
+	DCB_BCN_ATTR_BCNA_1 = 11,
+	DCB_BCN_ATTR_ALPHA = 12,
+	DCB_BCN_ATTR_BETA = 13,
+	DCB_BCN_ATTR_GD = 14,
+	DCB_BCN_ATTR_GI = 15,
+	DCB_BCN_ATTR_TMAX = 16,
+	DCB_BCN_ATTR_TD = 17,
+	DCB_BCN_ATTR_RMIN = 18,
+	DCB_BCN_ATTR_W = 19,
+	DCB_BCN_ATTR_RD = 20,
+	DCB_BCN_ATTR_RU = 21,
+	DCB_BCN_ATTR_WRTT = 22,
+	DCB_BCN_ATTR_RI = 23,
+	DCB_BCN_ATTR_C = 24,
+	DCB_BCN_ATTR_ALL = 25,
+	__DCB_BCN_ATTR_ENUM_MAX = 26,
+	DCB_BCN_ATTR_MAX = 25,
+};
+
+enum dcb_general_attr_values {
+	DCB_ATTR_VALUE_UNDEFINED = 255,
+};
+
+enum dcbnl_app_attrs {
+	DCB_APP_ATTR_UNDEFINED = 0,
+	DCB_APP_ATTR_IDTYPE = 1,
+	DCB_APP_ATTR_ID = 2,
+	DCB_APP_ATTR_PRIORITY = 3,
+	__DCB_APP_ATTR_ENUM_MAX = 4,
+	DCB_APP_ATTR_MAX = 3,
+};
+
+enum dcbnl_featcfg_attrs {
+	DCB_FEATCFG_ATTR_UNDEFINED = 0,
+	DCB_FEATCFG_ATTR_ALL = 1,
+	DCB_FEATCFG_ATTR_PG = 2,
+	DCB_FEATCFG_ATTR_PFC = 3,
+	DCB_FEATCFG_ATTR_APP = 4,
+	__DCB_FEATCFG_ATTR_ENUM_MAX = 5,
+	DCB_FEATCFG_ATTR_MAX = 4,
+};
+
+struct dcb_app_type {
+	int ifindex;
+	struct dcb_app app;
+	struct list_head list;
+	u8 dcbx;
+};
+
+struct dcb_ieee_app_prio_map {
+	u64 map[8];
+};
+
+struct dcb_ieee_app_dscp_map {
+	u8 map[64];
+};
+
+enum dcbevent_notif_type {
+	DCB_APP_EVENT = 1,
+};
+
+struct reply_func {
+	int type;
+	int (*cb)(struct net_device *, struct nlmsghdr *, u32, struct nlattr **, struct sk_buff *);
+};
+
+struct nsh_md1_ctx {
+	__be32 context[4];
+};
+
+struct nsh_md2_tlv {
+	__be16 md_class;
+	u8 type;
+	u8 length;
+	u8 md_value[0];
+};
+
+struct nshhdr {
+	__be16 ver_flags_ttl_len;
+	u8 mdtype;
+	u8 np;
+	__be32 path_hdr;
+	union {
+		struct nsh_md1_ctx md1;
+		struct nsh_md2_tlv md2;
+	};
+};
+
+struct switchdev_trans_item {
+	struct list_head list;
+	void *data;
+	void (*destructor)(const void *);
+};
+
+struct switchdev_notifier_info {
+	struct net_device *dev;
+};
+
+typedef void switchdev_deferred_func_t(struct net_device *, const void *);
+
+struct switchdev_deferred_item {
+	struct list_head list;
+	struct net_device *dev;
+	switchdev_deferred_func_t *func;
+	long unsigned int data[0];
+};
+
+struct sockaddr_xdp {
+	__u16 sxdp_family;
+	__u16 sxdp_flags;
+	__u32 sxdp_ifindex;
+	__u32 sxdp_queue_id;
+	__u32 sxdp_shared_umem_fd;
+};
+
+struct xdp_ring_offset {
+	__u64 producer;
+	__u64 consumer;
+	__u64 desc;
+};
+
+struct xdp_mmap_offsets {
+	struct xdp_ring_offset rx;
+	struct xdp_ring_offset tx;
+	struct xdp_ring_offset fr;
+	struct xdp_ring_offset cr;
+};
+
+struct xdp_umem_reg {
+	__u64 addr;
+	__u64 len;
+	__u32 chunk_size;
+	__u32 headroom;
+};
+
+struct xdp_statistics {
+	__u64 rx_dropped;
+	__u64 rx_invalid_descs;
+	__u64 tx_invalid_descs;
+};
+
+struct xdp_desc {
+	__u64 addr;
+	__u32 len;
+	__u32 options;
+};
+
+struct xdp_ring;
+
+struct xsk_queue {
+	struct xdp_umem_props umem_props;
+	u32 ring_mask;
+	u32 nentries;
+	u32 prod_head;
+	u32 prod_tail;
+	u32 cons_head;
+	u32 cons_tail;
+	struct xdp_ring *ring;
+	u64 invalid_descs;
+};
+
+struct xdp_ring {
+	u32 producer;
+	long: 32;
+	long: 64;
+	long: 64;
+	long: 64;
+	long: 64;
+	long: 64;
+	long: 64;
+	long: 64;
+	u32 consumer;
+	long: 32;
+	long: 64;
+	long: 64;
+	long: 64;
+	long: 64;
+	long: 64;
+	long: 64;
+	long: 64;
+};
+
+struct xdp_rxtx_ring {
+	struct xdp_ring ptrs;
+	struct xdp_desc desc[0];
+};
+
+struct xdp_umem_ring {
+	struct xdp_ring ptrs;
+	u64 desc[0];
+};
+
+struct compress_format {
+	unsigned char magic[2];
+	const char *name;
+	decompress_fn decompressor;
+};
+
+struct group_data {
+	int limit[21];
+	int base[20];
+	int permute[258];
+	int minLen;
+	int maxLen;
+};
+
+struct bunzip_data {
+	int writeCopies;
+	int writePos;
+	int writeRunCountdown;
+	int writeCount;
+	int writeCurrent;
+	long int (*fill)(void *, long unsigned int);
+	long int inbufCount;
+	long int inbufPos;
+	unsigned char *inbuf;
+	unsigned int inbufBitCount;
+	unsigned int inbufBits;
+	unsigned int crc32Table[256];
+	unsigned int headerCRC;
+	unsigned int totalCRC;
+	unsigned int writeCRC;
+	unsigned int *dbuf;
+	unsigned int dbufSize;
+	unsigned char selectors[32768];
+	struct group_data groups[6];
+	int io_error;
+	int byteCount[256];
+	unsigned char symToByte[256];
+	unsigned char mtfSymbol[256];
+};
+
+struct rc {
+	long int (*fill)(void *, long unsigned int);
+	uint8_t *ptr;
+	uint8_t *buffer;
+	uint8_t *buffer_end;
+	long int buffer_size;
+	uint32_t code;
+	uint32_t range;
+	uint32_t bound;
+	void (*error)(char *);
+};
+
+struct lzma_header {
+	uint8_t pos;
+	uint32_t dict_size;
+	uint64_t dst_size;
+} __attribute__((packed));
+
+struct writer {
+	uint8_t *buffer;
+	uint8_t previous_byte;
+	size_t buffer_pos;
+	int bufsize;
+	size_t global_pos;
+	long int (*flush)(void *, long unsigned int);
+	struct lzma_header *header;
+};
+
+struct cstate {
+	int state;
+	uint32_t rep0;
+	uint32_t rep1;
+	uint32_t rep2;
+	uint32_t rep3;
+};
+
+struct xz_dec___2;
+
+enum cpio_fields {
+	C_MAGIC = 0,
+	C_INO = 1,
+	C_MODE = 2,
+	C_UID = 3,
+	C_GID = 4,
+	C_NLINK = 5,
+	C_MTIME = 6,
+	C_FILESIZE = 7,
+	C_MAJ = 8,
+	C_MIN = 9,
+	C_RMAJ = 10,
+	C_RMIN = 11,
+	C_NAMESIZE = 12,
+	C_CHKSUM = 13,
+	C_NFIELDS = 14,
+};
+
+struct fprop_local_single {
+	long unsigned int events;
+	unsigned int period;
+	raw_spinlock_t lock;
+};
+
+struct klist_waiter {
+	struct list_head list;
+	struct klist_node *node;
+	struct task_struct *process;
+	int woken;
+};
+
+struct uevent_sock {
+	struct list_head list;
+	struct sock *sk;
+};
+
+struct radix_tree_preload {
+	unsigned int nr;
+	struct radix_tree_node *nodes;
+};
+
+typedef struct {
+	long unsigned int key[2];
+} hsiphash_key_t;
+
+enum format_type {
+	FORMAT_TYPE_NONE = 0,
+	FORMAT_TYPE_WIDTH = 1,
+	FORMAT_TYPE_PRECISION = 2,
+	FORMAT_TYPE_CHAR = 3,
+	FORMAT_TYPE_STR = 4,
+	FORMAT_TYPE_PTR = 5,
+	FORMAT_TYPE_PERCENT_CHAR = 6,
+	FORMAT_TYPE_INVALID = 7,
+	FORMAT_TYPE_LONG_LONG = 8,
+	FORMAT_TYPE_ULONG = 9,
+	FORMAT_TYPE_LONG = 10,
+	FORMAT_TYPE_UBYTE = 11,
+	FORMAT_TYPE_BYTE = 12,
+	FORMAT_TYPE_USHORT = 13,
+	FORMAT_TYPE_SHORT = 14,
+	FORMAT_TYPE_UINT = 15,
+	FORMAT_TYPE_INT = 16,
+	FORMAT_TYPE_SIZE_T = 17,
+	FORMAT_TYPE_PTRDIFF = 18,
+};
+
+struct printf_spec {
+	unsigned int type: 8;
+	int field_width: 24;
+	unsigned int flags: 8;
+	unsigned int base: 8;
+	int precision: 16;
+};
+
+enum {
+	st_wordstart = 0,
+	st_wordcmp = 1,
+	st_wordskip = 2,
+	st_bufcpy = 3,
+};
+
+enum {
+	st_wordstart___2 = 0,
+	st_wordcmp___2 = 1,
+	st_wordskip___2 = 2,
+};
+
+struct in6_addr___2;
+
+enum reg_type {
+	REG_TYPE_RM = 0,
+	REG_TYPE_INDEX = 1,
+	REG_TYPE_BASE = 2,
+};
+
--
2.27.0