diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uvc b/Documentation/ABI/testing/configfs-usb-gadget-uvc index f00cff6d8c5c..c25cc2823fc8 100644 --- a/Documentation/ABI/testing/configfs-usb-gadget-uvc +++ b/Documentation/ABI/testing/configfs-usb-gadget-uvc @@ -52,7 +52,7 @@ Date: Dec 2014 KernelVersion: 4.0 Description: Default output terminal descriptors - All attributes read only: + All attributes read only except bSourceID: ============== ============================================= iTerminal index of string descriptor diff --git a/Makefile b/Makefile index 5c1a40ea09f8..cb960ee4269d 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 1 -SUBLEVEL = 17 +SUBLEVEL = 20 EXTRAVERSION = NAME = Hurr durr I'ma ninja sloth diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 43485ca0588a..508abadc0492 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -2215,6 +2215,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x088fe257 } +pointer_reference { + id: 0x08ba388c + kind: POINTER + pointee_type_id: 0x08a804af +} pointer_reference { id: 0x08bd7371 kind: POINTER @@ -2405,6 +2410,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x0c3f5690 } +pointer_reference { + id: 0x09a1684b + kind: POINTER + pointee_type_id: 0x0cc547b2 +} pointer_reference { id: 0x09a83f1c kind: POINTER @@ -5615,6 +5625,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x12abe021 } +pointer_reference { + id: 0x0e3cbe6a + kind: POINTER + pointee_type_id: 0x12b21f36 +} pointer_reference { id: 0x0e3d09df kind: POINTER @@ -9495,6 +9510,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x43d7722e } +pointer_reference { + id: 0x1a6ea392 + kind: POINTER + pointee_type_id: 0x43fa68d6 +} pointer_reference { id: 0x1a7122b5 kind: POINTER @@ -9715,6 +9735,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x449a775b } +pointer_reference { + id: 0x1bba1b5d + kind: POINTER + pointee_type_id: 0x44a88bea +} pointer_reference { id: 0x1bc0f2e1 kind: POINTER @@ -10905,6 +10930,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xae47ce96 } +pointer_reference { + id: 0x2105ad7e + kind: POINTER + pointee_type_id: 0xae565365 +} pointer_reference { id: 0x21069feb kind: POINTER @@ -11530,6 +11560,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xbbf3ac86 } +pointer_reference { + id: 0x24706e28 + kind: POINTER + pointee_type_id: 0xbb815e3f +} pointer_reference { id: 0x24733af8 kind: POINTER @@ -12095,6 +12130,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xb7f41573 } +pointer_reference { + id: 0x277e5e23 + kind: POINTER + pointee_type_id: 0xb7b99e10 +} pointer_reference { id: 0x2780ad74 kind: POINTER @@ -12175,6 +12215,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xb59d3734 } +pointer_reference { + id: 0x2809dbcb + kind: POINTER + pointee_type_id: 0x8a6789b1 +} pointer_reference { id: 0x28136e4b kind: POINTER @@ -16875,6 +16920,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9fb0a0b3 } +pointer_reference { + id: 0x2d7c4530 + kind: POINTER + pointee_type_id: 0x9fb1f25d +} pointer_reference { id: 0x2d7c4e7c kind: POINTER @@ -17740,6 +17790,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9d262bee } +pointer_reference { + id: 0x2dd9d6a6 + kind: POINTER + pointee_type_id: 0x9d27bc05 +} pointer_reference { id: 0x2ddb3c91 kind: POINTER @@ -22220,6 +22275,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xed6f687f } +pointer_reference { + 
id: 0x31cd5f14 + kind: POINTER + pointee_type_id: 0xed759acf +} pointer_reference { id: 0x31d250f7 kind: POINTER @@ -23345,6 +23405,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xfb1e8da7 } +pointer_reference { + id: 0x345a9df3 + kind: POINTER + pointee_type_id: 0xfb2a9153 +} pointer_reference { id: 0x3460ff38 kind: POINTER @@ -23690,6 +23755,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xfeeea03f } +pointer_reference { + id: 0x35304fb1 + kind: POINTER + pointee_type_id: 0xfe81d85b +} pointer_reference { id: 0x35324dc4 kind: POINTER @@ -26820,6 +26890,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xd957a3fb } +pointer_reference { + id: 0x3cc89d58 + kind: POINTER + pointee_type_id: 0xd96293fe +} pointer_reference { id: 0x3ccb45cc kind: POINTER @@ -31080,6 +31155,11 @@ qualified { qualifier: CONST qualified_type_id: 0x15193966 } +qualified { + id: 0xd96293fe + qualifier: CONST + qualified_type_id: 0x15cdf971 +} qualified { id: 0xd96df1ac qualifier: CONST @@ -32675,6 +32755,11 @@ qualified { qualifier: CONST qualified_type_id: 0x9c3d8016 } +qualified { + id: 0xfb2a9153 + qualifier: CONST + qualified_type_id: 0x9cedf3c4 +} qualified { id: 0xfb4dc602 qualifier: CONST @@ -33079,6 +33164,11 @@ array { number_of_elements: 3 element_type_id: 0x6c952252 } +array { + id: 0x0aba0b5b + number_of_elements: 48 + element_type_id: 0x6720d32f +} array { id: 0x0aba1232 number_of_elements: 128 @@ -33199,6 +33289,11 @@ array { number_of_elements: 18 element_type_id: 0x295c7202 } +array { + id: 0x147d5006 + number_of_elements: 48 + element_type_id: 0x1c3dbe5a +} array { id: 0x14b63a09 number_of_elements: 241 @@ -33729,6 +33824,11 @@ array { number_of_elements: 32 element_type_id: 0x421531e8 } +array { + id: 0x4472656b + number_of_elements: 16 + element_type_id: 0x21069feb +} array { id: 0x44b60e20 number_of_elements: 16 @@ -34454,6 +34554,11 @@ array { number_of_elements: 2 element_type_id: 0x0b0dc1ff } +array { + id: 0x6e8341c8 + number_of_elements: 2 + element_type_id: 0x08a804af +} array { id: 0x6ece91e0 number_of_elements: 52 @@ -35199,6 +35304,11 @@ array { number_of_elements: 31 element_type_id: 0xb3e7bac9 } +array { + id: 0xa13264c9 + number_of_elements: 12 + element_type_id: 0xb49bd643 +} array { id: 0xa164938f number_of_elements: 9 @@ -35539,6 +35649,11 @@ array { number_of_elements: 4 element_type_id: 0xc9082b19 } +array { + id: 0xbe2ea865 + number_of_elements: 12 + element_type_id: 0xc8e8e4f1 +} array { id: 0xbe4edfe5 number_of_elements: 31 @@ -41243,6 +41358,12 @@ member { type_id: 0x74d29cf1 offset: 32 } +member { + id: 0x2256864e + name: "active" + type_id: 0x74d29cf1 + offset: 1568 +} member { id: 0x2267dc75 name: "active" @@ -41405,6 +41526,12 @@ member { type_id: 0xd3c80119 offset: 2752 } +member { + id: 0x9c81b085 + name: "active_mask" + type_id: 0x33756485 + offset: 44352 +} member { id: 0x787948f2 name: "active_memcg" @@ -43614,6 +43741,18 @@ member { type_id: 0x6720d32f offset: 320 } +member { + id: 0xed0f8396 + name: "altset" + type_id: 0x6720d32f + offset: 64 +} +member { + id: 0xaae12e05 + name: "altset_idx" + type_id: 0x5d8155a5 + offset: 344 +} member { id: 0x9254b4e3 name: "altsetting" @@ -43625,6 +43764,12 @@ member { name: "altsetting" type_id: 0x01ace255 } +member { + id: 0x92c6375b + name: "altsetting" + type_id: 0x5d8155a5 + offset: 328 +} member { id: 0xb5e94c16 name: "alu_limit" @@ -46050,6 +46195,12 @@ member { type_id: 0x079ced0e offset: 2496 } +member { + id: 0x031bf6f0 + name: "attributes" + type_id: 0x5d8155a5 + offset: 352 +} member { 
id: 0x038f7bd7 name: "attributes" @@ -46407,6 +46558,12 @@ member { type_id: 0x6d7f5ff6 offset: 33480 } +member { + id: 0xbdb80820 + name: "autoclock" + type_id: 0x6d7f5ff6 + offset: 2984 +} member { id: 0x2133b79d name: "autoconf" @@ -47464,6 +47621,12 @@ member { name: "bad_signal_integrity" type_id: 0x8b205321 } +member { + id: 0x8cb596c3 + name: "badd_profile" + type_id: 0x6720d32f + offset: 2048 +} member { id: 0x9d72d128 name: "badrange" @@ -51556,6 +51719,12 @@ member { type_id: 0x0baa70a7 offset: 512 } +member { + id: 0xfaea6330 + name: "buffer_bytes" + type_id: 0x4585663f + offset: 608 +} member { id: 0xdcf071ed name: "buffer_bytes_max" @@ -51635,6 +51804,12 @@ member { type_id: 0xe8034002 offset: 1568 } +member { + id: 0x305177e3 + name: "buffer_size" + type_id: 0x4585663f + offset: 64 +} member { id: 0x3086d7e7 name: "buffer_size" @@ -52615,6 +52790,12 @@ member { type_id: 0xf435685e offset: 1216 } +member { + id: 0xd72d8bb2 + name: "byte_idx" + type_id: 0x6720d32f + offset: 64 +} member { id: 0x78351206 name: "byte_offset" @@ -55770,6 +55951,12 @@ member { type_id: 0x4585663f offset: 608 } +member { + id: 0xea02d9cf + name: "channels" + type_id: 0x4585663f + offset: 192 +} member { id: 0xea02dba0 name: "channels" @@ -55844,6 +56031,12 @@ member { type_id: 0x4585663f offset: 256 } +member { + id: 0x8ca4039d + name: "channels_max" + type_id: 0x4585663f + offset: 384 +} member { id: 0x8cea246b name: "channels_max" @@ -56302,6 +56495,11 @@ member { type_id: 0x366f4294 offset: 2432 } +member { + id: 0xc6785d4f + name: "chip" + type_id: 0x1a6ea392 +} member { id: 0xc67ad152 name: "chip" @@ -56366,6 +56564,12 @@ member { type_id: 0x377127f9 offset: 192 } +member { + id: 0x7ed53ea4 + name: "chmap" + type_id: 0x2105ad7e + offset: 768 +} member { id: 0x19316439 name: "chmap_kctl" @@ -57549,6 +57753,17 @@ member { name: "clock" type_id: 0x41fa8278 } +member { + id: 0x459380da + name: "clock" + type_id: 0x5d8155a5 + offset: 704 +} +member { + id: 0x459382e7 + name: "clock" + type_id: 0x5d8155a5 +} member { id: 0x45a92361 name: "clock" @@ -57696,6 +57911,18 @@ member { name: "clock_rate" type_id: 0x4585663f } +member { + id: 0x62a690da + name: "clock_ref" + type_id: 0x277e5e23 + offset: 128 +} +member { + id: 0x137ec745 + name: "clock_ref_list" + type_id: 0xd3c80119 + offset: 2496 +} member { id: 0xcabf01dc name: "clock_settime" @@ -63826,6 +64053,12 @@ member { type_id: 0x6d7f5ff6 offset: 10688 } +member { + id: 0xc69c6653 + name: "ctl_intf_media_devnode" + type_id: 0x1349e73e + offset: 3136 +} member { id: 0x130ef0f0 name: "ctl_name" @@ -63949,6 +64182,12 @@ member { type_id: 0x026c3dea offset: 7744 } +member { + id: 0x94cca9a4 + name: "ctrl_intf" + type_id: 0x01ace255 + offset: 3008 +} member { id: 0x15868a4a name: "ctrl_refs" @@ -64205,18 +64444,42 @@ member { type_id: 0x01ace255 offset: 64 } +member { + id: 0x15c836df + name: "cur_audiofmt" + type_id: 0x3cc89d58 + offset: 45504 +} +member { + id: 0x15c83da7 + name: "cur_audiofmt" + type_id: 0x3cc89d58 + offset: 256 +} member { id: 0xb27b9dec name: "cur_blksize" type_id: 0x4585663f offset: 6336 } +member { + id: 0x0f396cf3 + name: "cur_buffer_periods" + type_id: 0x4585663f + offset: 45760 +} member { id: 0x97a04196 name: "cur_bus_speed" type_id: 0x5d8155a5 offset: 1752 } +member { + id: 0x50989b56 + name: "cur_channels" + type_id: 0x4585663f + offset: 45632 +} member { id: 0x5b87e752 name: "cur_cmd_info" @@ -64235,6 +64498,18 @@ member { type_id: 0x0d821a01 offset: 384 } +member { + id: 0xc593684b + name: "cur_format" + type_id: 
0xc714b5b1 + offset: 45600 +} +member { + id: 0x9c3b5585 + name: "cur_frame_bytes" + type_id: 0x4585663f + offset: 45664 +} member { id: 0xa86034a6 name: "cur_image" @@ -64294,6 +64569,24 @@ member { type_id: 0x6d7f5ff6 offset: 8968 } +member { + id: 0xb34b435a + name: "cur_period_bytes" + type_id: 0x4585663f + offset: 45728 +} +member { + id: 0x92b660f8 + name: "cur_period_frames" + type_id: 0x4585663f + offset: 45696 +} +member { + id: 0x4a281c7b + name: "cur_rate" + type_id: 0x4585663f + offset: 45568 +} member { id: 0xa21ac0e7 name: "cur_rx_dma_dev" @@ -64335,6 +64628,12 @@ member { type_id: 0xc9082b19 offset: 640 } +member { + id: 0xf740bb6e + name: "curframesize" + type_id: 0x4585663f + offset: 45152 +} member { id: 0x2f9ca0f4 name: "curlft" @@ -64347,6 +64646,12 @@ member { type_id: 0xa2056fd8 offset: 4288 } +member { + id: 0xbccab28d + name: "curpacksize" + type_id: 0x4585663f + offset: 45120 +} member { id: 0x8d1c53d4 name: "curr" @@ -66063,6 +66368,12 @@ member { type_id: 0x18bd6530 offset: 5184 } +member { + id: 0x1f84d6b1 + name: "data_endpoint" + type_id: 0x24706e28 + offset: 832 +} member { id: 0x8a9be6c6 name: "data_hard_start" @@ -66290,6 +66601,12 @@ member { type_id: 0xb914bfab offset: 1152 } +member { + id: 0xbbf02f32 + name: "data_subs" + type_id: 0x08ba388c + offset: 512 +} member { id: 0x132ae04f name: "data_tag_unit_size" @@ -66355,6 +66672,18 @@ member { name: "dataformat" type_id: 0xe62ebf07 } +member { + id: 0x3029aa1e + name: "datainterval" + type_id: 0x5d8155a5 + offset: 416 +} +member { + id: 0x3031a149 + name: "datainterval" + type_id: 0x4585663f + offset: 45248 +} member { id: 0xb90191fa name: "datalen" @@ -69553,6 +69882,12 @@ member { type_id: 0x0d7ce7cc offset: 512 } +member { + id: 0xce349a4c + name: "dev" + type_id: 0x0d7ce7cc + offset: 64 +} member { id: 0xce349ead name: "dev" @@ -71565,6 +71900,12 @@ member { type_id: 0x4585663f offset: 6336 } +member { + id: 0xa0ffe776 + name: "direction" + type_id: 0x6720d32f + offset: 192 +} member { id: 0x2773b302 name: "direction_input" @@ -74852,6 +75193,30 @@ member { type_id: 0x96df7677 offset: 8 } +member { + id: 0xd39d535a + name: "dsd_bitrev" + type_id: 0x6d7f5ff6 + offset: 840 +} +member { + id: 0xc68d6012 + name: "dsd_dop" + type_id: 0x6d7f5ff6 + offset: 832 +} +member { + id: 0xc6d7422d + name: "dsd_dop" + type_id: 0x375c29e3 + offset: 1408 +} +member { + id: 0x05895873 + name: "dsd_raw" + type_id: 0x6d7f5ff6 + offset: 848 +} member { id: 0x3a026cf2 name: "dsn" @@ -77691,6 +78056,18 @@ member { type_id: 0x2c6a91d5 offset: 64 } +member { + id: 0xd28d836d + name: "endpoint" + type_id: 0x6720d32f + offset: 224 +} +member { + id: 0xd2b7290f + name: "endpoint" + type_id: 0x5d8155a5 + offset: 360 +} member { id: 0xd2e931bf name: "endpoint" @@ -78246,6 +78623,12 @@ member { type_id: 0xe5305400 offset: 64 } +member { + id: 0xb5827b12 + name: "ep" + type_id: 0x24706e28 + offset: 192 +} member { id: 0xb5a5932b name: "ep" @@ -78264,6 +78647,12 @@ member { type_id: 0x24275666 offset: 704 } +member { + id: 0xcd580f0d + name: "ep_attr" + type_id: 0x5d8155a5 + offset: 368 +} member { id: 0xd0ba5b1b name: "ep_dev" @@ -78277,6 +78666,12 @@ member { offset: 321 bitsize: 1 } +member { + id: 0xc7a83ff7 + name: "ep_idx" + type_id: 0x5d8155a5 + offset: 336 +} member { id: 0xd80e1005 name: "ep_in" @@ -78300,12 +78695,30 @@ member { type_id: 0xd3c80119 offset: 448 } +member { + id: 0xb8986853 + name: "ep_list" + type_id: 0xd3c80119 + offset: 2240 +} member { id: 0xb8986969 name: "ep_list" type_id: 0xd3c80119 offset: 192 } 
+member { + id: 0x1c1b9943 + name: "ep_num" + type_id: 0x4585663f + offset: 768 +} +member { + id: 0x1c3934f3 + name: "ep_num" + type_id: 0x6720d32f + offset: 256 +} member { id: 0x775f7c02 name: "ep_out" @@ -83268,6 +83681,13 @@ member { type_id: 0x2d963c99 offset: 1600 } +member { + id: 0x3316a2a2 + name: "fill_max" + type_id: 0x4585663f + offset: 45216 + bitsize: 1 +} member { id: 0x73e192b7 name: "fill_modes" @@ -83893,6 +84313,12 @@ member { type_id: 0x6720d32f offset: 6816 } +member { + id: 0x2899a5e8 + name: "fixed_rate" + type_id: 0x6d7f5ff6 + offset: 45440 +} member { id: 0xb8f884d1 name: "fixed_uV" @@ -84755,6 +85181,12 @@ member { type_id: 0x33756485 offset: 36160 } +member { + id: 0x2d5bf9c9 + name: "flags" + type_id: 0x33756485 + offset: 960 +} member { id: 0x2d5bf9f3 name: "flags" @@ -86040,12 +86472,42 @@ member { type_id: 0x1afc78a4 offset: 128 } +member { + id: 0xeec1037c + name: "fmt_bits" + type_id: 0x4585663f + offset: 256 +} +member { + id: 0x608f71fe + name: "fmt_list" + type_id: 0xd3c80119 + offset: 1216 +} member { id: 0xa94438aa name: "fmt_size" type_id: 0x4585663f offset: 896 } +member { + id: 0x2a040114 + name: "fmt_type" + type_id: 0x4585663f + offset: 224 +} +member { + id: 0x2a040547 + name: "fmt_type" + type_id: 0x4585663f + offset: 448 +} +member { + id: 0x2a040fd0 + name: "fmt_type" + type_id: 0x4585663f + offset: 160 +} member { id: 0xf70554da name: "fn" @@ -86548,6 +87010,18 @@ member { type_id: 0x92233392 offset: 64 } +member { + id: 0x28d35b80 + name: "formats" + type_id: 0x92233392 + offset: 1088 +} +member { + id: 0x28d35f34 + name: "formats" + type_id: 0x92233392 + offset: 128 +} member { id: 0x9030e796 name: "former_request" @@ -86979,6 +87453,12 @@ member { name: "frame_length" type_id: 0xe62ebf07 } +member { + id: 0xabb0a664 + name: "frame_limit" + type_id: 0x4585663f + offset: 736 +} member { id: 0xcfd4c74e name: "frame_num" @@ -87033,6 +87513,12 @@ member { type_id: 0xf435685e offset: 256 } +member { + id: 0xcdc3e2ff + name: "frame_size" + type_id: 0x4585663f + offset: 288 +} member { id: 0xb23a5082 name: "frame_sync" @@ -87853,6 +88339,30 @@ member { type_id: 0x92233392 offset: 1408 } +member { + id: 0x4cc73741 + name: "freqm" + type_id: 0x4585663f + offset: 44896 +} +member { + id: 0xe35e9914 + name: "freqmax" + type_id: 0x4585663f + offset: 44960 +} +member { + id: 0x8d8096c1 + name: "freqn" + type_id: 0x4585663f + offset: 44864 +} +member { + id: 0x9af62beb + name: "freqshift" + type_id: 0x6720d32f + offset: 44928 +} member { id: 0xe5245057 name: "frequency" @@ -89335,6 +89845,12 @@ member { type_id: 0x4585663f offset: 160 } +member { + id: 0x300a014e + name: "generic_implicit_fb" + type_id: 0x6d7f5ff6 + offset: 2976 +} member { id: 0xe46642f7 name: "genid" @@ -96453,6 +96969,12 @@ member { type_id: 0x81bb7781 offset: 2240 } +member { + id: 0xb6590178 + name: "hwptr_done" + type_id: 0x4585663f + offset: 672 +} member { id: 0x06e3e847 name: "hwpwm" @@ -98953,6 +99475,17 @@ member { type_id: 0xe62ebf07 offset: 736 } +member { + id: 0x81413815 + name: "iface" + type_id: 0x5d8155a5 +} +member { + id: 0x81413fd2 + name: "iface" + type_id: 0x5d8155a5 + offset: 320 +} member { id: 0x81e47045 name: "iface" @@ -98970,6 +99503,18 @@ member { type_id: 0x31b5ca26 offset: 512 } +member { + id: 0xbb70df22 + name: "iface_ref" + type_id: 0x31cd5f14 + offset: 64 +} +member { + id: 0xf7efab27 + name: "iface_ref_list" + type_id: 0xd3c80119 + offset: 2368 +} member { id: 0xd93134e1 name: "ifalias" @@ -99459,6 +100004,18 @@ member { type_id: 0xc9082b19 offset: 
64 } +member { + id: 0xf089858f + name: "implicit_fb" + type_id: 0x6d7f5ff6 + offset: 376 +} +member { + id: 0xd49a75eb + name: "implicit_fb_sync" + type_id: 0x6d7f5ff6 + offset: 45408 +} member { id: 0xa429dfbf name: "implicit_on_dfl" @@ -100214,6 +100771,12 @@ member { type_id: 0x04fd619c offset: 1600 } +member { + id: 0x6fe5be72 + name: "inflight_bytes" + type_id: 0x4585663f + offset: 640 +} member { id: 0x6af718e5 name: "inflight_tracked" @@ -102242,6 +102805,12 @@ member { name: "intf" type_id: 0x2738d6de } +member { + id: 0x8a7e5be3 + name: "intf" + type_id: 0x4472656b + offset: 192 +} member { id: 0x8a8c82f2 name: "intf" @@ -102265,6 +102834,12 @@ member { type_id: 0x51d54e3e offset: 3264 } +member { + id: 0x02cbf0af + name: "intf_devnode" + type_id: 0x1349e73e + offset: 1024 +} member { id: 0x02cbfb9b name: "intf_devnode" @@ -102277,6 +102852,12 @@ member { type_id: 0x1349e73e offset: 960 } +member { + id: 0xa4ad6340 + name: "intf_link" + type_id: 0x3ae3ff84 + offset: 1088 +} member { id: 0x4a590e0a name: "intid" @@ -107973,6 +108554,12 @@ member { type_id: 0x33756485 offset: 2176 } +member { + id: 0xc312300f + name: "last_frame_number" + type_id: 0x4585663f + offset: 1376 +} member { id: 0xed9c508d name: "last_frame_ts" @@ -108039,6 +108626,12 @@ member { type_id: 0x6720d32f offset: 672 } +member { + id: 0x783e63c0 + name: "last_iface" + type_id: 0x6720d32f + offset: 1952 +} member { id: 0x4280578a name: "last_index" @@ -110884,6 +111477,12 @@ member { type_id: 0xd3c80119 offset: 1088 } +member { + id: 0x7c00e287 + name: "list" + type_id: 0xd3c80119 + offset: 45824 +} member { id: 0x7c00e340 name: "list" @@ -110992,6 +111591,12 @@ member { type_id: 0xd3c80119 offset: 640 } +member { + id: 0x7c00e99c + name: "list" + type_id: 0xd3c80119 + offset: 3392 +} member { id: 0x7c00eacd name: "list" @@ -111913,6 +112518,12 @@ member { type_id: 0xf313e71a offset: 2432 } +member { + id: 0x2d1fea10 + name: "lock" + type_id: 0xf313e71a + offset: 45792 +} member { id: 0x2d1fea8d name: "lock" @@ -112402,6 +113013,12 @@ member { type_id: 0x5f2909b3 offset: 4032 } +member { + id: 0x1403255a + name: "locked" + type_id: 0x74d29cf1 + offset: 32 +} member { id: 0x145ea68f name: "locked" @@ -113198,6 +113815,24 @@ member { type_id: 0x4585663f offset: 672 } +member { + id: 0xfb1e410b + name: "lowlatency" + type_id: 0x6d7f5ff6 + offset: 2992 +} +member { + id: 0x079f80c5 + name: "lowlatency_playback" + type_id: 0x6d7f5ff6 + offset: 1512 +} +member { + id: 0x079f821d + name: "lowlatency_playback" + type_id: 0x6d7f5ff6 + offset: 45416 +} member { id: 0x8bd8e3c1 name: "lowmem_reserve" @@ -115081,6 +115716,11 @@ member { type_id: 0x0ff0075d offset: 1024 } +member { + id: 0xbc8db669 + name: "marker" + type_id: 0x6720d32f +} member { id: 0x2236c9c7 name: "marks" @@ -117629,6 +118269,12 @@ member { type_id: 0x6720d32f offset: 384 } +member { + id: 0xea527421 + name: "max_urb_frames" + type_id: 0x4585663f + offset: 45088 +} member { id: 0xdc6f08fd name: "max_usage" @@ -117863,6 +118509,12 @@ member { type_id: 0x4585663f offset: 16960 } +member { + id: 0xd40d27d0 + name: "maxframesize" + type_id: 0x4585663f + offset: 45056 +} member { id: 0x3c1ee1f8 name: "maximum" @@ -117936,6 +118588,18 @@ member { offset: 384 bitsize: 16 } +member { + id: 0x4fcae8b0 + name: "maxpacksize" + type_id: 0x4585663f + offset: 45024 +} +member { + id: 0x4fcae9cb + name: "maxpacksize" + type_id: 0x4585663f + offset: 448 +} member { id: 0x6af82950 name: "maxrss" @@ -118658,17 +119322,47 @@ member { type_id: 0x1f755ae3 offset: 640 } 
+member { + id: 0x32f10052 + name: "media_ctl" + type_id: 0x1bba1b5d + offset: 1536 +} member { id: 0xdda9231e name: "media_dev" type_id: 0x16239d0c } +member { + id: 0xdda92c8f + name: "media_dev" + type_id: 0x16239d0c + offset: 3072 +} +member { + id: 0x68246ee4 + name: "media_entity" + type_id: 0x501df1d2 + offset: 64 +} member { id: 0x9ecbe5e8 name: "media_id" type_id: 0x295c7202 offset: 256 } +member { + id: 0x960530b0 + name: "media_pad" + type_id: 0x595a11b6 + offset: 1152 +} +member { + id: 0xf70ae3f7 + name: "media_pipe" + type_id: 0x76e78cd9 + offset: 1664 +} member { id: 0x83b96b6a name: "media_ptr" @@ -119516,6 +120210,12 @@ member { type_id: 0xc9082b19 offset: 16 } +member { + id: 0x80e66a61 + name: "midi_list" + type_id: 0xd3c80119 + offset: 2688 +} member { id: 0x0fb003f6 name: "migrate_entry" @@ -120504,6 +121204,12 @@ member { type_id: 0x3a3eb2f9 offset: 128 } +member { + id: 0x207998f5 + name: "mixer_list" + type_id: 0xd3c80119 + offset: 2816 +} member { id: 0x91245448 name: "mixername" @@ -123507,6 +124213,12 @@ member { type_id: 0xa7c362b0 offset: 320 } +member { + id: 0xad89857f + name: "mutex" + type_id: 0xa7c362b0 + offset: 1280 +} member { id: 0xad8985f0 name: "mutex" @@ -125815,6 +126527,13 @@ member { type_id: 0xc8f8a046 offset: 128 } +member { + id: 0x298ab6a9 + name: "need_delayed_register" + type_id: 0x4585663f + offset: 1888 + bitsize: 1 +} member { id: 0x34dbea4f name: "need_free" @@ -125846,6 +126565,12 @@ member { type_id: 0x6d7f5ff6 offset: 328 } +member { + id: 0x02f79d12 + name: "need_prepare" + type_id: 0x6d7f5ff6 + offset: 45432 +} member { id: 0x8b549786 name: "need_qs" @@ -125878,6 +126603,24 @@ member { type_id: 0x6720d32f offset: 7360 } +member { + id: 0x5472d811 + name: "need_setup" + type_id: 0x6d7f5ff6 + offset: 45424 +} +member { + id: 0x5472dd8c + name: "need_setup" + type_id: 0x6d7f5ff6 + offset: 128 +} +member { + id: 0x5472dfa8 + name: "need_setup" + type_id: 0x6d7f5ff6 + offset: 8 +} member { id: 0x394e1871 name: "need_sync" @@ -126942,6 +127685,24 @@ member { type_id: 0x2babdcfa offset: 192 } +member { + id: 0x11a14a0a + name: "next_packet" + type_id: 0xa13264c9 + offset: 25280 +} +member { + id: 0x9c92d4d9 + name: "next_packet_head" + type_id: 0x4585663f + offset: 44096 +} +member { + id: 0x0b6cdc5c + name: "next_packet_queued" + type_id: 0x4585663f + offset: 44128 +} member { id: 0x30f60bd2 name: "next_pn" @@ -129708,6 +130469,12 @@ member { type_id: 0x6720d32f offset: 1024 } +member { + id: 0x6cadac11 + name: "nr_rates" + type_id: 0x4585663f + offset: 576 +} member { id: 0x14baa11a name: "nr_reclaim_start" @@ -131055,6 +131822,12 @@ member { type_id: 0x6720d32f offset: 6336 } +member { + id: 0x7bcccefc + name: "num_formats" + type_id: 0x4585663f + offset: 1152 +} member { id: 0xee7562a7 name: "num_free" @@ -131182,6 +131955,12 @@ member { type_id: 0x6720d32f offset: 3424 } +member { + id: 0x4c24eb73 + name: "num_interfaces" + type_id: 0x6720d32f + offset: 1920 +} member { id: 0x1968f7ca name: "num_interrupt_in" @@ -131881,6 +132660,12 @@ member { type_id: 0xf435685e offset: 256 } +member { + id: 0xc9708c29 + name: "num_suspended_intf" + type_id: 0x6720d32f + offset: 1984 +} member { id: 0x4efba86c name: "num_sw_devs" @@ -132171,6 +132956,12 @@ member { type_id: 0x3e579862 offset: 320 } +member { + id: 0x54a4d18c + name: "nurbs" + type_id: 0x4585663f + offset: 44288 +} member { id: 0x47678426 name: "nutmrs" @@ -134008,6 +134799,24 @@ member { name: "openat2" type_id: 0x95539d46 } +member { + id: 0xe0f6c039 + name: "opened" + type_id: 
0x6720d32f + offset: 32 +} +member { + id: 0xe0f6c5cb + name: "opened" + type_id: 0x6720d32f + offset: 192 +} +member { + id: 0xe0f6c9e3 + name: "opened" + type_id: 0x6720d32f + offset: 64 +} member { id: 0xe0fc9b4c name: "opened" @@ -136465,6 +137274,29 @@ member { type_id: 0xc9082b19 offset: 128 } +member { + id: 0xb06ecc6f + name: "packet_size" + type_id: 0x0aba0b5b + offset: 352 +} +member { + id: 0xb0700f6d + name: "packet_size" + type_id: 0x147d5006 +} +member { + id: 0x483bc789 + name: "packets" + type_id: 0x6720d32f + offset: 1536 +} +member { + id: 0x483bc9b7 + name: "packets" + type_id: 0x6720d32f + offset: 288 +} member { id: 0x4866d30f name: "packets" @@ -136494,6 +137326,12 @@ member { type_id: 0xb3e7bac9 offset: 24 } +member { + id: 0xdbefe5f2 + name: "packsize" + type_id: 0x7dc8196c + offset: 44704 +} member { id: 0x6b32a1dc name: "pad" @@ -138671,6 +139509,12 @@ member { type_id: 0x0fb1d80a offset: 896 } +member { + id: 0xe2cb80a8 + name: "pcm_devs" + type_id: 0x6720d32f + offset: 2624 +} member { id: 0xa37a7538 name: "pcm_elems" @@ -138689,12 +139533,24 @@ member { type_id: 0x0baa70a7 offset: 736 } +member { + id: 0xa6de1752 + name: "pcm_index" + type_id: 0x6720d32f + offset: 128 +} member { id: 0x1bbf4d02 name: "pcm_io_frames" type_id: 0xe62ebf07 offset: 96 } +member { + id: 0x5201ebe3 + name: "pcm_list" + type_id: 0xd3c80119 + offset: 2112 +} member { id: 0x69bbb080 name: "pcm_mutex" @@ -138737,6 +139593,12 @@ member { type_id: 0xe3575a9b offset: 1472 } +member { + id: 0xb1c38868 + name: "pcm_substream" + type_id: 0x14b9453b + offset: 128 +} member { id: 0x4d19040d name: "pconstructor" @@ -138878,6 +139740,18 @@ member { type_id: 0x6d7f5ff6 offset: 7264 } +member { + id: 0x24dcb717 + name: "pd_d1d0_rec" + type_id: 0x6720d32f + offset: 32 +} +member { + id: 0xa3f385a4 + name: "pd_d2d0_rec" + type_id: 0x6720d32f + offset: 64 +} member { id: 0x504294e3 name: "pd_event_lock" @@ -138896,6 +139770,11 @@ member { type_id: 0x32daa2f4 offset: 320 } +member { + id: 0x0e0f4b25 + name: "pd_id" + type_id: 0x6720d32f +} member { id: 0x148dd054 name: "pd_revision" @@ -139776,6 +140655,12 @@ member { type_id: 0xc9082b19 offset: 224 } +member { + id: 0x3d77e8c3 + name: "period_elapsed_pending" + type_id: 0x4585663f + offset: 576 +} member { id: 0x8a43cc03 name: "period_left" @@ -140281,6 +141166,12 @@ member { type_id: 0xe8034002 offset: 64 } +member { + id: 0x39cdf0ed + name: "phase" + type_id: 0x4585663f + offset: 44992 +} member { id: 0x39ef5b6d name: "phase" @@ -141210,6 +142101,12 @@ member { type_id: 0x4585663f offset: 896 } +member { + id: 0x99a9a891 + name: "pipe" + type_id: 0x4585663f + offset: 44672 +} member { id: 0x99a9aba6 name: "pipe" @@ -141418,6 +142315,12 @@ member { type_id: 0x6720d32f offset: 160 } +member { + id: 0x3b313ebb + name: "pkt_offset_adj" + type_id: 0x4585663f + offset: 480 +} member { id: 0xbcea5563 name: "pkt_out" @@ -143817,6 +144720,12 @@ member { type_id: 0x0fc6fef1 offset: 64 } +member { + id: 0x6b1f53ba + name: "pps" + type_id: 0x4585663f + offset: 44832 +} member { id: 0x6b3dfbc3 name: "pps" @@ -144543,6 +145452,12 @@ member { type_id: 0x4585663f offset: 1024 } +member { + id: 0x90c1fac3 + name: "prepare_data_urb" + type_id: 0x2d7c4530 + offset: 384 +} member { id: 0x85980de7 name: "prepare_fb" @@ -147423,6 +148338,12 @@ member { type_id: 0xb3e7bac9 offset: 72 } +member { + id: 0xdc82cf73 + name: "protocol" + type_id: 0x5d8155a5 + offset: 424 +} member { id: 0xdcaac21e name: "protocol" @@ -149134,6 +150055,12 @@ member { type_id: 0x6d7f5ff6 offset: 
320 } +member { + id: 0x2adf5778 + name: "queued" + type_id: 0x6720d32f + offset: 320 +} member { id: 0x2ae02fcd name: "queued" @@ -149287,6 +150214,12 @@ member { offset: 7112 bitsize: 1 } +member { + id: 0xc871a772 + name: "quirk_flags" + type_id: 0x4585663f + offset: 1856 +} member { id: 0xdd7606f3 name: "quirk_max_rate" @@ -149307,6 +150240,12 @@ member { offset: 7114 bitsize: 1 } +member { + id: 0x877d5113 + name: "quirk_type" + type_id: 0xb3a3e4db + offset: 1248 +} member { id: 0xfcfe4a22 name: "quirk_zlp_not_supp" @@ -149875,6 +150814,12 @@ member { type_id: 0x4585663f offset: 576 } +member { + id: 0x5fb57bae + name: "rate" + type_id: 0x6720d32f + offset: 96 +} member { id: 0x5fd9fff9 name: "rate" @@ -150005,6 +150950,12 @@ member { type_id: 0x0baa70a7 offset: 512 } +member { + id: 0xe742b24c + name: "rate_max" + type_id: 0x4585663f + offset: 544 +} member { id: 0xe742b45a name: "rate_max" @@ -150017,6 +150968,12 @@ member { type_id: 0x0baa70a7 offset: 480 } +member { + id: 0x325fd91e + name: "rate_min" + type_id: 0x4585663f + offset: 512 +} member { id: 0x325fdfe6 name: "rate_min" @@ -150053,6 +151010,12 @@ member { type_id: 0xe62ebf07 offset: 704 } +member { + id: 0x284ec037 + name: "rate_table" + type_id: 0x1bf16028 + offset: 640 +} member { id: 0xa2923cb1 name: "rate_update" @@ -150071,6 +151034,12 @@ member { type_id: 0x4585663f offset: 128 } +member { + id: 0x3f103f0c + name: "rates" + type_id: 0x4585663f + offset: 480 +} member { id: 0x3f5338f3 name: "rates" @@ -151949,6 +152918,18 @@ member { type_id: 0xb3e7bac9 offset: 96 } +member { + id: 0xb8c769bd + name: "ready_list" + type_id: 0xd3c80119 + offset: 1920 +} +member { + id: 0xf6bbf3f2 + name: "ready_playback_urbs" + type_id: 0xd3c80119 + offset: 44160 +} member { id: 0x464757ea name: "real" @@ -157824,6 +158805,12 @@ member { type_id: 0x26927abb offset: 6976 } +member { + id: 0x074c5274 + name: "retire_data_urb" + type_id: 0x0e3cbe6a + offset: 448 +} member { id: 0x533104e4 name: "retrans" @@ -157854,6 +158841,12 @@ member { type_id: 0xe62ebf07 offset: 96 } +member { + id: 0x4f8c91e3 + name: "retries" + type_id: 0x6720d32f + offset: 160 +} member { id: 0x4f8c92df name: "retries" @@ -160090,6 +161083,13 @@ member { type_id: 0x33756485 offset: 1088 } +member { + id: 0x0492e97a + name: "running" + type_id: 0x4585663f + offset: 544 + bitsize: 1 +} member { id: 0xad05e43b name: "running" @@ -160113,6 +161113,12 @@ member { type_id: 0x6720d32f offset: 1440 } +member { + id: 0xad1c431e + name: "running" + type_id: 0x74d29cf1 + offset: 224 +} member { id: 0xad515797 name: "running" @@ -162142,6 +163148,12 @@ member { name: "sample" type_id: 0x0803e9c5 } +member { + id: 0x1beea273 + name: "sample_accum" + type_id: 0x4585663f + offset: 44800 +} member { id: 0xa26bd18c name: "sample_bits" @@ -162218,6 +163230,12 @@ member { type_id: 0xe62ebf07 offset: 96 } +member { + id: 0x58187514 + name: "sample_rate_read_error" + type_id: 0x6720d32f + offset: 2016 +} member { id: 0x23cb252f name: "sample_rates" @@ -162236,6 +163254,12 @@ member { type_id: 0xedf277ba offset: 640 } +member { + id: 0x0a3f8c5f + name: "sample_rem" + type_id: 0x4585663f + offset: 44768 +} member { id: 0xa30ebd21 name: "sample_size" @@ -167164,6 +168188,12 @@ member { type_id: 0x4585663f offset: 1600 } +member { + id: 0x84ae6459 + name: "setup" + type_id: 0x6720d32f + offset: 2944 +} member { id: 0x84c4d659 name: "setup" @@ -168173,6 +169203,12 @@ member { type_id: 0x0b57ce4e offset: 16192 } +member { + id: 0x9e81eb03 + name: "shutdown" + type_id: 0x74d29cf1 + offset: 
1600 +} member { id: 0x9e92127e name: "shutdown" @@ -168353,6 +169389,12 @@ member { type_id: 0x2dd58efa offset: 576 } +member { + id: 0x63640b5d + name: "shutdown_wait" + type_id: 0x03913382 + offset: 1664 +} member { id: 0xce8d253e name: "shutting_down" @@ -168696,6 +169738,12 @@ member { type_id: 0xa8fff47c offset: 1664 } +member { + id: 0xab6f20ab + name: "silence_value" + type_id: 0x5d8155a5 + offset: 45312 +} member { id: 0x3efa0771 name: "simple_422" @@ -170180,6 +171228,12 @@ member { type_id: 0x6d7f5ff6 offset: 1408 } +member { + id: 0xb0732d0b + name: "skip_packets" + type_id: 0x6720d32f + offset: 45376 +} member { id: 0xdecf8b6a name: "skip_phy_initialization" @@ -171565,6 +172619,12 @@ member { type_id: 0x6720d32f offset: 256 } +member { + id: 0xa0af0fa0 + name: "speed" + type_id: 0x4585663f + offset: 1024 +} member { id: 0xa0b8e129 name: "speed" @@ -173939,6 +174999,12 @@ member { type_id: 0x74d29cf1 offset: 768 } +member { + id: 0x72872ac4 + name: "state" + type_id: 0x74d29cf1 + offset: 352 +} member { id: 0x72873e78 name: "state" @@ -175686,6 +176752,17 @@ member { type_id: 0xe62ebf07 offset: 128 } +member { + id: 0x4d41138a + name: "str_pd" + type_id: 0x09a1684b + offset: 320 +} +member { + id: 0xc01f41e7 + name: "stream" + type_id: 0x2809dbcb +} member { id: 0xc05068ef name: "stream" @@ -175823,6 +176900,12 @@ member { type_id: 0xe52a3418 offset: 416 } +member { + id: 0x9e2efb28 + name: "stream_offset_adj" + type_id: 0x4585663f + offset: 512 +} member { id: 0x90f74116 name: "stream_parser" @@ -175915,6 +176998,12 @@ member { type_id: 0x6d7f5ff6 offset: 4240 } +member { + id: 0x5e10fd0f + name: "stride" + type_id: 0x4585663f + offset: 45344 +} member { id: 0x5e325db2 name: "stride" @@ -176235,6 +177324,12 @@ member { type_id: 0x42189fc0 offset: 896 } +member { + id: 0x5e6786ba + name: "submitted_urbs" + type_id: 0x74d29cf1 + offset: 44480 +} member { id: 0x4f0fe730 name: "submitter" @@ -176300,6 +177395,12 @@ member { type_id: 0xd41e888f offset: 864 } +member { + id: 0x48baa384 + name: "subs" + type_id: 0x08ba388c + offset: 128 +} member { id: 0x6a890efe name: "subscribe_active" @@ -176353,6 +177454,12 @@ member { type_id: 0x14b9453b offset: 192 } +member { + id: 0x87496810 + name: "substream" + type_id: 0x6e8341c8 + offset: 192 +} member { id: 0xe80be976 name: "substream_count" @@ -177885,18 +178992,48 @@ member { type_id: 0x2d2b5fad offset: 448 } +member { + id: 0x052696c5 + name: "sync_altsetting" + type_id: 0x5d8155a5 + offset: 400 +} member { id: 0x4cf66d53 name: "sync_device_ts" type_id: 0xc9082b19 offset: 384 } +member { + id: 0xaa5b83a9 + name: "sync_dma" + type_id: 0xe02e14d6 + offset: 44608 +} member { id: 0x33ec9d46 name: "sync_dtim_count" type_id: 0x295c7202 offset: 416 } +member { + id: 0x349fecf2 + name: "sync_endpoint" + type_id: 0x24706e28 + offset: 896 +} +member { + id: 0x19c0d237 + name: "sync_ep" + type_id: 0x5d8155a5 + offset: 384 +} +member { + id: 0x2926683d + name: "sync_ep_idx" + type_id: 0x5d8155a5 + offset: 408 +} member { id: 0xd1c4a29b name: "sync_file_list" @@ -177915,6 +179052,12 @@ member { type_id: 0xe8034002 offset: 416 } +member { + id: 0x96fae129 + name: "sync_iface" + type_id: 0x5d8155a5 + offset: 392 +} member { id: 0x335c5ca9 name: "sync_interval" @@ -177991,6 +179134,18 @@ member { type_id: 0x0e7c0293 offset: 1024 } +member { + id: 0x6cdf2a62 + name: "sync_sink" + type_id: 0x24706e28 + offset: 640 +} +member { + id: 0x9ec45d54 + name: "sync_source" + type_id: 0x24706e28 + offset: 576 +} member { id: 0x2ae2f8e5 name: "sync_state" @@ 
-178039,6 +179194,12 @@ member { type_id: 0x2f4d85b2 offset: 64 } +member { + id: 0xf7e957a9 + name: "syncbuf" + type_id: 0x0483e6f8 + offset: 44544 +} member { id: 0xb574687c name: "synced" @@ -178052,6 +179213,18 @@ member { type_id: 0x0c254fa0 offset: 512 } +member { + id: 0xf820ab53 + name: "syncinterval" + type_id: 0x4585663f + offset: 45280 +} +member { + id: 0x699bf8aa + name: "syncmaxsize" + type_id: 0x4585663f + offset: 45184 +} member { id: 0xa06350f4 name: "syncobj_idr" @@ -179068,6 +180241,12 @@ member { type_id: 0x6720d32f offset: 256 } +member { + id: 0x5a8cea15 + name: "system_suspend" + type_id: 0x4585663f + offset: 1536 +} member { id: 0x545d0d76 name: "system_suspending" @@ -180765,6 +181944,13 @@ member { type_id: 0x6720d32f offset: 7232 } +member { + id: 0xb1694b94 + name: "tenor_fb_quirk" + type_id: 0x4585663f + offset: 45217 + bitsize: 1 +} member { id: 0xe4674157 name: "terminate" @@ -183665,6 +184851,12 @@ member { type_id: 0xe02e14d6 offset: 832 } +member { + id: 0xcd47b1ad + name: "transfer_done" + type_id: 0x4585663f + offset: 704 +} member { id: 0x8cd14077 name: "transfer_flags" @@ -184179,6 +185371,12 @@ member { type_id: 0x6d7f5ff6 offset: 256 } +member { + id: 0xe42cffef + name: "trigger_tstamp_pending_update" + type_id: 0x6d7f5ff6 + offset: 1504 +} member { id: 0x6782a7d6 name: "trigger_type" @@ -185744,6 +186942,13 @@ member { type_id: 0xf435685e offset: 64 } +member { + id: 0xd84fbda6 + name: "tx_length_quirk" + type_id: 0x4585663f + offset: 417 + bitsize: 1 +} member { id: 0x7ebca209 name: "tx_low_drive_cnt" @@ -186321,6 +187526,13 @@ member { name: "txerr" type_id: 0xe8034002 } +member { + id: 0x305e2804 + name: "txfr_quirk" + type_id: 0x4585663f + offset: 416 + bitsize: 1 +} member { id: 0x830688ae name: "txn" @@ -186642,6 +187854,12 @@ member { type_id: 0x6720d32f offset: 64 } +member { + id: 0x5c31f174 + name: "type" + type_id: 0x6720d32f + offset: 288 +} member { id: 0x5c31f2da name: "type" @@ -188850,6 +190068,12 @@ member { name: "unlink_flags" type_id: 0xe62ebf07 } +member { + id: 0x7cdb26d6 + name: "unlink_mask" + type_id: 0x33756485 + offset: 44416 +} member { id: 0x94611ed1 name: "unlinked" @@ -189734,6 +190958,17 @@ member { type_id: 0x3e10b518 offset: 64 } +member { + id: 0x3f51eb1f + name: "urb" + type_id: 0x0130219f +} +member { + id: 0x3feef7ab + name: "urb" + type_id: 0xbe2ea865 + offset: 704 +} member { id: 0xad94a034 name: "urb_dequeue" @@ -189885,6 +191120,12 @@ member { type_id: 0xe62ebf07 offset: 64 } +member { + id: 0x05a8e330 + name: "usage_count" + type_id: 0x74d29cf1 + offset: 1632 +} member { id: 0x05a8eb41 name: "usage_count" @@ -190117,6 +191358,12 @@ member { type_id: 0x3c9a9fb2 offset: 1408 } +member { + id: 0x075c8af5 + name: "usb_id" + type_id: 0xc9082b19 + offset: 1216 +} member { id: 0x171fc3a7 name: "usb_pd" @@ -202847,6 +204094,16 @@ struct_union { member_id: 0xb9227601 } } +struct_union { + id: 0x375c29e3 + kind: STRUCT + definition { + bytesize: 12 + member_id: 0xbc8db669 + member_id: 0x4cd5a4c0 + member_id: 0xd72d8bb2 + } +} struct_union { id: 0x378b9399 kind: STRUCT @@ -207829,6 +209086,45 @@ struct_union { member_id: 0xcbdb2ff3 } } +struct_union { + id: 0x15cdf971 + kind: STRUCT + name: "audioformat" + definition { + bytesize: 112 + member_id: 0x7c00ef52 + member_id: 0x28d35f34 + member_id: 0xea02d9cf + member_id: 0x2a040114 + member_id: 0xeec1037c + member_id: 0xcdc3e2ff + member_id: 0x81413fd2 + member_id: 0x92c6375b + member_id: 0xc7a83ff7 + member_id: 0xaae12e05 + member_id: 0x031bf6f0 + member_id: 0xd2b7290f 
+ member_id: 0xcd580f0d + member_id: 0xf089858f + member_id: 0x19c0d237 + member_id: 0x96fae129 + member_id: 0x052696c5 + member_id: 0x2926683d + member_id: 0x3029aa1e + member_id: 0xdc82cf73 + member_id: 0x4fcae9cb + member_id: 0x3f103f0c + member_id: 0x325fd91e + member_id: 0xe742b24c + member_id: 0x6cadac11 + member_id: 0x284ec037 + member_id: 0x459380da + member_id: 0x7ed53ea4 + member_id: 0xc68d6012 + member_id: 0xd39d535a + member_id: 0x05895873 + } +} struct_union { id: 0x30293561 kind: STRUCT @@ -226684,6 +227980,7 @@ struct_union { member_id: 0xbf9f3e0b member_id: 0xb54d95a7 member_id: 0x114cfa02 + member_id: 0x4f8c91e3 member_id: 0x201ba81a } } @@ -231501,6 +232798,20 @@ struct_union { member_id: 0x2d0faa51 } } +struct_union { + id: 0x44a88bea + kind: STRUCT + name: "media_ctl" + definition { + bytesize: 248 + member_id: 0xdda9231e + member_id: 0x68246ee4 + member_id: 0x02cbf0af + member_id: 0xa4ad6340 + member_id: 0x960530b0 + member_id: 0xf70ae3f7 + } +} struct_union { id: 0x72ce92ac kind: STRUCT @@ -246428,6 +247739,241 @@ struct_union { member_id: 0x935d3998 } } +struct_union { + id: 0xc8e8e4f1 + kind: STRUCT + name: "snd_urb_ctx" + definition { + bytesize: 256 + member_id: 0x3f51eb1f + member_id: 0x305177e3 + member_id: 0x48baa384 + member_id: 0xb5827b12 + member_id: 0xad5e28c7 + member_id: 0x483bc9b7 + member_id: 0x2adf5778 + member_id: 0xb06ecc6f + member_id: 0xb8c769bd + } +} +struct_union { + id: 0x43fa68d6 + kind: STRUCT + name: "snd_usb_audio" + definition { + bytesize: 400 + member_id: 0xad5e286c + member_id: 0xce349a4c + member_id: 0x6c9f8bc7 + member_id: 0x8a7e5be3 + member_id: 0x075c8af5 + member_id: 0x877d5113 + member_id: 0xad89857f + member_id: 0x5a8cea15 + member_id: 0x2256864e + member_id: 0x9e81eb03 + member_id: 0x05a8e330 + member_id: 0x63640b5d + member_id: 0xc871a772 + member_id: 0x298ab6a9 + member_id: 0x4c24eb73 + member_id: 0x783e63c0 + member_id: 0xc9708c29 + member_id: 0x58187514 + member_id: 0x8cb596c3 + member_id: 0x5201ebe3 + member_id: 0xb8986853 + member_id: 0xf7efab27 + member_id: 0x137ec745 + member_id: 0xe2cb80a8 + member_id: 0x80e66a61 + member_id: 0x207998f5 + member_id: 0x84ae6459 + member_id: 0x300a014e + member_id: 0xbdb80820 + member_id: 0xfb1e410b + member_id: 0x94cca9a4 + member_id: 0xdda92c8f + member_id: 0xc69c6653 + } +} +struct_union { + id: 0xb7b99e10 + kind: STRUCT + name: "snd_usb_clock_ref" + definition { + bytesize: 40 + member_id: 0x459382e7 + member_id: 0x1403255a + member_id: 0xe0f6c9e3 + member_id: 0x5fb57bae + member_id: 0x5472dd8c + member_id: 0x7c00e79b + } +} +struct_union { + id: 0xbb815e3f + kind: STRUCT + name: "snd_usb_endpoint" + definition { + bytesize: 5744 + member_id: 0xc6785d4f + member_id: 0xbb70df22 + member_id: 0x62a690da + member_id: 0xe0f6c5cb + member_id: 0xad1c431e + member_id: 0x1c3934f3 + member_id: 0x5c31f174 + member_id: 0x81413fd2 + member_id: 0x92c6375b + member_id: 0xc7a83ff7 + member_id: 0x72872ac4 + member_id: 0x90c1fac3 + member_id: 0x074c5274 + member_id: 0xbbf02f32 + member_id: 0x9ec45d54 + member_id: 0x6cdf2a62 + member_id: 0x3feef7ab + member_id: 0x11a14a0a + member_id: 0x9c92d4d9 + member_id: 0x0b6cdc5c + member_id: 0xf6bbf3f2 + member_id: 0x54a4d18c + member_id: 0x9c81b085 + member_id: 0x7cdb26d6 + member_id: 0x5e6786ba + member_id: 0xf7e957a9 + member_id: 0xaa5b83a9 + member_id: 0x99a9a891 + member_id: 0xdbefe5f2 + member_id: 0x0a3f8c5f + member_id: 0x1beea273 + member_id: 0x6b1f53ba + member_id: 0x8d8096c1 + member_id: 0x4cc73741 + member_id: 0x9af62beb + member_id: 0xe35e9914 + 
member_id: 0x39cdf0ed + member_id: 0x4fcae8b0 + member_id: 0xd40d27d0 + member_id: 0xea527421 + member_id: 0xbccab28d + member_id: 0xf740bb6e + member_id: 0x699bf8aa + member_id: 0x3316a2a2 + member_id: 0xb1694b94 + member_id: 0x3031a149 + member_id: 0xf820ab53 + member_id: 0xab6f20ab + member_id: 0x5e10fd0f + member_id: 0xb0732d0b + member_id: 0xd49a75eb + member_id: 0x079f821d + member_id: 0x5472d811 + member_id: 0x02f79d12 + member_id: 0x2899a5e8 + member_id: 0x15c836df + member_id: 0x4a281c7b + member_id: 0xc593684b + member_id: 0x50989b56 + member_id: 0x9c3b5585 + member_id: 0x92b660f8 + member_id: 0xb34b435a + member_id: 0x0f396cf3 + member_id: 0x2d1fea10 + member_id: 0x7c00e287 + } +} +struct_union { + id: 0xed759acf + kind: STRUCT + name: "snd_usb_iface_ref" + definition { + bytesize: 32 + member_id: 0x81413815 + member_id: 0x5472dfa8 + member_id: 0xe0f6c039 + member_id: 0xed0f8396 + member_id: 0x7c00e690 + } +} +struct_union { + id: 0xb49bd643 + kind: STRUCT + name: "snd_usb_packet_info" + definition { + bytesize: 196 + member_id: 0xb0700f6d + member_id: 0x483bc789 + } +} +struct_union { + id: 0x0cc547b2 + kind: STRUCT + name: "snd_usb_power_domain" + definition { + bytesize: 12 + member_id: 0x0e0f4b25 + member_id: 0x24dcb717 + member_id: 0xa3f385a4 + } +} +struct_union { + id: 0x8a6789b1 + kind: STRUCT + name: "snd_usb_stream" + definition { + bytesize: 440 + member_id: 0xc6785d4f + member_id: 0xa60a47a9 + member_id: 0xa6de1752 + member_id: 0x2a040fd0 + member_id: 0x87496810 + member_id: 0x7c00e99c + } +} +struct_union { + id: 0x08a804af + kind: STRUCT + name: "snd_usb_substream" + definition { + bytesize: 200 + member_id: 0xc01f41e7 + member_id: 0xce349a4c + member_id: 0xb1c38868 + member_id: 0xa0ffe776 + member_id: 0xd28d836d + member_id: 0x15c83da7 + member_id: 0x4d41138a + member_id: 0x8ca4039d + member_id: 0x305e2804 + member_id: 0xd84fbda6 + member_id: 0x2a040547 + member_id: 0x3b313ebb + member_id: 0x9e2efb28 + member_id: 0x0492e97a + member_id: 0x3d77e8c3 + member_id: 0xfaea6330 + member_id: 0x6fe5be72 + member_id: 0xb6590178 + member_id: 0xcd47b1ad + member_id: 0xabb0a664 + member_id: 0x1c1b9943 + member_id: 0x1f84d6b1 + member_id: 0x349fecf2 + member_id: 0x2d5bf9c9 + member_id: 0xa0af0fa0 + member_id: 0x28d35b80 + member_id: 0x7bcccefc + member_id: 0x608f71fe + member_id: 0x2d1fe606 + member_id: 0xc312300f + member_id: 0xc6d7422d + member_id: 0xe42cffef + member_id: 0x079f80c5 + member_id: 0x32f10052 + } +} struct_union { id: 0x74799f87 kind: STRUCT @@ -279326,6 +280872,12 @@ function { parameter_id: 0x09451098 parameter_id: 0x18bd6530 } +function { + id: 0x12b21f36 + return_type_id: 0x48b5725f + parameter_id: 0x08ba388c + parameter_id: 0x0130219f +} function { id: 0x12b4c1e0 return_type_id: 0x48b5725f @@ -280083,6 +281635,12 @@ function { return_type_id: 0x48b5725f parameter_id: 0x0d8bad22 } +function { + id: 0x1401698f + return_type_id: 0x48b5725f + parameter_id: 0x1285100d + parameter_id: 0x0e1f07df +} function { id: 0x1402e05e return_type_id: 0x48b5725f @@ -280237,6 +281795,11 @@ function { parameter_id: 0x39c6a784 parameter_id: 0x92233392 } +function { + id: 0x14391590 + return_type_id: 0x48b5725f + parameter_id: 0x1285100d +} function { id: 0x1439657e return_type_id: 0x48b5725f @@ -280572,6 +282135,12 @@ function { parameter_id: 0x4478ba6b parameter_id: 0x4754c3e4 } +function { + id: 0x14bfdedb + return_type_id: 0x48b5725f + parameter_id: 0x1285100d + parameter_id: 0x21b2d2f4 +} function { id: 0x14c1b0e8 return_type_id: 0x48b5725f @@ -281143,6 +282712,11 @@ 
function { parameter_id: 0x2936263d parameter_id: 0x6720d32f } +function { + id: 0x1603f977 + return_type_id: 0x48b5725f + parameter_id: 0x1a6ea392 +} function { id: 0x1604193e return_type_id: 0x48b5725f @@ -281500,6 +283074,12 @@ function { parameter_id: 0x3e10b518 parameter_id: 0xf435685e } +function { + id: 0x169238cf + return_type_id: 0x48b5725f + parameter_id: 0x1a6ea392 + parameter_id: 0x24706e28 +} function { id: 0x16983453 return_type_id: 0x48b5725f @@ -287381,6 +288961,12 @@ function { parameter_id: 0x3f0185ef parameter_id: 0x4585663f } +function { + id: 0x1f5469d6 + return_type_id: 0x48b5725f + parameter_id: 0x3e10b518 + parameter_id: 0x120540d1 +} function { id: 0x1f558355 return_type_id: 0x48b5725f @@ -287622,6 +289208,15 @@ function { parameter_id: 0x3e10b518 parameter_id: 0xa52a0930 } +function { + id: 0x1fce8843 + return_type_id: 0x24706e28 + parameter_id: 0x1a6ea392 + parameter_id: 0x3cc89d58 + parameter_id: 0x345a9df3 + parameter_id: 0x6d7f5ff6 + parameter_id: 0x6d7f5ff6 +} function { id: 0x1fd10421 return_type_id: 0x48b5725f @@ -300483,6 +302078,15 @@ function { parameter_id: 0x4585663f parameter_id: 0x2e18f543 } +function { + id: 0x99011cda + return_type_id: 0x6720d32f + parameter_id: 0x1285100d + parameter_id: 0x0e1f07df + parameter_id: 0x6720d32f + parameter_id: 0x4585663f + parameter_id: 0x6720d32f +} function { id: 0x99015564 return_type_id: 0x6720d32f @@ -301720,6 +303324,14 @@ function { parameter_id: 0x1aeeade4 parameter_id: 0x6720d32f } +function { + id: 0x99afdc89 + return_type_id: 0x6720d32f + parameter_id: 0x1285100d + parameter_id: 0x21b2d2f4 + parameter_id: 0x2dd9d6a6 + parameter_id: 0xf1a6dfed +} function { id: 0x99b03c33 return_type_id: 0x6720d32f @@ -303379,6 +304991,11 @@ function { parameter_id: 0x13580d6c parameter_id: 0x13580d6c } +function { + id: 0x9b1b4bcb + return_type_id: 0x6720d32f + parameter_id: 0x1a6ea392 +} function { id: 0x9b1c809c return_type_id: 0x6720d32f @@ -303483,6 +305100,12 @@ function { parameter_id: 0x1b8590a8 parameter_id: 0x120540d1 } +function { + id: 0x9b2ba01c + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x21069feb +} function { id: 0x9b2c93fd return_type_id: 0x6720d32f @@ -303498,6 +305121,13 @@ function { parameter_id: 0x27a7c613 parameter_id: 0x27a7c613 } +function { + id: 0x9b2d3bb4 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x21069feb + parameter_id: 0x1a6ea392 +} function { id: 0x9b2eba1d return_type_id: 0x6720d32f @@ -303924,6 +305554,25 @@ function { parameter_id: 0x347303b4 parameter_id: 0x945e7ef6 } +function { + id: 0x9b5d6db7 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x3f84ee3c + parameter_id: 0x33756485 + parameter_id: 0x064d6086 +} +function { + id: 0x9b5db5aa + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x3f84ee3c + parameter_id: 0x33756485 + parameter_id: 0x33756485 + parameter_id: 0x35304fb1 + parameter_id: 0x6d7f5ff6 + parameter_id: 0x13580d6c +} function { id: 0x9b5e2574 return_type_id: 0x6720d32f @@ -304387,6 +306036,12 @@ function { parameter_id: 0x01241c02 parameter_id: 0x945e7ef6 } +function { + id: 0x9b8a8a73 + return_type_id: 0x6720d32f + parameter_id: 0x1a6ea392 + parameter_id: 0x24706e28 +} function { id: 0x9b8aad7a return_type_id: 0x6720d32f @@ -304659,6 +306314,13 @@ function { parameter_id: 0x00c83ba6 parameter_id: 0x11cfee5a } +function { + id: 0x9ba938bd + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x0258f96e + parameter_id: 0x3f84ee3c +} function { id: 
0x9ba95149 return_type_id: 0x6720d32f @@ -310187,6 +311849,13 @@ function { parameter_id: 0x00c72527 parameter_id: 0x295c7202 } +function { + id: 0x9fb1f25d + return_type_id: 0x6720d32f + parameter_id: 0x08ba388c + parameter_id: 0x0130219f + parameter_id: 0x6d7f5ff6 +} function { id: 0x9fb27590 return_type_id: 0x6720d32f @@ -311003,6 +312672,13 @@ function { parameter_id: 0x0483e6f8 parameter_id: 0xf435685e } +function { + id: 0xa8db3e38 + return_type_id: 0x0e1f07df + parameter_id: 0x1285100d + parameter_id: 0x6d7f5ff6 + parameter_id: 0xf1a6dfed +} function { id: 0xa96969e0 return_type_id: 0x39a4e83f @@ -311026,6 +312702,16 @@ function { parameter_id: 0x0258f96e parameter_id: 0x3e10b518 } +function { + id: 0xa9d11801 + return_type_id: 0x21b2d2f4 + parameter_id: 0x1285100d + parameter_id: 0x4585663f + parameter_id: 0x4585663f + parameter_id: 0x5268af9d + parameter_id: 0x4585663f + parameter_id: 0xf1a6dfed +} function { id: 0xaa55ab2e return_type_id: 0x31b5a66f @@ -311331,6 +313017,12 @@ function { parameter_id: 0x11281698 parameter_id: 0x38d23361 } +function { + id: 0xb367c064 + return_type_id: 0xe02e14d6 + parameter_id: 0x3ef55b88 + parameter_id: 0x31da1e83 +} function { id: 0xb3a98317 return_type_id: 0x067c4b9a @@ -317009,6 +318701,15 @@ elf_symbol { type_id: 0x9ba7ef87 full_name: "__traceiter_android_rvh_arm64_serror_panic" } +elf_symbol { + id: 0x48420da9 + name: "__traceiter_android_rvh_audio_usb_offload_disconnect" + is_defined: true + symbol_type: FUNCTION + crc: 0x12459ac9 + type_id: 0x9b2ba01c + full_name: "__traceiter_android_rvh_audio_usb_offload_disconnect" +} elf_symbol { id: 0x192bbbd5 name: "__traceiter_android_rvh_build_perf_domains" @@ -317270,6 +318971,33 @@ elf_symbol { type_id: 0x9b8eabb4 full_name: "__traceiter_android_rvh_gic_v3_set_affinity" } +elf_symbol { + id: 0x0b1353da + name: "__traceiter_android_rvh_iommu_alloc_insert_iova" + is_defined: true + symbol_type: FUNCTION + crc: 0x101a2d5e + type_id: 0x9b5db5aa + full_name: "__traceiter_android_rvh_iommu_alloc_insert_iova" +} +elf_symbol { + id: 0x7edcea8d + name: "__traceiter_android_rvh_iommu_iovad_init_alloc_algo" + is_defined: true + symbol_type: FUNCTION + crc: 0x86b8d9b5 + type_id: 0x9ba938bd + full_name: "__traceiter_android_rvh_iommu_iovad_init_alloc_algo" +} +elf_symbol { + id: 0x72c43156 + name: "__traceiter_android_rvh_iommu_limit_align_shift" + is_defined: true + symbol_type: FUNCTION + crc: 0xddb7c31f + type_id: 0x9b5d6db7 + full_name: "__traceiter_android_rvh_iommu_limit_align_shift" +} elf_symbol { id: 0x61e95c07 name: "__traceiter_android_rvh_iommu_setup_dma_ops" @@ -317666,6 +319394,15 @@ elf_symbol { type_id: 0x9bdbdcc4 full_name: "__traceiter_android_rvh_wake_up_new_task" } +elf_symbol { + id: 0x528da532 + name: "__traceiter_android_vh_audio_usb_offload_connect" + is_defined: true + symbol_type: FUNCTION + crc: 0x93b3f3db + type_id: 0x9b2d3bb4 + full_name: "__traceiter_android_vh_audio_usb_offload_connect" +} elf_symbol { id: 0xc6c9353c name: "__traceiter_android_vh_binder_restore_priority" @@ -318422,6 +320159,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_rvh_arm64_serror_panic" } +elf_symbol { + id: 0xaf461bff + name: "__tracepoint_android_rvh_audio_usb_offload_disconnect" + is_defined: true + symbol_type: OBJECT + crc: 0xb46edc7c + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_rvh_audio_usb_offload_disconnect" +} elf_symbol { id: 0x1e8a7e23 name: "__tracepoint_android_rvh_build_perf_domains" @@ -318683,6 +320429,33 @@ elf_symbol { type_id: 0x18ccbd2c full_name: 
"__tracepoint_android_rvh_gic_v3_set_affinity" } +elf_symbol { + id: 0x58127eb8 + name: "__tracepoint_android_rvh_iommu_alloc_insert_iova" + is_defined: true + symbol_type: OBJECT + crc: 0x8a848d88 + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_rvh_iommu_alloc_insert_iova" +} +elf_symbol { + id: 0x0991fcbf + name: "__tracepoint_android_rvh_iommu_iovad_init_alloc_algo" + is_defined: true + symbol_type: OBJECT + crc: 0x74057cd0 + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_rvh_iommu_iovad_init_alloc_algo" +} +elf_symbol { + id: 0x693af5b0 + name: "__tracepoint_android_rvh_iommu_limit_align_shift" + is_defined: true + symbol_type: OBJECT + crc: 0xe72e34ff + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_rvh_iommu_limit_align_shift" +} elf_symbol { id: 0x59d74b45 name: "__tracepoint_android_rvh_iommu_setup_dma_ops" @@ -319079,6 +320852,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_rvh_wake_up_new_task" } +elf_symbol { + id: 0xfb7cdd24 + name: "__tracepoint_android_vh_audio_usb_offload_connect" + is_defined: true + symbol_type: OBJECT + crc: 0x33a52dcb + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_audio_usb_offload_connect" +} elf_symbol { id: 0x57a9a36a name: "__tracepoint_android_vh_binder_restore_priority" @@ -325740,6 +327522,15 @@ elf_symbol { type_id: 0x83bdcb7d full_name: "debugfs_lookup" } +elf_symbol { + id: 0x0d94ff56 + name: "debugfs_lookup_and_remove" + is_defined: true + symbol_type: FUNCTION + crc: 0x04b0b18c + type_id: 0x1f5469d6 + full_name: "debugfs_lookup_and_remove" +} elf_symbol { id: 0xc28b6d25 name: "debugfs_remove" @@ -329362,6 +331153,15 @@ elf_symbol { type_id: 0xab344d2b full_name: "drm_atomic_get_old_connector_for_encoder" } +elf_symbol { + id: 0xd9c6b1b6 + name: "drm_atomic_get_old_private_obj_state" + is_defined: true + symbol_type: FUNCTION + crc: 0x4084ee9b + type_id: 0x8715e05e + full_name: "drm_atomic_get_old_private_obj_state" +} elf_symbol { id: 0x0a479d68 name: "drm_atomic_get_plane_state" @@ -352705,6 +354505,51 @@ elf_symbol { type_id: 0x158152d0 full_name: "snd_timer_interrupt" } +elf_symbol { + id: 0xc6d51e10 + name: "snd_usb_autoresume" + is_defined: true + symbol_type: FUNCTION + crc: 0x1644be2c + type_id: 0x9b1b4bcb + full_name: "snd_usb_autoresume" +} +elf_symbol { + id: 0x9c066dd7 + name: "snd_usb_autosuspend" + is_defined: true + symbol_type: FUNCTION + crc: 0x5395c7d8 + type_id: 0x1603f977 + full_name: "snd_usb_autosuspend" +} +elf_symbol { + id: 0x1c3814ad + name: "snd_usb_endpoint_close" + is_defined: true + symbol_type: FUNCTION + crc: 0x808d1236 + type_id: 0x169238cf + full_name: "snd_usb_endpoint_close" +} +elf_symbol { + id: 0xd2e3da2e + name: "snd_usb_endpoint_open" + is_defined: true + symbol_type: FUNCTION + crc: 0x8dd8595c + type_id: 0x1fce8843 + full_name: "snd_usb_endpoint_open" +} +elf_symbol { + id: 0x14184fbf + name: "snd_usb_endpoint_prepare" + is_defined: true + symbol_type: FUNCTION + crc: 0xc31318aa + type_id: 0x9b8a8a73 + full_name: "snd_usb_endpoint_prepare" +} elf_symbol { id: 0x32cae5ee name: "snprintf" @@ -360286,6 +362131,33 @@ elf_symbol { type_id: 0x62e6e159 full_name: "xfrm_stateonly_find" } +elf_symbol { + id: 0xdf73207c + name: "xhci_alloc_command" + is_defined: true + symbol_type: FUNCTION + crc: 0xcb7d93e7 + type_id: 0xa8db3e38 + full_name: "xhci_alloc_command" +} +elf_symbol { + id: 0xd232cbb1 + name: "xhci_alloc_erst" + is_defined: true + symbol_type: FUNCTION + crc: 0xd6a06f4b + type_id: 0x99afdc89 + full_name: "xhci_alloc_erst" +} +elf_symbol { 
+ id: 0xa6eac918 + name: "xhci_free_command" + is_defined: true + symbol_type: FUNCTION + crc: 0xb0ebca96 + type_id: 0x1401698f + full_name: "xhci_free_command" +} elf_symbol { id: 0xc46f1f1f name: "xhci_gen_setup" @@ -360313,6 +362185,15 @@ elf_symbol { type_id: 0x14d9dc3c full_name: "xhci_init_driver" } +elf_symbol { + id: 0xfa57b81b + name: "xhci_queue_stop_endpoint" + is_defined: true + symbol_type: FUNCTION + crc: 0xdbcc16ee + type_id: 0x99011cda + full_name: "xhci_queue_stop_endpoint" +} elf_symbol { id: 0x4603c1d4 name: "xhci_resume" @@ -360322,6 +362203,33 @@ elf_symbol { type_id: 0x98945a53 full_name: "xhci_resume" } +elf_symbol { + id: 0x54c09325 + name: "xhci_ring_alloc" + is_defined: true + symbol_type: FUNCTION + crc: 0x83fc5b72 + type_id: 0xa9d11801 + full_name: "xhci_ring_alloc" +} +elf_symbol { + id: 0xf1838f43 + name: "xhci_ring_cmd_db" + is_defined: true + symbol_type: FUNCTION + crc: 0xa08e2eae + type_id: 0x14391590 + full_name: "xhci_ring_cmd_db" +} +elf_symbol { + id: 0x9324879f + name: "xhci_ring_free" + is_defined: true + symbol_type: FUNCTION + crc: 0xa4f482b9 + type_id: 0x14bfdedb + full_name: "xhci_ring_free" +} elf_symbol { id: 0xcde37345 name: "xhci_run" @@ -360340,6 +362248,15 @@ elf_symbol { type_id: 0x98945a53 full_name: "xhci_suspend" } +elf_symbol { + id: 0x5827485d + name: "xhci_trb_virt_to_dma" + is_defined: true + symbol_type: FUNCTION + crc: 0x73ab2cd9 + type_id: 0xb367c064 + full_name: "xhci_trb_virt_to_dma" +} elf_symbol { id: 0x23b2a72a name: "xp_alloc" @@ -361811,6 +363728,10 @@ symbols { key: "__traceiter_android_rvh_arm64_serror_panic" value: 0x0b48afa1 } + symbol { + key: "__traceiter_android_rvh_audio_usb_offload_disconnect" + value: 0x48420da9 + } symbol { key: "__traceiter_android_rvh_build_perf_domains" value: 0x192bbbd5 @@ -361927,6 +363848,18 @@ symbols { key: "__traceiter_android_rvh_gic_v3_set_affinity" value: 0x41d29e07 } + symbol { + key: "__traceiter_android_rvh_iommu_alloc_insert_iova" + value: 0x0b1353da + } + symbol { + key: "__traceiter_android_rvh_iommu_iovad_init_alloc_algo" + value: 0x7edcea8d + } + symbol { + key: "__traceiter_android_rvh_iommu_limit_align_shift" + value: 0x72c43156 + } symbol { key: "__traceiter_android_rvh_iommu_setup_dma_ops" value: 0x61e95c07 @@ -362103,6 +364036,10 @@ symbols { key: "__traceiter_android_rvh_wake_up_new_task" value: 0xebcd0234 } + symbol { + key: "__traceiter_android_vh_audio_usb_offload_connect" + value: 0x528da532 + } symbol { key: "__traceiter_android_vh_binder_restore_priority" value: 0xc6c9353c @@ -362439,6 +364376,10 @@ symbols { key: "__tracepoint_android_rvh_arm64_serror_panic" value: 0x748c1fd7 } + symbol { + key: "__tracepoint_android_rvh_audio_usb_offload_disconnect" + value: 0xaf461bff + } symbol { key: "__tracepoint_android_rvh_build_perf_domains" value: 0x1e8a7e23 @@ -362555,6 +364496,18 @@ symbols { key: "__tracepoint_android_rvh_gic_v3_set_affinity" value: 0x47c27595 } + symbol { + key: "__tracepoint_android_rvh_iommu_alloc_insert_iova" + value: 0x58127eb8 + } + symbol { + key: "__tracepoint_android_rvh_iommu_iovad_init_alloc_algo" + value: 0x0991fcbf + } + symbol { + key: "__tracepoint_android_rvh_iommu_limit_align_shift" + value: 0x693af5b0 + } symbol { key: "__tracepoint_android_rvh_iommu_setup_dma_ops" value: 0x59d74b45 @@ -362731,6 +364684,10 @@ symbols { key: "__tracepoint_android_rvh_wake_up_new_task" value: 0xdcf22716 } + symbol { + key: "__tracepoint_android_vh_audio_usb_offload_connect" + value: 0xfb7cdd24 + } symbol { key: 
"__tracepoint_android_vh_binder_restore_priority" value: 0x57a9a36a @@ -365695,6 +367652,10 @@ symbols { key: "debugfs_lookup" value: 0x5e671464 } + symbol { + key: "debugfs_lookup_and_remove" + value: 0x0d94ff56 + } symbol { key: "debugfs_remove" value: 0xc28b6d25 @@ -367299,6 +369260,10 @@ symbols { key: "drm_atomic_get_old_connector_for_encoder" value: 0xf53e2c51 } + symbol { + key: "drm_atomic_get_old_private_obj_state" + value: 0xd9c6b1b6 + } symbol { key: "drm_atomic_get_plane_state" value: 0x0a479d68 @@ -377675,6 +379640,26 @@ symbols { key: "snd_timer_interrupt" value: 0xd30316c2 } + symbol { + key: "snd_usb_autoresume" + value: 0xc6d51e10 + } + symbol { + key: "snd_usb_autosuspend" + value: 0x9c066dd7 + } + symbol { + key: "snd_usb_endpoint_close" + value: 0x1c3814ad + } + symbol { + key: "snd_usb_endpoint_open" + value: 0xd2e3da2e + } + symbol { + key: "snd_usb_endpoint_prepare" + value: 0x14184fbf + } symbol { key: "snprintf" value: 0x32cae5ee @@ -381047,6 +383032,18 @@ symbols { key: "xfrm_stateonly_find" value: 0x64876ecd } + symbol { + key: "xhci_alloc_command" + value: 0xdf73207c + } + symbol { + key: "xhci_alloc_erst" + value: 0xd232cbb1 + } + symbol { + key: "xhci_free_command" + value: 0xa6eac918 + } symbol { key: "xhci_gen_setup" value: 0xc46f1f1f @@ -381059,10 +383056,26 @@ symbols { key: "xhci_init_driver" value: 0x71f2808e } + symbol { + key: "xhci_queue_stop_endpoint" + value: 0xfa57b81b + } symbol { key: "xhci_resume" value: 0x4603c1d4 } + symbol { + key: "xhci_ring_alloc" + value: 0x54c09325 + } + symbol { + key: "xhci_ring_cmd_db" + value: 0xf1838f43 + } + symbol { + key: "xhci_ring_free" + value: 0x9324879f + } symbol { key: "xhci_run" value: 0xcde37345 @@ -381071,6 +383084,10 @@ symbols { key: "xhci_suspend" value: 0xb834cd77 } + symbol { + key: "xhci_trb_virt_to_dma" + value: 0x5827485d + } symbol { key: "xp_alloc" value: 0x23b2a72a diff --git a/android/abi_gki_aarch64_db845c b/android/abi_gki_aarch64_db845c index c504b951d788..d8d064a847cf 100644 --- a/android/abi_gki_aarch64_db845c +++ b/android/abi_gki_aarch64_db845c @@ -70,7 +70,6 @@ debugfs_create_file debugfs_create_u32 debugfs_create_x32 - debugfs_lookup debugfs_remove default_llseek delayed_work_timer_fn @@ -1006,6 +1005,7 @@ drm_atomic_get_connector_state drm_atomic_get_crtc_state drm_atomic_get_new_private_obj_state + drm_atomic_get_old_private_obj_state drm_connector_register drm_connector_unregister __drm_crtc_commit_free @@ -1747,6 +1747,9 @@ rproc_get_by_child try_wait_for_completion +# required by qcom_tsens.ko + debugfs_lookup + # required by qcom_usb_vbus-regulator.ko regulator_get_current_limit_regmap regulator_set_current_limit_regmap @@ -2002,6 +2005,7 @@ ufshcd_uic_hibern8_exit # required by ulpi.ko + debugfs_lookup_and_remove of_clk_set_defaults of_device_request_module __request_module diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom index 8d9b24ecc195..0ec2a95eaaed 100644 --- a/android/abi_gki_aarch64_qcom +++ b/android/abi_gki_aarch64_qcom @@ -137,10 +137,10 @@ bpf_prog_put bpf_prog_sub bpf_stats_enabled_key - bpf_trace_run1 bpf_trace_run10 bpf_trace_run11 bpf_trace_run12 + bpf_trace_run1 bpf_trace_run2 bpf_trace_run3 bpf_trace_run4 @@ -1177,6 +1177,7 @@ __fdget fd_install fget + _find_first_and_bit _find_first_bit _find_first_zero_bit find_get_pid @@ -1633,8 +1634,8 @@ ieee80211_txq_may_transmit ieee80211_txq_schedule_start ieee80211_tx_rate_update - ieee80211_tx_status ieee80211_tx_status_8023 + ieee80211_tx_status ieee80211_tx_status_ext 
ieee80211_tx_status_irqsafe ieee80211_unregister_hw @@ -2087,8 +2088,8 @@ memremap memscan mem_section - memset memset64 + memset __memset_io memstart_addr memunmap @@ -2226,10 +2227,10 @@ nla_find nla_memcpy __nla_parse - nla_put nla_put_64bit - nla_reserve + nla_put nla_reserve_64bit + nla_reserve nla_strscpy __nla_validate __nlmsg_put @@ -3189,6 +3190,11 @@ snd_soc_unregister_card snd_soc_unregister_component snd_timer_interrupt + snd_usb_autoresume + snd_usb_autosuspend + snd_usb_endpoint_close + snd_usb_endpoint_open + snd_usb_endpoint_prepare snprintf soc_device_register soc_device_unregister @@ -3408,6 +3414,7 @@ __traceiter_android_rvh_account_irq __traceiter_android_rvh_after_dequeue_task __traceiter_android_rvh_after_enqueue_task + __traceiter_android_rvh_audio_usb_offload_disconnect __traceiter_android_rvh_build_perf_domains __traceiter_android_rvh_can_migrate_task __traceiter_android_rvh_check_preempt_tick @@ -3431,6 +3438,9 @@ __traceiter_android_rvh_flush_task __traceiter_android_rvh_get_nohz_timer_target __traceiter_android_rvh_gic_v3_set_affinity + __traceiter_android_rvh_iommu_alloc_insert_iova + __traceiter_android_rvh_iommu_iovad_init_alloc_algo + __traceiter_android_rvh_iommu_limit_align_shift __traceiter_android_rvh_iommu_setup_dma_ops __traceiter_android_rvh_is_cpu_allowed __traceiter_android_rvh_migrate_queued_task @@ -3473,6 +3483,7 @@ __traceiter_android_rvh_update_thermal_stats __traceiter_android_rvh_util_est_update __traceiter_android_rvh_wake_up_new_task + __traceiter_android_vh_audio_usb_offload_connect __traceiter_android_vh_binder_restore_priority __traceiter_android_vh_binder_set_priority __traceiter_android_vh_binder_wakeup_ilocked @@ -3519,6 +3530,7 @@ __traceiter_gpu_mem_total __traceiter_ipi_entry __traceiter_ipi_raise + __traceiter_irq_handler_entry __traceiter_mmap_lock_acquire_returned __traceiter_mmap_lock_released __traceiter_mmap_lock_start_locking @@ -3530,6 +3542,7 @@ __tracepoint_android_rvh_account_irq __tracepoint_android_rvh_after_dequeue_task __tracepoint_android_rvh_after_enqueue_task + __tracepoint_android_rvh_audio_usb_offload_disconnect __tracepoint_android_rvh_build_perf_domains __tracepoint_android_rvh_can_migrate_task __tracepoint_android_rvh_check_preempt_tick @@ -3553,6 +3566,9 @@ __tracepoint_android_rvh_flush_task __tracepoint_android_rvh_get_nohz_timer_target __tracepoint_android_rvh_gic_v3_set_affinity + __tracepoint_android_rvh_iommu_alloc_insert_iova + __tracepoint_android_rvh_iommu_iovad_init_alloc_algo + __tracepoint_android_rvh_iommu_limit_align_shift __tracepoint_android_rvh_iommu_setup_dma_ops __tracepoint_android_rvh_is_cpu_allowed __tracepoint_android_rvh_migrate_queued_task @@ -3595,6 +3611,7 @@ __tracepoint_android_rvh_update_thermal_stats __tracepoint_android_rvh_util_est_update __tracepoint_android_rvh_wake_up_new_task + __tracepoint_android_vh_audio_usb_offload_connect __tracepoint_android_vh_binder_restore_priority __tracepoint_android_vh_binder_set_priority __tracepoint_android_vh_binder_wakeup_ilocked @@ -3641,6 +3658,7 @@ __tracepoint_gpu_mem_total __tracepoint_ipi_entry __tracepoint_ipi_raise + __tracepoint_irq_handler_entry __tracepoint_mmap_lock_acquire_returned __tracepoint_mmap_lock_released __tracepoint_mmap_lock_start_locking @@ -4097,7 +4115,15 @@ xdp_rxq_info_unreg_mem_model xdp_warn xfrm_lookup + xhci_alloc_command + xhci_alloc_erst + xhci_free_command xhci_get_endpoint_index + xhci_queue_stop_endpoint + xhci_ring_alloc + xhci_ring_cmd_db + xhci_ring_free + xhci_trb_virt_to_dma xp_alloc xp_dma_map 
xp_dma_sync_for_cpu_slow diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c index 5b60c248de9e..cbefa5a77384 100644 --- a/arch/alpha/kernel/module.c +++ b/arch/alpha/kernel/module.c @@ -146,10 +146,8 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr; symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr; - /* The small sections were sorted to the end of the segment. - The following should definitely cover them. */ - gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000; got = sechdrs[me->arch.gotsecindex].sh_addr; + gp = got + 0x8000; for (i = 0; i < n; i++) { unsigned long r_sym = ELF64_R_SYM (rela[i].r_info); diff --git a/arch/arm/boot/dts/spear320-hmi.dts b/arch/arm/boot/dts/spear320-hmi.dts index 34503ac9c51c..721e5ee7b680 100644 --- a/arch/arm/boot/dts/spear320-hmi.dts +++ b/arch/arm/boot/dts/spear320-hmi.dts @@ -241,7 +241,7 @@ irq-trigger = <0x1>; stmpegpio: stmpe-gpio { - compatible = "stmpe,gpio"; + compatible = "st,stmpe-gpio"; reg = <0>; gpio-controller; #gpio-cells = <2>; diff --git a/arch/arm64/configs/gki_defconfig b/arch/arm64/configs/gki_defconfig index 2ace35211d9a..2fc21f5d7831 100644 --- a/arch/arm64/configs/gki_defconfig +++ b/arch/arm64/configs/gki_defconfig @@ -243,7 +243,6 @@ CONFIG_NET_SCH_FQ_CODEL=y CONFIG_NET_SCH_FQ=y CONFIG_NET_SCH_INGRESS=y CONFIG_NET_CLS_BASIC=y -CONFIG_NET_CLS_TCINDEX=y CONFIG_NET_CLS_FW=y CONFIG_NET_CLS_U32=y CONFIG_CLS_U32_MARK=y @@ -508,6 +507,7 @@ CONFIG_TYPEC=y CONFIG_TYPEC_TCPM=y CONFIG_TYPEC_TCPCI=y CONFIG_TYPEC_UCSI=y +CONFIG_TYPEC_DP_ALTMODE=y CONFIG_MMC=y # CONFIG_PWRSEQ_EMMC is not set # CONFIG_PWRSEQ_SIMPLE is not set diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index b13c22046de5..62c846be2d76 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -33,7 +33,7 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); ({ \ efi_virtmap_load(); \ __efi_fpsimd_begin(); \ - spin_lock(&efi_rt_lock); \ + raw_spin_lock(&efi_rt_lock); \ }) #undef arch_efi_call_virt @@ -42,12 +42,12 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); #define arch_efi_call_virt_teardown() \ ({ \ - spin_unlock(&efi_rt_lock); \ + raw_spin_unlock(&efi_rt_lock); \ __efi_fpsimd_end(); \ efi_virtmap_unload(); \ }) -extern spinlock_t efi_rt_lock; +extern raw_spinlock_t efi_rt_lock; extern u64 *efi_rt_stack_top; efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...); diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h index 760c62f8e22f..3f8199ba265a 100644 --- a/arch/arm64/include/asm/mte.h +++ b/arch/arm64/include/asm/mte.h @@ -37,6 +37,29 @@ void mte_free_tag_storage(char *storage); /* track which pages have valid allocation tags */ #define PG_mte_tagged PG_arch_2 +static inline void set_page_mte_tagged(struct page *page) +{ + /* + * Ensure that the tags written prior to this function are visible + * before the page flags update. + */ + smp_wmb(); + set_bit(PG_mte_tagged, &page->flags); +} + +static inline bool page_mte_tagged(struct page *page) +{ + bool ret = test_bit(PG_mte_tagged, &page->flags); + + /* + * If the page is tagged, ensure ordering with a likely subsequent + * read of the tags. 
+ */ + if (ret) + smp_rmb(); + return ret; +} + void mte_zero_clear_page_tags(void *addr); void mte_sync_tags(pte_t old_pte, pte_t pte); void mte_copy_page_tags(void *kto, const void *kfrom); @@ -56,6 +79,13 @@ size_t mte_probe_user_range(const char __user *uaddr, size_t size); /* unused if !CONFIG_ARM64_MTE, silence the compiler */ #define PG_mte_tagged 0 +static inline void set_page_mte_tagged(struct page *page) +{ +} +static inline bool page_mte_tagged(struct page *page) +{ + return false; +} static inline void mte_zero_clear_page_tags(void *addr) { } diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index f1cfc44ef52f..5d0f1f7b7600 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -1050,7 +1050,7 @@ static inline void arch_swap_invalidate_area(int type) static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio) { if (system_supports_mte() && mte_restore_tags(entry, &folio->page)) - set_bit(PG_mte_tagged, &folio->flags); + set_page_mte_tagged(&folio->page); } #endif /* CONFIG_ARM64_MTE */ diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 693c38746832..cfea7a61a5b8 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2075,8 +2075,10 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap) * Clear the tags in the zero page. This needs to be done via the * linear map which has the Tagged attribute. */ - if (!test_and_set_bit(PG_mte_tagged, &ZERO_PAGE(0)->flags)) + if (!page_mte_tagged(ZERO_PAGE(0))) { mte_clear_page_tags(lm_alias(empty_zero_page)); + set_page_mte_tagged(ZERO_PAGE(0)); + } kasan_init_hw_tags_cpu(); } diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index b273900f4566..a30dbe4b95cd 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -146,7 +146,7 @@ asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f) return s; } -DEFINE_SPINLOCK(efi_rt_lock); +DEFINE_RAW_SPINLOCK(efi_rt_lock); asmlinkage u64 *efi_rt_stack_top __ro_after_init; diff --git a/arch/arm64/kernel/elfcore.c b/arch/arm64/kernel/elfcore.c index 662a61e5e75e..2e94d20c4ac7 100644 --- a/arch/arm64/kernel/elfcore.c +++ b/arch/arm64/kernel/elfcore.c @@ -46,7 +46,7 @@ static int mte_dump_tag_range(struct coredump_params *cprm, * Pages mapped in user space as !pte_access_permitted() (e.g. * PROT_EXEC only) may not have the PG_mte_tagged flag set. 
*/ - if (!test_bit(PG_mte_tagged, &page->flags)) { + if (!page_mte_tagged(page)) { put_page(page); dump_skip(cprm, MTE_PAGE_TAG_STORAGE); continue; diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index af5df48ba915..788597a6b6a2 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -271,7 +271,7 @@ static int swsusp_mte_save_tags(void) if (!page) continue; - if (!test_bit(PG_mte_tagged, &page->flags)) + if (!page_mte_tagged(page)) continue; ret = save_tags(page, pfn); diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index 7467217c1eaf..84a085d536f8 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -41,8 +41,10 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte, if (check_swap && is_swap_pte(old_pte)) { swp_entry_t entry = pte_to_swp_entry(old_pte); - if (!non_swap_entry(entry) && mte_restore_tags(entry, page)) + if (!non_swap_entry(entry) && mte_restore_tags(entry, page)) { + set_page_mte_tagged(page); return; + } } if (!pte_is_tagged) @@ -52,8 +54,10 @@ static void mte_sync_page_tags(struct page *page, pte_t old_pte, * Test PG_mte_tagged again in case it was racing with another * set_pte_at(). */ - if (!test_and_set_bit(PG_mte_tagged, &page->flags)) + if (!page_mte_tagged(page)) { mte_clear_page_tags(page_address(page)); + set_page_mte_tagged(page); + } } void mte_sync_tags(pte_t old_pte, pte_t pte) @@ -69,9 +73,11 @@ void mte_sync_tags(pte_t old_pte, pte_t pte) /* if PG_mte_tagged is set, tags have already been initialised */ for (i = 0; i < nr_pages; i++, page++) { - if (!test_bit(PG_mte_tagged, &page->flags)) + if (!page_mte_tagged(page)) { mte_sync_page_tags(page, old_pte, check_swap, pte_is_tagged); + set_page_mte_tagged(page); + } } /* ensure the tags are visible before the PTE is set */ @@ -96,8 +102,7 @@ int memcmp_pages(struct page *page1, struct page *page2) * pages is tagged, set_pte_at() may zero or change the tags of the * other page via mte_sync_tags(). 
*/ - if (test_bit(PG_mte_tagged, &page1->flags) || - test_bit(PG_mte_tagged, &page2->flags)) + if (page_mte_tagged(page1) || page_mte_tagged(page2)) return addr1 != addr2; return ret; @@ -454,7 +459,7 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr, put_page(page); break; } - WARN_ON_ONCE(!test_bit(PG_mte_tagged, &page->flags)); + WARN_ON_ONCE(!page_mte_tagged(page)); /* limit access to the end of the page */ offset = offset_in_page(addr); diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 89cb03e2ed06..a3b31f2c7a6a 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -1061,7 +1061,7 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, maddr = page_address(page); if (!write) { - if (test_bit(PG_mte_tagged, &page->flags)) + if (page_mte_tagged(page)) num_tags = mte_copy_tags_to_user(tags, maddr, MTE_GRANULES_PER_PAGE); else @@ -1078,7 +1078,7 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, * completed fully */ if (num_tags == MTE_GRANULES_PER_PAGE) - set_bit(PG_mte_tagged, &page->flags); + set_page_mte_tagged(page); kvm_release_pfn_dirty(pfn); } diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 9b6df5286dd5..863f152c4cd5 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1271,9 +1271,9 @@ static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn, return -EFAULT; for (i = 0; i < nr_pages; i++, page++) { - if (!test_bit(PG_mte_tagged, &page->flags)) { + if (!page_mte_tagged(page)) { mte_clear_page_tags(page_address(page)); - set_bit(PG_mte_tagged, &page->flags); + set_page_mte_tagged(page); } } diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c index 24913271e898..6dbc822332f2 100644 --- a/arch/arm64/mm/copypage.c +++ b/arch/arm64/mm/copypage.c @@ -21,9 +21,11 @@ void copy_highpage(struct page *to, struct page *from) copy_page(kto, kfrom); - if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) { - set_bit(PG_mte_tagged, &to->flags); + if (system_supports_mte() && page_mte_tagged(from)) { + if (kasan_hw_tags_enabled()) + page_kasan_tag_reset(to); mte_copy_page_tags(kto, kfrom); + set_page_mte_tagged(to); } } EXPORT_SYMBOL(copy_highpage); diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index f59d647702d1..3ebdd643de23 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -972,5 +972,5 @@ struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, void tag_clear_highpage(struct page *page) { mte_zero_clear_page_tags(page_address(page)); - set_bit(PG_mte_tagged, &page->flags); + set_page_mte_tagged(page); } diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c index bed803d8e158..70f913205db9 100644 --- a/arch/arm64/mm/mteswap.c +++ b/arch/arm64/mm/mteswap.c @@ -24,7 +24,7 @@ int mte_save_tags(struct page *page) { void *tag_storage, *ret; - if (!test_bit(PG_mte_tagged, &page->flags)) + if (!page_mte_tagged(page)) return 0; tag_storage = mte_allocate_tag_storage(); diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c index 3a2bb2e8fdad..fbff1cea62ca 100644 --- a/arch/m68k/kernel/setup_mm.c +++ b/arch/m68k/kernel/setup_mm.c @@ -326,16 +326,16 @@ void __init setup_arch(char **cmdline_p) panic("No configuration setup"); } -#ifdef CONFIG_BLK_DEV_INITRD - if (m68k_ramdisk.size) { + if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && m68k_ramdisk.size) memblock_reserve(m68k_ramdisk.addr, m68k_ramdisk.size); + + paging_init(); + + if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && m68k_ramdisk.size) { initrd_start = (unsigned 
long)phys_to_virt(m68k_ramdisk.addr); initrd_end = initrd_start + m68k_ramdisk.size; pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end); } -#endif - - paging_init(); #ifdef CONFIG_NATFEAT nf_init(); diff --git a/arch/mips/include/asm/mach-rc32434/pci.h b/arch/mips/include/asm/mach-rc32434/pci.h index 9a6eefd12757..3eb767c8a4ee 100644 --- a/arch/mips/include/asm/mach-rc32434/pci.h +++ b/arch/mips/include/asm/mach-rc32434/pci.h @@ -374,7 +374,7 @@ struct pci_msu { PCI_CFG04_STAT_SSE | \ PCI_CFG04_STAT_PE) -#define KORINA_CNFG1 ((KORINA_STAT<<16)|KORINA_CMD) +#define KORINA_CNFG1 (KORINA_STAT | KORINA_CMD) #define KORINA_REVID 0 #define KORINA_CLASS_CODE 0 diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts index 73f8c998c64d..d4f5f159d6f2 100644 --- a/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts +++ b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts @@ -10,7 +10,6 @@ / { model = "fsl,T1040RDB-REV-A"; - compatible = "fsl,T1040RDB-REV-A"; }; &seville_port0 { diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index eb6d094083fd..317659fdeacf 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -36,15 +36,17 @@ #define PACA_IRQ_DEC 0x08 /* Or FIT */ #define PACA_IRQ_HMI 0x10 #define PACA_IRQ_PMI 0x20 +#define PACA_IRQ_REPLAYING 0x40 /* * Some soft-masked interrupts must be hard masked until they are replayed * (e.g., because the soft-masked handler does not clear the exception). + * Interrupt replay itself must remain hard masked too. */ #ifdef CONFIG_PPC_BOOK3S -#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE|PACA_IRQ_PMI) +#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE|PACA_IRQ_PMI|PACA_IRQ_REPLAYING) #else -#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE) +#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE|PACA_IRQ_REPLAYING) #endif #endif /* CONFIG_PPC64 */ diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 09f1790d0ae1..0ab3511a47d7 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h @@ -295,7 +295,6 @@ extern void free_unused_pacas(void); #else /* CONFIG_PPC64 */ -static inline void allocate_paca_ptrs(void) { } static inline void allocate_paca(int cpu) { } static inline void free_unused_pacas(void) { } diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index f63505d74932..6c6cb53d7045 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -26,6 +26,7 @@ #include extern int boot_cpuid; +extern int boot_cpu_hwid; /* PPC64 only */ extern int spinning_secondaries; extern u32 *cpu_to_phys_id; extern bool coregroup_enabled; diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index caebe1431596..ee95937bdaf1 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -67,11 +67,9 @@ static void iommu_debugfs_add(struct iommu_table *tbl) static void iommu_debugfs_del(struct iommu_table *tbl) { char name[10]; - struct dentry *liobn_entry; sprintf(name, "%08lx", tbl->it_index); - liobn_entry = debugfs_lookup(name, iommu_debugfs_dir); - debugfs_remove(liobn_entry); + debugfs_lookup_and_remove(name, iommu_debugfs_dir); } #else static void iommu_debugfs_add(struct iommu_table *tbl){} diff --git a/arch/powerpc/kernel/irq_64.c b/arch/powerpc/kernel/irq_64.c index eb2b380e52a0..9dc0ad3c533a 100644 --- a/arch/powerpc/kernel/irq_64.c +++ b/arch/powerpc/kernel/irq_64.c @@ -70,22 +70,19 @@ int distribute_irqs = 1; static inline void 
next_interrupt(struct pt_regs *regs) { - /* - * Softirq processing can enable/disable irqs, which will leave - * MSR[EE] enabled and the soft mask set to IRQS_DISABLED. Fix - * this up. - */ - if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) - hard_irq_disable(); - else - irq_soft_mask_set(IRQS_ALL_DISABLED); + if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) { + WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)); + WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED); + } /* * We are responding to the next interrupt, so interrupt-off * latencies should be reset here. */ + lockdep_hardirq_exit(); trace_hardirqs_on(); trace_hardirqs_off(); + lockdep_hardirq_enter(); } static inline bool irq_happened_test_and_clear(u8 irq) @@ -97,22 +94,11 @@ static inline bool irq_happened_test_and_clear(u8 irq) return false; } -void replay_soft_interrupts(void) +static void __replay_soft_interrupts(void) { struct pt_regs regs; /* - * Be careful here, calling these interrupt handlers can cause - * softirqs to be raised, which they may run when calling irq_exit, - * which will cause local_irq_enable() to be run, which can then - * recurse into this function. Don't keep any state across - * interrupt handler calls which may change underneath us. - * - * Softirqs can not be disabled over replay to stop this recursion - * because interrupts taken in idle code may require RCU softirq - * to run in the irq RCU tracking context. This is a hard problem - * to fix without changes to the softirq or idle layer. - * * We use local_paca rather than get_paca() to avoid all the * debug_smp_processor_id() business in this low level function. */ @@ -120,13 +106,20 @@ void replay_soft_interrupts(void) if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) { WARN_ON_ONCE(mfmsr() & MSR_EE); WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)); + WARN_ON(local_paca->irq_happened & PACA_IRQ_REPLAYING); } + /* + * PACA_IRQ_REPLAYING prevents interrupt handlers from enabling + * MSR[EE] to get PMIs, which can result in more IRQs becoming + * pending. + */ + local_paca->irq_happened |= PACA_IRQ_REPLAYING; + ppc_save_regs(®s); regs.softe = IRQS_ENABLED; regs.msr |= MSR_EE; -again: /* * Force the delivery of pending soft-disabled interrupts on PS3. * Any HV call will have this side effect. @@ -175,13 +168,14 @@ again: next_interrupt(®s); } - /* - * Softirq processing can enable and disable interrupts, which can - * result in new irqs becoming pending. Must keep looping until we - * have cleared out all pending interrupts. 
- */ - if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) - goto again; + local_paca->irq_happened &= ~PACA_IRQ_REPLAYING; +} + +void replay_soft_interrupts(void) +{ + irq_enter(); /* See comment in arch_local_irq_restore */ + __replay_soft_interrupts(); + irq_exit(); } #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP) @@ -200,13 +194,13 @@ static inline void replay_soft_interrupts_irqrestore(void) if (kuap_state != AMR_KUAP_BLOCKED) set_kuap(AMR_KUAP_BLOCKED); - replay_soft_interrupts(); + __replay_soft_interrupts(); if (kuap_state != AMR_KUAP_BLOCKED) set_kuap(kuap_state); } #else -#define replay_soft_interrupts_irqrestore() replay_soft_interrupts() +#define replay_soft_interrupts_irqrestore() __replay_soft_interrupts() #endif notrace void arch_local_irq_restore(unsigned long mask) @@ -219,9 +213,13 @@ notrace void arch_local_irq_restore(unsigned long mask) return; } - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) - WARN_ON_ONCE(in_nmi() || in_hardirq()); + if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) { + WARN_ON_ONCE(in_nmi()); + WARN_ON_ONCE(in_hardirq()); + WARN_ON_ONCE(local_paca->irq_happened & PACA_IRQ_REPLAYING); + } +again: /* * After the stb, interrupts are unmasked and there are no interrupts * pending replay. The restart sequence makes this atomic with @@ -248,6 +246,12 @@ notrace void arch_local_irq_restore(unsigned long mask) if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) WARN_ON_ONCE(!(mfmsr() & MSR_EE)); + /* + * If we came here from the replay below, we might have a preempt + * pending (due to preempt_enable_no_resched()). Have to check now. + */ + preempt_check_resched(); + return; happened: @@ -261,6 +265,7 @@ happened: irq_soft_mask_set(IRQS_ENABLED); local_paca->irq_happened = 0; __hard_irq_enable(); + preempt_check_resched(); return; } @@ -296,12 +301,38 @@ happened: irq_soft_mask_set(IRQS_ALL_DISABLED); trace_hardirqs_off(); + /* + * Now enter interrupt context. The interrupt handlers themselves + * also call irq_enter/exit (which is okay, they can nest). But call + * it here now to hold off softirqs until the below irq_exit(). If + * we allowed replayed handlers to run softirqs, that enables irqs, + * which must replay interrupts, which recurses in here and makes + * things more complicated. The recursion is limited to 2, and it can + * be made to work, but it's complicated. + * + * local_bh_disable can not be used here because interrupts taken in + * idle are not in the right context (RCU, tick, etc) to run softirqs + * so irq_enter must be called. + */ + irq_enter(); + replay_soft_interrupts_irqrestore(); + irq_exit(); + + if (unlikely(local_paca->irq_happened != PACA_IRQ_HARD_DIS)) { + /* + * The softirq processing in irq_exit() may enable interrupts + * temporarily, which can result in MSR[EE] being enabled and + * more irqs becoming pending. Go around again if that happens. 
+ */ + trace_hardirqs_on(); + preempt_enable_no_resched(); + goto again; + } + trace_hardirqs_on(); irq_soft_mask_set(IRQS_ENABLED); - if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) - WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS); local_paca->irq_happened = 0; __hard_irq_enable(); preempt_enable(); diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 1eed87d954ba..8537c354c560 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -366,8 +366,8 @@ static int __init early_init_dt_scan_cpus(unsigned long node, be32_to_cpu(intserv[found_thread])); boot_cpuid = found; - // Pass the boot CPU's hard CPU id back to our caller - *((u32 *)data) = be32_to_cpu(intserv[found_thread]); + if (IS_ENABLED(CONFIG_PPC64)) + boot_cpu_hwid = be32_to_cpu(intserv[found_thread]); /* * PAPR defines "logical" PVR values for cpus that @@ -751,7 +751,6 @@ static inline void save_fscr_to_task(void) {} void __init early_init_devtree(void *params) { - u32 boot_cpu_hwid; phys_addr_t limit; DBG(" -> early_init_devtree(%px)\n", params); @@ -847,7 +846,7 @@ void __init early_init_devtree(void *params) /* Retrieve CPU related informations from the flat tree * (altivec support, boot CPU ID, ...) */ - of_scan_flat_dt(early_init_dt_scan_cpus, &boot_cpu_hwid); + of_scan_flat_dt(early_init_dt_scan_cpus, NULL); if (boot_cpuid < 0) { printk("Failed to identify boot CPU !\n"); BUG(); @@ -864,11 +863,6 @@ void __init early_init_devtree(void *params) mmu_early_init_devtree(); - // NB. paca is not installed until later in early_setup() - allocate_paca_ptrs(); - allocate_paca(boot_cpuid); - set_hard_smp_processor_id(boot_cpuid, boot_cpu_hwid); - #ifdef CONFIG_PPC_POWERNV /* Scan and build the list of machine check recoverable ranges */ of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL); diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 6d041993a45d..efb301a4987c 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -86,6 +86,10 @@ EXPORT_SYMBOL(machine_id); int boot_cpuid = -1; EXPORT_SYMBOL_GPL(boot_cpuid); +#ifdef CONFIG_PPC64 +int boot_cpu_hwid = -1; +#endif + /* * These are used in binfmt_elf.c to put aux entries on the stack * for each elf executable being started. diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index a0dee7354fe6..b2e0d3ce4261 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -385,17 +385,21 @@ void __init early_setup(unsigned long dt_ptr) /* * Do early initialization using the flattened device * tree, such as retrieving the physical memory map or - * calculating/retrieving the hash table size. + * calculating/retrieving the hash table size, discover + * boot_cpuid and boot_cpu_hwid. */ early_init_devtree(__va(dt_ptr)); - /* Now we know the logical id of our boot cpu, setup the paca. */ - if (boot_cpuid != 0) { - /* Poison paca_ptrs[0] again if it's not the boot cpu */ - memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0])); - } + allocate_paca_ptrs(); + allocate_paca(boot_cpuid); + set_hard_smp_processor_id(boot_cpuid, boot_cpu_hwid); fixup_boot_paca(paca_ptrs[boot_cpuid]); setup_paca(paca_ptrs[boot_cpuid]); /* install the paca into registers */ + // smp_processor_id() now reports boot_cpuid + +#ifdef CONFIG_SMP + task_thread_info(current)->cpu = boot_cpuid; // fix task_cpu(current) +#endif /* * Configure exception handlers. 
This include setting up trampolines diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index f157552d79b3..285159e65a3b 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -374,7 +374,7 @@ void vtime_flush(struct task_struct *tsk) #define calc_cputime_factors() #endif -void __delay(unsigned long loops) +void __no_kcsan __delay(unsigned long loops) { unsigned long start; @@ -395,7 +395,7 @@ void __delay(unsigned long loops) } EXPORT_SYMBOL(__delay); -void udelay(unsigned long usecs) +void __no_kcsan udelay(unsigned long usecs) { __delay(tb_ticks_per_usec * usecs); } diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c index a379b0ce19ff..8643b2c8b76e 100644 --- a/arch/powerpc/net/bpf_jit_comp32.c +++ b/arch/powerpc/net/bpf_jit_comp32.c @@ -79,6 +79,20 @@ static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg) #define SEEN_NVREG_FULL_MASK 0x0003ffff /* Non volatile registers r14-r31 */ #define SEEN_NVREG_TEMP_MASK 0x00001e01 /* BPF_REG_5, BPF_REG_AX, TMP_REG */ +static inline bool bpf_has_stack_frame(struct codegen_context *ctx) +{ + /* + * We only need a stack frame if: + * - we call other functions (kernel helpers), or + * - we use non volatile registers, or + * - we use tail call counter + * - the bpf program uses its stack area + * The latter condition is deduced from the usage of BPF_REG_FP + */ + return ctx->seen & (SEEN_FUNC | SEEN_TAILCALL | SEEN_NVREG_FULL_MASK) || + bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)); +} + void bpf_jit_realloc_regs(struct codegen_context *ctx) { unsigned int nvreg_mask; @@ -118,7 +132,8 @@ void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx) #define BPF_TAILCALL_PROLOGUE_SIZE 4 - EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx))); + if (bpf_has_stack_frame(ctx)) + EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx))); if (ctx->seen & SEEN_TAILCALL) EMIT(PPC_RAW_STW(_R4, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC))); @@ -171,7 +186,8 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF)); /* Tear down our stack frame */ - EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx))); + if (bpf_has_stack_frame(ctx)) + EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx))); if (ctx->seen & SEEN_FUNC) EMIT(PPC_RAW_MTLR(_R0)); diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index ba8050e63acf..8b4ddccea279 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -87,6 +87,13 @@ endif # Avoid generating .eh_frame sections. KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables +# The RISC-V attributes frequently cause compatibility issues and provide no +# information, so just turn them off. 
+KBUILD_CFLAGS += $(call cc-option,-mno-riscv-attribute) +KBUILD_AFLAGS += $(call cc-option,-mno-riscv-attribute) +KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr) +KBUILD_AFLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr) + KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax) diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h index 9e73922e1e2e..d47d87c2d7e3 100644 --- a/arch/riscv/include/asm/ftrace.h +++ b/arch/riscv/include/asm/ftrace.h @@ -109,6 +109,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); #define ftrace_init_nop ftrace_init_nop #endif -#endif +#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* _ASM_RISCV_FTRACE_H */ diff --git a/arch/riscv/include/asm/parse_asm.h b/arch/riscv/include/asm/parse_asm.h index f36368de839f..3cd00332d70f 100644 --- a/arch/riscv/include/asm/parse_asm.h +++ b/arch/riscv/include/asm/parse_asm.h @@ -3,6 +3,9 @@ * Copyright (C) 2020 SiFive */ +#ifndef _ASM_RISCV_INSN_H +#define _ASM_RISCV_INSN_H + #include /* The bit field of immediate value in I-type instruction */ @@ -217,3 +220,5 @@ static inline bool is_ ## INSN_NAME ## _insn(long insn) \ (RVC_X(x_, RVC_B_IMM_5_OPOFF, RVC_B_IMM_5_MASK) << RVC_B_IMM_5_OFF) | \ (RVC_X(x_, RVC_B_IMM_7_6_OPOFF, RVC_B_IMM_7_6_MASK) << RVC_B_IMM_7_6_OFF) | \ (RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); }) + +#endif /* _ASM_RISCV_INSN_H */ diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h index 9a7d7346001e..98d9de07cba1 100644 --- a/arch/riscv/include/asm/patch.h +++ b/arch/riscv/include/asm/patch.h @@ -9,4 +9,6 @@ int patch_text_nosync(void *addr, const void *insns, size_t len); int patch_text(void *addr, u32 insn); +extern int riscv_patch_in_stop_machine; + #endif /* _ASM_RISCV_PATCH_H */ diff --git a/arch/riscv/kernel/compat_vdso/Makefile b/arch/riscv/kernel/compat_vdso/Makefile index 260daf3236d3..7f34f3c7c882 100644 --- a/arch/riscv/kernel/compat_vdso/Makefile +++ b/arch/riscv/kernel/compat_vdso/Makefile @@ -14,6 +14,10 @@ COMPAT_LD := $(LD) COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32 COMPAT_LD_FLAGS := -melf32lriscv +# Disable attributes, as they're useless and break the build. +COMPAT_CC_FLAGS += $(call cc-option,-mno-riscv-attribute) +COMPAT_CC_FLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr) + # Files to link into the compat_vdso obj-compat_vdso = $(patsubst %, %.o, $(compat_vdso-syms)) note.o diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index 5bff37af4770..03a6434a8cdd 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -15,10 +15,19 @@ void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex) { mutex_lock(&text_mutex); + + /* + * The code sequences we use for ftrace can't be patched while the + * kernel is running, so we need to use stop_machine() to modify them + * for now. This doesn't play nice with text_mutex, we use this flag + * to elide the check. 
+ */ + riscv_patch_in_stop_machine = true; } void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex) { + riscv_patch_in_stop_machine = false; mutex_unlock(&text_mutex); } @@ -107,9 +116,9 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) { int out; - ftrace_arch_code_modify_prepare(); + mutex_lock(&text_mutex); out = ftrace_make_nop(mod, rec, MCOUNT_ADDR); - ftrace_arch_code_modify_post_process(); + mutex_unlock(&text_mutex); return out; } diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c index 765004b60513..e099961453cc 100644 --- a/arch/riscv/kernel/patch.c +++ b/arch/riscv/kernel/patch.c @@ -11,6 +11,7 @@ #include #include #include +#include #include struct patch_insn { @@ -19,6 +20,8 @@ struct patch_insn { atomic_t cpu_count; }; +int riscv_patch_in_stop_machine = false; + #ifdef CONFIG_MMU /* * The fix_to_virt(, idx) needs a const value (not a dynamic variable of @@ -59,8 +62,15 @@ static int patch_insn_write(void *addr, const void *insn, size_t len) * Before reaching here, it was expected to lock the text_mutex * already, so we don't need to give another lock here and could * ensure that it was safe between each cores. + * + * We're currently using stop_machine() for ftrace & kprobes, and while + * that ensures text_mutex is held before installing the mappings it + * does not ensure text_mutex is held by the calling thread. That's + * safe but triggers a lockdep failure, so just elide it for that + * specific case. */ - lockdep_assert_held(&text_mutex); + if (!riscv_patch_in_stop_machine) + lockdep_assert_held(&text_mutex); if (across_pages) patch_map(addr + len, FIX_TEXT_POKE1); @@ -121,13 +131,25 @@ NOKPROBE_SYMBOL(patch_text_cb); int patch_text(void *addr, u32 insn) { + int ret; struct patch_insn patch = { .addr = addr, .insn = insn, .cpu_count = ATOMIC_INIT(0), }; - return stop_machine_cpuslocked(patch_text_cb, - &patch, cpu_online_mask); + /* + * kprobes takes text_mutex, before calling patch_text(), but as we call + * calls stop_machine(), the lockdep assertion in patch_insn_write() + * gets confused by the context in which the lock is taken. + * Instead, ensure the lock is held before calling stop_machine(), and + * set riscv_patch_in_stop_machine to skip the check in + * patch_insn_write(). 
+ */ + lockdep_assert_held(&text_mutex); + riscv_patch_in_stop_machine = true; + ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask); + riscv_patch_in_stop_machine = false; + return ret; } NOKPROBE_SYMBOL(patch_text); diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index 85cd5442d2f8..17d7383f201a 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -92,7 +92,7 @@ void notrace walk_stackframe(struct task_struct *task, while (!kstack_end(ksp)) { if (__kernel_text_address(pc) && unlikely(!fn(arg, pc))) break; - pc = (*ksp++) - 0x4; + pc = READ_ONCE_NOCHECK(*ksp++) - 0x4; } } diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c index ded7c47d2fbe..131b7cb29576 100644 --- a/arch/um/drivers/vector_kern.c +++ b/arch/um/drivers/vector_kern.c @@ -767,6 +767,7 @@ static int vector_config(char *str, char **error_out) if (parsed == NULL) { *error_out = "vector_config failed to parse parameters"; + kfree(params); return -EINVAL; } diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c index 3ac220dafec4..5472b1a0a039 100644 --- a/arch/um/drivers/virt-pci.c +++ b/arch/um/drivers/virt-pci.c @@ -132,8 +132,11 @@ static int um_pci_send_cmd(struct um_pci_device *dev, out ? 1 : 0, posted ? cmd : HANDLE_NO_FREE(cmd), GFP_ATOMIC); - if (ret) + if (ret) { + if (posted) + kfree(cmd); goto out; + } if (posted) { virtqueue_kick(dev->cmd_vq); @@ -623,22 +626,33 @@ static void um_pci_virtio_remove(struct virtio_device *vdev) struct um_pci_device *dev = vdev->priv; int i; - /* Stop all virtqueues */ - virtio_reset_device(vdev); - vdev->config->del_vqs(vdev); - device_set_wakeup_enable(&vdev->dev, false); mutex_lock(&um_pci_mtx); for (i = 0; i < MAX_DEVICES; i++) { if (um_pci_devices[i].dev != dev) continue; + um_pci_devices[i].dev = NULL; irq_free_desc(dev->irq); + + break; } mutex_unlock(&um_pci_mtx); - um_pci_rescan(); + if (i < MAX_DEVICES) { + struct pci_dev *pci_dev; + + pci_dev = pci_get_slot(bridge->bus, i); + if (pci_dev) + pci_stop_and_remove_bus_device_locked(pci_dev); + } + + /* Stop all virtqueues */ + virtio_reset_device(vdev); + dev->cmd_vq = NULL; + dev->irq_vq = NULL; + vdev->config->del_vqs(vdev); kfree(dev); } diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c index 588930a0ced1..ddd080f6dd82 100644 --- a/arch/um/drivers/virtio_uml.c +++ b/arch/um/drivers/virtio_uml.c @@ -168,7 +168,8 @@ static void vhost_user_check_reset(struct virtio_uml_device *vu_dev, if (!vu_dev->registered) return; - virtio_break_device(&vu_dev->vdev); + vu_dev->registered = 0; + schedule_work(&pdata->conn_broken_wk); } @@ -1136,6 +1137,15 @@ void virtio_uml_set_no_vq_suspend(struct virtio_device *vdev, static void vu_of_conn_broken(struct work_struct *wk) { + struct virtio_uml_platform_data *pdata; + struct virtio_uml_device *vu_dev; + + pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk); + + vu_dev = platform_get_drvdata(pdata->pdev); + + virtio_break_device(&vu_dev->vdev); + /* * We can't remove the device from the devicetree so the only thing we * can do is warn. 
@@ -1266,8 +1276,14 @@ static int vu_unregister_cmdline_device(struct device *dev, void *data) static void vu_conn_broken(struct work_struct *wk) { struct virtio_uml_platform_data *pdata; + struct virtio_uml_device *vu_dev; pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk); + + vu_dev = platform_get_drvdata(pdata->pdev); + + virtio_break_device(&vu_dev->vdev); + vu_unregister_cmdline_device(&pdata->pdev->dev, NULL); } diff --git a/arch/um/kernel/vmlinux.lds.S b/arch/um/kernel/vmlinux.lds.S index 16e49bfa2b42..53d719c04ba9 100644 --- a/arch/um/kernel/vmlinux.lds.S +++ b/arch/um/kernel/vmlinux.lds.S @@ -1,4 +1,4 @@ - +#define RUNTIME_DISCARD_EXIT KERNEL_STACK_SIZE = 4096 * (1 << CONFIG_KERNEL_STACK_ORDER); #ifdef CONFIG_LD_SCRIPT_STATIC diff --git a/arch/x86/configs/gki_defconfig b/arch/x86/configs/gki_defconfig index d9c8e89c0dd9..503b999de9f5 100644 --- a/arch/x86/configs/gki_defconfig +++ b/arch/x86/configs/gki_defconfig @@ -240,7 +240,6 @@ CONFIG_NET_SCH_FQ_CODEL=y CONFIG_NET_SCH_FQ=y CONFIG_NET_SCH_INGRESS=y CONFIG_NET_CLS_BASIC=y -CONFIG_NET_CLS_TCINDEX=y CONFIG_NET_CLS_FW=y CONFIG_NET_CLS_U32=y CONFIG_CLS_U32_MARK=y @@ -474,6 +473,7 @@ CONFIG_TYPEC=y CONFIG_TYPEC_TCPM=y CONFIG_TYPEC_TCPCI=y CONFIG_TYPEC_UCSI=y +CONFIG_TYPEC_DP_ALTMODE=y CONFIG_MMC=y # CONFIG_PWRSEQ_EMMC is not set # CONFIG_PWRSEQ_SIMPLE is not set diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index f05ebaa26f0f..ef8cabfbe854 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1695,6 +1695,9 @@ extern struct kvm_x86_ops kvm_x86_ops; #define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP #include +int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops); +void kvm_x86_vendor_exit(void); + #define __KVM_HAVE_ARCH_VM_ALLOC static inline struct kvm *kvm_arch_alloc_vm(void) { diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index d24b04ebf950..a4641d68eaba 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -51,7 +51,7 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); * simple as possible. * Must be called with preemption disabled. */ -static void __resctrl_sched_in(void) +static inline void __resctrl_sched_in(struct task_struct *tsk) { struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state); u32 closid = state->default_closid; @@ -63,13 +63,13 @@ static void __resctrl_sched_in(void) * Else use the closid/rmid assigned to this cpu. 
*/ if (static_branch_likely(&rdt_alloc_enable_key)) { - tmp = READ_ONCE(current->closid); + tmp = READ_ONCE(tsk->closid); if (tmp) closid = tmp; } if (static_branch_likely(&rdt_mon_enable_key)) { - tmp = READ_ONCE(current->rmid); + tmp = READ_ONCE(tsk->rmid); if (tmp) rmid = tmp; } @@ -90,17 +90,17 @@ static inline unsigned int resctrl_arch_round_mon_val(unsigned int val) return val * scale; } -static inline void resctrl_sched_in(void) +static inline void resctrl_sched_in(struct task_struct *tsk) { if (static_branch_likely(&rdt_enable_key)) - __resctrl_sched_in(); + __resctrl_sched_in(tsk); } void resctrl_cpu_detect(struct cpuinfo_x86 *c); #else -static inline void resctrl_sched_in(void) {} +static inline void resctrl_sched_in(struct task_struct *tsk) {} static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {} #endif /* CONFIG_X86_CPU_RESCTRL */ diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index c75d75b9f11a..d2dbbc50b3a7 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -880,6 +880,15 @@ void init_spectral_chicken(struct cpuinfo_x86 *c) } } #endif + /* + * Work around Erratum 1386. The XSAVES instruction malfunctions in + * certain circumstances on Zen1/2 uarch, and not all parts have had + * updated microcode at the time of writing (March 2023). + * + * Affected parts all have no supervisor XSAVE states, meaning that + * the XSAVEC instruction (which works fine) is equivalent. + */ + clear_cpu_cap(c, X86_FEATURE_XSAVES); } static void init_amd_zn(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 5993da21d822..87b670d540b8 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -314,7 +314,7 @@ static void update_cpu_closid_rmid(void *info) * executing task might have its own closid selected. Just reuse * the context switch code. */ - resctrl_sched_in(); + resctrl_sched_in(current); } /* @@ -535,7 +535,7 @@ static void _update_task_closid_rmid(void *task) * Otherwise, the MSR is updated when the task is scheduled in. */ if (task == current) - resctrl_sched_in(); + resctrl_sched_in(task); } static void update_task_closid_rmid(struct task_struct *t) diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 2f314b170c9f..ceab14b6118f 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -212,7 +212,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) switch_fpu_finish(); /* Load the Intel cache allocation PQR MSR. */ - resctrl_sched_in(); + resctrl_sched_in(next_p); return prev_p; } diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 6b3418bff326..7f94dbbc397b 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -656,7 +656,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) } /* Load the Intel cache allocation PQR MSR. 
*/ - resctrl_sched_in(); + resctrl_sched_in(next_p); return prev_p; } diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index bfe93a1c4f92..3629dd979667 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -5080,15 +5080,34 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = { static int __init svm_init(void) { + int r; + __unused_size_checks(); - return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm), - __alignof__(struct vcpu_svm), THIS_MODULE); + r = kvm_x86_vendor_init(&svm_init_ops); + if (r) + return r; + + /* + * Common KVM initialization _must_ come last, after this, /dev/kvm is + * exposed to userspace! + */ + r = kvm_init(&svm_init_ops, sizeof(struct vcpu_svm), + __alignof__(struct vcpu_svm), THIS_MODULE); + if (r) + goto err_kvm_init; + + return 0; + +err_kvm_init: + kvm_x86_vendor_exit(); + return r; } static void __exit svm_exit(void) { kvm_exit(); + kvm_x86_vendor_exit(); } module_init(svm_init) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index f5c1cb7cec8a..bc868958e91f 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -551,6 +551,33 @@ static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu) return 0; } +static void hv_reset_evmcs(void) +{ + struct hv_vp_assist_page *vp_ap; + + if (!static_branch_unlikely(&enable_evmcs)) + return; + + /* + * KVM should enable eVMCS if and only if all CPUs have a VP assist + * page, and should reject CPU onlining if eVMCS is enabled the CPU + * doesn't have a VP assist page allocated. + */ + vp_ap = hv_get_vp_assist_page(smp_processor_id()); + if (WARN_ON_ONCE(!vp_ap)) + return; + + /* + * Reset everything to support using non-enlightened VMCS access later + * (e.g. when we reload the module with enlightened_vmcs=0) + */ + vp_ap->nested_control.features.directhypercall = 0; + vp_ap->current_nested_vmcs = 0; + vp_ap->enlighten_vmentry = 0; +} + +#else /* IS_ENABLED(CONFIG_HYPERV) */ +static void hv_reset_evmcs(void) {} #endif /* IS_ENABLED(CONFIG_HYPERV) */ /* @@ -2501,6 +2528,8 @@ static void vmx_hardware_disable(void) if (cpu_vmxoff()) kvm_spurious_fault(); + hv_reset_evmcs(); + intel_pt_handle_vmx(0); } @@ -8427,41 +8456,23 @@ static void vmx_cleanup_l1d_flush(void) l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; } -static void vmx_exit(void) +static void __vmx_exit(void) { + allow_smaller_maxphyaddr = false; + #ifdef CONFIG_KEXEC_CORE RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL); synchronize_rcu(); #endif - - kvm_exit(); - -#if IS_ENABLED(CONFIG_HYPERV) - if (static_branch_unlikely(&enable_evmcs)) { - int cpu; - struct hv_vp_assist_page *vp_ap; - /* - * Reset everything to support using non-enlightened VMCS - * access later (e.g. 
when we reload the module with - * enlightened_vmcs=0) - */ - for_each_online_cpu(cpu) { - vp_ap = hv_get_vp_assist_page(cpu); - - if (!vp_ap) - continue; - - vp_ap->nested_control.features.directhypercall = 0; - vp_ap->current_nested_vmcs = 0; - vp_ap->enlighten_vmentry = 0; - } - - static_branch_disable(&enable_evmcs); - } -#endif vmx_cleanup_l1d_flush(); +} - allow_smaller_maxphyaddr = false; +static void vmx_exit(void) +{ + kvm_exit(); + kvm_x86_vendor_exit(); + + __vmx_exit(); } module_exit(vmx_exit); @@ -8502,23 +8513,20 @@ static int __init vmx_init(void) } #endif - r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx), - __alignof__(struct vcpu_vmx), THIS_MODULE); + r = kvm_x86_vendor_init(&vmx_init_ops); if (r) return r; /* - * Must be called after kvm_init() so enable_ept is properly set + * Must be called after common x86 init so enable_ept is properly set * up. Hand the parameter mitigation value in which was stored in * the pre module init parser. If no parameter was given, it will * contain 'auto' which will be turned into the default 'cond' * mitigation mode. */ r = vmx_setup_l1d_flush(vmentry_l1d_flush_param); - if (r) { - vmx_exit(); - return r; - } + if (r) + goto err_l1d_flush; vmx_setup_fb_clear_ctrl(); @@ -8542,6 +8550,21 @@ static int __init vmx_init(void) if (!enable_ept) allow_smaller_maxphyaddr = true; + /* + * Common KVM initialization _must_ come last, after this, /dev/kvm is + * exposed to userspace! + */ + r = kvm_init(&vmx_init_ops, sizeof(struct vcpu_vmx), + __alignof__(struct vcpu_vmx), THIS_MODULE); + if (r) + goto err_kvm_init; + return 0; + +err_kvm_init: + __vmx_exit(); +err_l1d_flush: + kvm_x86_vendor_exit(); + return r; } module_init(vmx_init); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 68827b8dc37a..ab09d292bded 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9351,7 +9351,16 @@ static struct notifier_block pvclock_gtod_notifier = { int kvm_arch_init(void *opaque) { - struct kvm_x86_init_ops *ops = opaque; + return 0; +} + +void kvm_arch_exit(void) +{ + +} + +int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops) +{ u64 host_pat; int r; @@ -9441,8 +9450,9 @@ out_free_x86_emulator_cache: kmem_cache_destroy(x86_emulator_cache); return r; } +EXPORT_SYMBOL_GPL(kvm_x86_vendor_init); -void kvm_arch_exit(void) +void kvm_x86_vendor_exit(void) { #ifdef CONFIG_X86_64 if (hypervisor_is_type(X86_HYPER_MS_HYPERV)) @@ -9468,6 +9478,7 @@ void kvm_arch_exit(void) WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key)); #endif } +EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit); static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason) { diff --git a/arch/x86/um/vdso/um_vdso.c b/arch/x86/um/vdso/um_vdso.c index 2112b8d14668..ff0f3b4b6c45 100644 --- a/arch/x86/um/vdso/um_vdso.c +++ b/arch/x86/um/vdso/um_vdso.c @@ -17,8 +17,10 @@ int __vdso_clock_gettime(clockid_t clock, struct __kernel_old_timespec *ts) { long ret; - asm("syscall" : "=a" (ret) : - "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory"); + asm("syscall" + : "=a" (ret) + : "0" (__NR_clock_gettime), "D" (clock), "S" (ts) + : "rcx", "r11", "memory"); return ret; } @@ -29,8 +31,10 @@ int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) { long ret; - asm("syscall" : "=a" (ret) : - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory"); + asm("syscall" + : "=a" (ret) + : "0" (__NR_gettimeofday), "D" (tv), "S" (tz) + : "rcx", "r11", "memory"); return ret; } diff --git a/block/blk.h b/block/blk.h index 8b75a95b28d6..a186ea20f39d 100644 --- 
a/block/blk.h +++ b/block/blk.h @@ -436,7 +436,7 @@ static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu) } struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu); -int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner); +int disk_scan_partitions(struct gendisk *disk, fmode_t mode); int disk_alloc_events(struct gendisk *disk); void disk_add_events(struct gendisk *disk); diff --git a/block/genhd.c b/block/genhd.c index c4765681a8b4..0b6928e948f3 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -356,9 +356,10 @@ void disk_uevent(struct gendisk *disk, enum kobject_action action) } EXPORT_SYMBOL_GPL(disk_uevent); -int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner) +int disk_scan_partitions(struct gendisk *disk, fmode_t mode) { struct block_device *bdev; + int ret = 0; if (disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) return -EINVAL; @@ -366,16 +367,29 @@ int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner) return -EINVAL; if (disk->open_partitions) return -EBUSY; - /* Someone else has bdev exclusively open? */ - if (disk->part0->bd_holder && disk->part0->bd_holder != owner) - return -EBUSY; set_bit(GD_NEED_PART_SCAN, &disk->state); - bdev = blkdev_get_by_dev(disk_devt(disk), mode, NULL); + /* + * If the device is opened exclusively by current thread already, it's + * safe to scan partitons, otherwise, use bd_prepare_to_claim() to + * synchronize with other exclusive openers and other partition + * scanners. + */ + if (!(mode & FMODE_EXCL)) { + ret = bd_prepare_to_claim(disk->part0, disk_scan_partitions); + if (ret) + return ret; + } + + bdev = blkdev_get_by_dev(disk_devt(disk), mode & ~FMODE_EXCL, NULL); if (IS_ERR(bdev)) - return PTR_ERR(bdev); - blkdev_put(bdev, mode); - return 0; + ret = PTR_ERR(bdev); + else + blkdev_put(bdev, mode & ~FMODE_EXCL); + + if (!(mode & FMODE_EXCL)) + bd_abort_claiming(disk->part0, disk_scan_partitions); + return ret; } /** @@ -501,9 +515,14 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk, if (ret) goto out_unregister_bdi; + /* Make sure the first partition scan will be proceed */ + if (get_capacity(disk) && !(disk->flags & GENHD_FL_NO_PART) && + !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state)) + set_bit(GD_NEED_PART_SCAN, &disk->state); + bdev_add(disk->part0, ddev->devt); if (get_capacity(disk)) - disk_scan_partitions(disk, FMODE_READ, NULL); + disk_scan_partitions(disk, FMODE_READ); /* * Announce the disk and partitions after all partitions are diff --git a/block/ioctl.c b/block/ioctl.c index 96617512982e..9c5f637ff153 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -467,10 +467,10 @@ static int blkdev_bszset(struct block_device *bdev, fmode_t mode, * user space. Note the separate arg/argp parameters that are needed * to deal with the compat_ptr() conversion. 
*/ -static int blkdev_common_ioctl(struct file *file, fmode_t mode, unsigned cmd, - unsigned long arg, void __user *argp) +static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg, + void __user *argp) { - struct block_device *bdev = I_BDEV(file->f_mapping->host); unsigned int max_sectors; switch (cmd) { @@ -528,8 +528,7 @@ static int blkdev_common_ioctl(struct file *file, fmode_t mode, unsigned cmd, return -EACCES; if (bdev_is_partition(bdev)) return -EINVAL; - return disk_scan_partitions(bdev->bd_disk, mode & ~FMODE_EXCL, - file); + return disk_scan_partitions(bdev->bd_disk, mode); case BLKTRACESTART: case BLKTRACESTOP: case BLKTRACETEARDOWN: @@ -607,7 +606,7 @@ long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) break; } - ret = blkdev_common_ioctl(file, mode, cmd, arg, argp); + ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp); if (ret != -ENOIOCTLCMD) return ret; @@ -676,7 +675,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) break; } - ret = blkdev_common_ioctl(file, mode, cmd, arg, argp); + ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp); if (ret == -ENOIOCTLCMD && disk->fops->compat_ioctl) ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg); diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 97450f4003cc..f007116a8427 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -484,6 +484,25 @@ void acpi_dev_power_up_children_with_adr(struct acpi_device *adev) acpi_dev_for_each_child(adev, acpi_power_up_if_adr_present, NULL); } +/** + * acpi_dev_power_state_for_wake - Deepest power state for wakeup signaling + * @adev: ACPI companion of the target device. + * + * Evaluate _S0W for @adev and return the value produced by it or return + * ACPI_STATE_UNKNOWN on errors (including _S0W not present). 
+ */ +u8 acpi_dev_power_state_for_wake(struct acpi_device *adev) +{ + unsigned long long state; + acpi_status status; + + status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state); + if (ACPI_FAILURE(status)) + return ACPI_STATE_UNKNOWN; + + return state; +} + #ifdef CONFIG_PM static DEFINE_MUTEX(acpi_pm_notifier_lock); static DEFINE_MUTEX(acpi_pm_notifier_install_lock); diff --git a/drivers/android/vendor_hooks.c b/drivers/android/vendor_hooks.c index a901fb0e59f1..989edeb56a43 100644 --- a/drivers/android/vendor_hooks.c +++ b/drivers/android/vendor_hooks.c @@ -49,13 +49,13 @@ #include #include #include -#include #include #include #include #include #include #include +#include /* * Export tracepoints that act as a bare tracehook (ie: have no trace event @@ -161,11 +161,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_set_affinity); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_v3_affinity_init); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_uninterrupt_tasks); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_uninterrupt_tasks_done); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_audio_usb_offload_vendor_set); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_audio_usb_offload_ep_action); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_audio_usb_offload_synctype); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_audio_usb_offload_connect); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_audio_usb_offload_disconnect); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_report_bug); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_watchdog_timer_softlockup); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_try_to_freeze_todo); @@ -179,3 +174,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_el1_fpac); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_panic_unhandled); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_arm64_serror_panic); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_disable_thermal_cooling_stats); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_resume); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_audio_usb_offload_connect); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_audio_usb_offload_disconnect); diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c index 8b2a0eb3f32a..d56a5d508ccd 100644 --- a/drivers/auxdisplay/hd44780.c +++ b/drivers/auxdisplay/hd44780.c @@ -322,8 +322,10 @@ fail1: static int hd44780_remove(struct platform_device *pdev) { struct charlcd *lcd = platform_get_drvdata(pdev); + struct hd44780_common *hdc = lcd->drvdata; charlcd_unregister(lcd); + kfree(hdc->hd44780); kfree(lcd->drvdata); kfree(lcd); diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index 4b5cd08c5a65..f30256a524be 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -251,7 +251,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu) { struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); struct cacheinfo *this_leaf, *sib_leaf; - unsigned int index; + unsigned int index, sib_index; int ret = 0; if (this_cpu_ci->cpu_map_populated) @@ -279,11 +279,13 @@ static int cache_shared_cpu_map_setup(unsigned int cpu) if (i == cpu || !sib_cpu_ci->info_list) continue;/* skip if itself or no cacheinfo */ - - sib_leaf = per_cpu_cacheinfo_idx(i, index); - if (cache_leaves_are_shared(this_leaf, sib_leaf)) { - cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map); - cpumask_set_cpu(i, &this_leaf->shared_cpu_map); + for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) { + sib_leaf = per_cpu_cacheinfo_idx(i, sib_index); + if (cache_leaves_are_shared(this_leaf, sib_leaf)) { + cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map); + 
cpumask_set_cpu(i, &this_leaf->shared_cpu_map); + break; + } } } /* record the maximum cache line size */ @@ -297,7 +299,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu) static void cache_shared_cpu_map_remove(unsigned int cpu) { struct cacheinfo *this_leaf, *sib_leaf; - unsigned int sibling, index; + unsigned int sibling, index, sib_index; for (index = 0; index < cache_leaves(cpu); index++) { this_leaf = per_cpu_cacheinfo_idx(cpu, index); @@ -308,9 +310,14 @@ static void cache_shared_cpu_map_remove(unsigned int cpu) if (sibling == cpu || !sib_cpu_ci->info_list) continue;/* skip if itself or no cacheinfo */ - sib_leaf = per_cpu_cacheinfo_idx(sibling, index); - cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); - cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map); + for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) { + sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index); + if (cache_leaves_are_shared(this_leaf, sib_leaf)) { + cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); + cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map); + break; + } + } } if (of_have_populated_dt()) of_node_put(this_leaf->fw_token); diff --git a/drivers/base/component.c b/drivers/base/component.c index 5eadeac6c532..7dbf14a1d915 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -125,7 +125,7 @@ static void component_debugfs_add(struct aggregate_device *m) static void component_debugfs_del(struct aggregate_device *m) { - debugfs_remove(debugfs_lookup(dev_name(m->parent), component_debugfs_dir)); + debugfs_lookup_and_remove(dev_name(m->parent), component_debugfs_dir); } #else diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 9ae2b5c4fc49..c463173f1fb1 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -372,7 +372,7 @@ late_initcall(deferred_probe_initcall); static void __exit deferred_probe_exit(void) { - debugfs_remove_recursive(debugfs_lookup("devices_deferred", NULL)); + debugfs_lookup_and_remove("devices_deferred", NULL); } __exitcall(deferred_probe_exit); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index df628e30bca4..793ae876918c 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -977,13 +977,13 @@ loop_set_status_from_info(struct loop_device *lo, return -EINVAL; } + /* Avoid assigning overflow values */ + if (info->lo_offset > LLONG_MAX || info->lo_sizelimit > LLONG_MAX) + return -EOVERFLOW; + lo->lo_offset = info->lo_offset; lo->lo_sizelimit = info->lo_sizelimit; - /* loff_t vars have been assigned __u64 */ - if (lo->lo_offset < 0 || lo->lo_sizelimit < 0) - return -EOVERFLOW; - memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); lo->lo_file_name[LO_NAME_SIZE-1] = 0; lo->lo_flags = info->lo_flags; @@ -1853,35 +1853,44 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, static void loop_handle_cmd(struct loop_cmd *cmd) { + struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css; + struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css; struct request *rq = blk_mq_rq_from_pdu(cmd); const bool write = op_is_write(req_op(rq)); struct loop_device *lo = rq->q->queuedata; int ret = 0; struct mem_cgroup *old_memcg = NULL; + const bool use_aio = cmd->use_aio; if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { ret = -EIO; goto failed; } - if (cmd->blkcg_css) - kthread_associate_blkcg(cmd->blkcg_css); - if (cmd->memcg_css) + if (cmd_blkcg_css) + kthread_associate_blkcg(cmd_blkcg_css); + if (cmd_memcg_css) old_memcg = set_active_memcg( - mem_cgroup_from_css(cmd->memcg_css)); + 
mem_cgroup_from_css(cmd_memcg_css)); + /* + * do_req_filebacked() may call blk_mq_complete_request() synchronously + * or asynchronously if using aio. Hence, do not touch 'cmd' after + * do_req_filebacked() has returned unless we are sure that 'cmd' has + * not yet been completed. + */ ret = do_req_filebacked(lo, rq); - if (cmd->blkcg_css) + if (cmd_blkcg_css) kthread_associate_blkcg(NULL); - if (cmd->memcg_css) { + if (cmd_memcg_css) { set_active_memcg(old_memcg); - css_put(cmd->memcg_css); + css_put(cmd_memcg_css); } failed: /* complete non-aio request */ - if (!cmd->use_aio || ret) { + if (!use_aio || ret) { if (ret == -EOPNOTSUPP) cmd->ret = ret; else diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 9c4288681841..edd153dda40c 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -219,7 +219,7 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele mutex_unlock(&mhi_chan->lock); break; case MHI_PKT_TYPE_RESET_CHAN_CMD: - dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id); + dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id); if (!ch_ring->started) { dev_err(dev, "Channel (%u) not opened\n", ch_id); return -ENODEV; @@ -990,44 +990,25 @@ static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl) static void mhi_ep_reset_worker(struct work_struct *work) { struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work); - struct device *dev = &mhi_cntrl->mhi_dev->dev; enum mhi_state cur_state; - int ret; - mhi_ep_abort_transfer(mhi_cntrl); + mhi_ep_power_down(mhi_cntrl); + + mutex_lock(&mhi_cntrl->state_lock); - spin_lock_bh(&mhi_cntrl->state_lock); /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */ mhi_ep_mmio_reset(mhi_cntrl); cur_state = mhi_cntrl->mhi_state; - spin_unlock_bh(&mhi_cntrl->state_lock); /* * Only proceed further if the reset is due to SYS_ERR. The host will * issue reset during shutdown also and we don't need to do re-init in * that case. 
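The loop_handle_cmd() change further up copies cmd->blkcg_css, cmd->memcg_css and cmd->use_aio into locals before calling do_req_filebacked(), since that call may complete and free the command. A standalone sketch of the same copy-before-submit pattern, with made-up names:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd {
	bool use_aio;
};

/* Stand-in for do_req_filebacked(): the command may be completed and
 * freed before this returns. */
static void submit(struct cmd *c)
{
	free(c);
}

int main(void)
{
	struct cmd *c = malloc(sizeof(*c));
	bool use_aio;

	if (!c)
		return 1;
	c->use_aio = false;

	use_aio = c->use_aio;	/* snapshot what is needed later */
	submit(c);		/* 'c' must not be touched after this */

	if (!use_aio)
		printf("complete the request synchronously\n");
	return 0;
}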
*/ - if (cur_state == MHI_STATE_SYS_ERR) { - mhi_ep_mmio_init(mhi_cntrl); + if (cur_state == MHI_STATE_SYS_ERR) + mhi_ep_power_up(mhi_cntrl); - /* Set AMSS EE before signaling ready state */ - mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS); - - /* All set, notify the host that we are ready */ - ret = mhi_ep_set_ready_state(mhi_cntrl); - if (ret) - return; - - dev_dbg(dev, "READY state notification sent to the host\n"); - - ret = mhi_ep_enable(mhi_cntrl); - if (ret) { - dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret); - return; - } - - enable_irq(mhi_cntrl->irq); - } + mutex_unlock(&mhi_cntrl->state_lock); } /* @@ -1106,11 +1087,11 @@ EXPORT_SYMBOL_GPL(mhi_ep_power_up); void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl) { - if (mhi_cntrl->enabled) + if (mhi_cntrl->enabled) { mhi_ep_abort_transfer(mhi_cntrl); - - kfree(mhi_cntrl->mhi_event); - disable_irq(mhi_cntrl->irq); + kfree(mhi_cntrl->mhi_event); + disable_irq(mhi_cntrl->irq); + } } EXPORT_SYMBOL_GPL(mhi_ep_power_down); @@ -1400,8 +1381,8 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, INIT_LIST_HEAD(&mhi_cntrl->st_transition_list); INIT_LIST_HEAD(&mhi_cntrl->ch_db_list); - spin_lock_init(&mhi_cntrl->state_lock); spin_lock_init(&mhi_cntrl->list_lock); + mutex_init(&mhi_cntrl->state_lock); mutex_init(&mhi_cntrl->event_lock); /* Set MHI version and AMSS EE before enumeration */ diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c index 3655c19e23c7..fd200b2ac0bb 100644 --- a/drivers/bus/mhi/ep/sm.c +++ b/drivers/bus/mhi/ep/sm.c @@ -63,24 +63,23 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl) int ret; /* If MHI is in M3, resume suspended channels */ - spin_lock_bh(&mhi_cntrl->state_lock); + mutex_lock(&mhi_cntrl->state_lock); + old_state = mhi_cntrl->mhi_state; if (old_state == MHI_STATE_M3) mhi_ep_resume_channels(mhi_cntrl); ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0); - spin_unlock_bh(&mhi_cntrl->state_lock); - if (ret) { mhi_ep_handle_syserr(mhi_cntrl); - return ret; + goto err_unlock; } /* Signal host that the device moved to M0 */ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0); if (ret) { dev_err(dev, "Failed sending M0 state change event\n"); - return ret; + goto err_unlock; } if (old_state == MHI_STATE_READY) { @@ -88,11 +87,14 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl) ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS); if (ret) { dev_err(dev, "Failed sending AMSS EE event\n"); - return ret; + goto err_unlock; } } - return 0; +err_unlock: + mutex_unlock(&mhi_cntrl->state_lock); + + return ret; } int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl) @@ -100,13 +102,12 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl) struct device *dev = &mhi_cntrl->mhi_dev->dev; int ret; - spin_lock_bh(&mhi_cntrl->state_lock); - ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3); - spin_unlock_bh(&mhi_cntrl->state_lock); + mutex_lock(&mhi_cntrl->state_lock); + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3); if (ret) { mhi_ep_handle_syserr(mhi_cntrl); - return ret; + goto err_unlock; } mhi_ep_suspend_channels(mhi_cntrl); @@ -115,10 +116,13 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl) ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3); if (ret) { dev_err(dev, "Failed sending M3 state change event\n"); - return ret; + goto err_unlock; } - return 0; +err_unlock: + mutex_unlock(&mhi_cntrl->state_lock); + + return ret; } int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl) @@ -127,22 +131,24 @@ int 
mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl) enum mhi_state mhi_state; int ret, is_ready; - spin_lock_bh(&mhi_cntrl->state_lock); + mutex_lock(&mhi_cntrl->state_lock); + /* Ensure that the MHISTATUS is set to RESET by host */ mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK); is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK); if (mhi_state != MHI_STATE_RESET || is_ready) { dev_err(dev, "READY state transition failed. MHI host not in RESET state\n"); - spin_unlock_bh(&mhi_cntrl->state_lock); - return -EIO; + ret = -EIO; + goto err_unlock; } ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY); - spin_unlock_bh(&mhi_cntrl->state_lock); - if (ret) mhi_ep_handle_syserr(mhi_cntrl); +err_unlock: + mutex_unlock(&mhi_cntrl->state_lock); + return ret; } diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 7c606c49cd53..a5ddebb1edea 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -74,7 +74,8 @@ /* * Timer values */ -#define SSIF_MSG_USEC 20000 /* 20ms between message tries. */ +#define SSIF_MSG_USEC 60000 /* 60ms between message tries (T3). */ +#define SSIF_REQ_RETRY_USEC 60000 /* 60ms between send retries (T6). */ #define SSIF_MSG_PART_USEC 5000 /* 5ms for a message part */ /* How many times to we retry sending/receiving the message. */ @@ -82,7 +83,9 @@ #define SSIF_RECV_RETRIES 250 #define SSIF_MSG_MSEC (SSIF_MSG_USEC / 1000) +#define SSIF_REQ_RETRY_MSEC (SSIF_REQ_RETRY_USEC / 1000) #define SSIF_MSG_JIFFIES ((SSIF_MSG_USEC * 1000) / TICK_NSEC) +#define SSIF_REQ_RETRY_JIFFIES ((SSIF_REQ_RETRY_USEC * 1000) / TICK_NSEC) #define SSIF_MSG_PART_JIFFIES ((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC) /* @@ -229,6 +232,9 @@ struct ssif_info { bool got_alert; bool waiting_alert; + /* Used to inform the timeout that it should do a resend. */ + bool do_resend; + /* * If set to true, this will request events the next time the * state machine is idle. @@ -241,12 +247,6 @@ struct ssif_info { */ bool req_flags; - /* - * Used to perform timer operations when run-to-completion - * mode is on. This is a countdown timer. - */ - int rtc_us_timer; - /* Used for sending/receiving data. +1 for the length. 
*/ unsigned char data[IPMI_MAX_MSG_LENGTH + 1]; unsigned int data_len; @@ -530,7 +530,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, static void start_get(struct ssif_info *ssif_info) { - ssif_info->rtc_us_timer = 0; ssif_info->multi_pos = 0; ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ, @@ -538,22 +537,28 @@ static void start_get(struct ssif_info *ssif_info) ssif_info->recv, I2C_SMBUS_BLOCK_DATA); } +static void start_resend(struct ssif_info *ssif_info); + static void retry_timeout(struct timer_list *t) { struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer); unsigned long oflags, *flags; - bool waiting; + bool waiting, resend; if (ssif_info->stopping) return; flags = ipmi_ssif_lock_cond(ssif_info, &oflags); + resend = ssif_info->do_resend; + ssif_info->do_resend = false; waiting = ssif_info->waiting_alert; ssif_info->waiting_alert = false; ipmi_ssif_unlock_cond(ssif_info, flags); if (waiting) start_get(ssif_info); + if (resend) + start_resend(ssif_info); } static void watch_timeout(struct timer_list *t) @@ -602,8 +607,6 @@ static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type, start_get(ssif_info); } -static void start_resend(struct ssif_info *ssif_info); - static void msg_done_handler(struct ssif_info *ssif_info, int result, unsigned char *data, unsigned int len) { @@ -622,7 +625,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, flags = ipmi_ssif_lock_cond(ssif_info, &oflags); ssif_info->waiting_alert = true; - ssif_info->rtc_us_timer = SSIF_MSG_USEC; if (!ssif_info->stopping) mod_timer(&ssif_info->retry_timer, jiffies + SSIF_MSG_JIFFIES); @@ -909,7 +911,13 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result, if (result < 0) { ssif_info->retries_left--; if (ssif_info->retries_left > 0) { - start_resend(ssif_info); + /* + * Wait the retry timeout time per the spec, + * then redo the send. + */ + ssif_info->do_resend = true; + mod_timer(&ssif_info->retry_timer, + jiffies + SSIF_REQ_RETRY_JIFFIES); return; } @@ -973,7 +981,6 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result, /* Wait a jiffie then request the next message */ ssif_info->waiting_alert = true; ssif_info->retries_left = SSIF_RECV_RETRIES; - ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC; if (!ssif_info->stopping) mod_timer(&ssif_info->retry_timer, jiffies + SSIF_MSG_PART_JIFFIES); @@ -1320,8 +1327,10 @@ static int do_cmd(struct i2c_client *client, int len, unsigned char *msg, ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg); if (ret) { retry_cnt--; - if (retry_cnt > 0) + if (retry_cnt > 0) { + msleep(SSIF_REQ_RETRY_MSEC); goto retry1; + } return -ENODEV; } @@ -1462,8 +1471,10 @@ retry_write: 32, msg); if (ret) { retry_cnt--; - if (retry_cnt > 0) + if (retry_cnt > 0) { + msleep(SSIF_REQ_RETRY_MSEC); goto retry_write; + } dev_err(&client->dev, "Could not write multi-part start, though the BMC said it could handle it. 
Just limit sends to one part.\n"); return ret; } diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c index 0913d3eb8d51..cd266021d010 100644 --- a/drivers/char/tpm/eventlog/acpi.c +++ b/drivers/char/tpm/eventlog/acpi.c @@ -143,8 +143,12 @@ int tpm_read_log_acpi(struct tpm_chip *chip) ret = -EIO; virt = acpi_os_map_iomem(start, len); - if (!virt) + if (!virt) { + dev_warn(&chip->dev, "%s: Failed to map ACPI memory\n", __func__); + /* try EFI log next */ + ret = -ENODEV; goto err; + } memcpy_fromio(log->bios_event_log, virt, len); diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 783d65fc71f0..409682d06309 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -507,6 +507,63 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip) return 0; } +/* + * Some AMD fTPM versions may cause stutter + * https://www.amd.com/en/support/kb/faq/pa-410 + * + * Fixes are available in two series of fTPM firmware: + * 6.x.y.z series: 6.0.18.6 + + * 3.x.y.z series: 3.57.y.5 + + */ +static bool tpm_amd_is_rng_defective(struct tpm_chip *chip) +{ + u32 val1, val2; + u64 version; + int ret; + + if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) + return false; + + ret = tpm_request_locality(chip); + if (ret) + return false; + + ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val1, NULL); + if (ret) + goto release; + if (val1 != 0x414D4400U /* AMD */) { + ret = -ENODEV; + goto release; + } + ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_1, &val1, NULL); + if (ret) + goto release; + ret = tpm2_get_tpm_pt(chip, TPM2_PT_FIRMWARE_VERSION_2, &val2, NULL); + +release: + tpm_relinquish_locality(chip); + + if (ret) + return false; + + version = ((u64)val1 << 32) | val2; + if ((version >> 48) == 6) { + if (version >= 0x0006000000180006ULL) + return false; + } else if ((version >> 48) == 3) { + if (version >= 0x0003005700000005ULL) + return false; + } else { + return false; + } + + dev_warn(&chip->dev, + "AMD fTPM version 0x%llx causes system stutter; hwrng disabled\n", + version); + + return true; +} + static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng); @@ -516,7 +573,8 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait) static int tpm_add_hwrng(struct tpm_chip *chip) { - if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip)) + if (!IS_ENABLED(CONFIG_HW_RANDOM_TPM) || tpm_is_firmware_upgrade(chip) || + tpm_amd_is_rng_defective(chip)) return 0; snprintf(chip->hwrng_name, sizeof(chip->hwrng_name), diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 24ee4e1cc452..830014a26609 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -150,6 +150,79 @@ enum tpm_sub_capabilities { TPM_CAP_PROP_TIS_DURATION = 0x120, }; +enum tpm2_pt_props { + TPM2_PT_NONE = 0x00000000, + TPM2_PT_GROUP = 0x00000100, + TPM2_PT_FIXED = TPM2_PT_GROUP * 1, + TPM2_PT_FAMILY_INDICATOR = TPM2_PT_FIXED + 0, + TPM2_PT_LEVEL = TPM2_PT_FIXED + 1, + TPM2_PT_REVISION = TPM2_PT_FIXED + 2, + TPM2_PT_DAY_OF_YEAR = TPM2_PT_FIXED + 3, + TPM2_PT_YEAR = TPM2_PT_FIXED + 4, + TPM2_PT_MANUFACTURER = TPM2_PT_FIXED + 5, + TPM2_PT_VENDOR_STRING_1 = TPM2_PT_FIXED + 6, + TPM2_PT_VENDOR_STRING_2 = TPM2_PT_FIXED + 7, + TPM2_PT_VENDOR_STRING_3 = TPM2_PT_FIXED + 8, + TPM2_PT_VENDOR_STRING_4 = TPM2_PT_FIXED + 9, + TPM2_PT_VENDOR_TPM_TYPE = TPM2_PT_FIXED + 10, + TPM2_PT_FIRMWARE_VERSION_1 = TPM2_PT_FIXED + 11, + 
TPM2_PT_FIRMWARE_VERSION_2 = TPM2_PT_FIXED + 12, + TPM2_PT_INPUT_BUFFER = TPM2_PT_FIXED + 13, + TPM2_PT_HR_TRANSIENT_MIN = TPM2_PT_FIXED + 14, + TPM2_PT_HR_PERSISTENT_MIN = TPM2_PT_FIXED + 15, + TPM2_PT_HR_LOADED_MIN = TPM2_PT_FIXED + 16, + TPM2_PT_ACTIVE_SESSIONS_MAX = TPM2_PT_FIXED + 17, + TPM2_PT_PCR_COUNT = TPM2_PT_FIXED + 18, + TPM2_PT_PCR_SELECT_MIN = TPM2_PT_FIXED + 19, + TPM2_PT_CONTEXT_GAP_MAX = TPM2_PT_FIXED + 20, + TPM2_PT_NV_COUNTERS_MAX = TPM2_PT_FIXED + 22, + TPM2_PT_NV_INDEX_MAX = TPM2_PT_FIXED + 23, + TPM2_PT_MEMORY = TPM2_PT_FIXED + 24, + TPM2_PT_CLOCK_UPDATE = TPM2_PT_FIXED + 25, + TPM2_PT_CONTEXT_HASH = TPM2_PT_FIXED + 26, + TPM2_PT_CONTEXT_SYM = TPM2_PT_FIXED + 27, + TPM2_PT_CONTEXT_SYM_SIZE = TPM2_PT_FIXED + 28, + TPM2_PT_ORDERLY_COUNT = TPM2_PT_FIXED + 29, + TPM2_PT_MAX_COMMAND_SIZE = TPM2_PT_FIXED + 30, + TPM2_PT_MAX_RESPONSE_SIZE = TPM2_PT_FIXED + 31, + TPM2_PT_MAX_DIGEST = TPM2_PT_FIXED + 32, + TPM2_PT_MAX_OBJECT_CONTEXT = TPM2_PT_FIXED + 33, + TPM2_PT_MAX_SESSION_CONTEXT = TPM2_PT_FIXED + 34, + TPM2_PT_PS_FAMILY_INDICATOR = TPM2_PT_FIXED + 35, + TPM2_PT_PS_LEVEL = TPM2_PT_FIXED + 36, + TPM2_PT_PS_REVISION = TPM2_PT_FIXED + 37, + TPM2_PT_PS_DAY_OF_YEAR = TPM2_PT_FIXED + 38, + TPM2_PT_PS_YEAR = TPM2_PT_FIXED + 39, + TPM2_PT_SPLIT_MAX = TPM2_PT_FIXED + 40, + TPM2_PT_TOTAL_COMMANDS = TPM2_PT_FIXED + 41, + TPM2_PT_LIBRARY_COMMANDS = TPM2_PT_FIXED + 42, + TPM2_PT_VENDOR_COMMANDS = TPM2_PT_FIXED + 43, + TPM2_PT_NV_BUFFER_MAX = TPM2_PT_FIXED + 44, + TPM2_PT_MODES = TPM2_PT_FIXED + 45, + TPM2_PT_MAX_CAP_BUFFER = TPM2_PT_FIXED + 46, + TPM2_PT_VAR = TPM2_PT_GROUP * 2, + TPM2_PT_PERMANENT = TPM2_PT_VAR + 0, + TPM2_PT_STARTUP_CLEAR = TPM2_PT_VAR + 1, + TPM2_PT_HR_NV_INDEX = TPM2_PT_VAR + 2, + TPM2_PT_HR_LOADED = TPM2_PT_VAR + 3, + TPM2_PT_HR_LOADED_AVAIL = TPM2_PT_VAR + 4, + TPM2_PT_HR_ACTIVE = TPM2_PT_VAR + 5, + TPM2_PT_HR_ACTIVE_AVAIL = TPM2_PT_VAR + 6, + TPM2_PT_HR_TRANSIENT_AVAIL = TPM2_PT_VAR + 7, + TPM2_PT_HR_PERSISTENT = TPM2_PT_VAR + 8, + TPM2_PT_HR_PERSISTENT_AVAIL = TPM2_PT_VAR + 9, + TPM2_PT_NV_COUNTERS = TPM2_PT_VAR + 10, + TPM2_PT_NV_COUNTERS_AVAIL = TPM2_PT_VAR + 11, + TPM2_PT_ALGORITHM_SET = TPM2_PT_VAR + 12, + TPM2_PT_LOADED_CURVES = TPM2_PT_VAR + 13, + TPM2_PT_LOCKOUT_COUNTER = TPM2_PT_VAR + 14, + TPM2_PT_MAX_AUTH_FAIL = TPM2_PT_VAR + 15, + TPM2_PT_LOCKOUT_INTERVAL = TPM2_PT_VAR + 16, + TPM2_PT_LOCKOUT_RECOVERY = TPM2_PT_VAR + 17, + TPM2_PT_NV_WRITE_RECOVERY = TPM2_PT_VAR + 18, + TPM2_PT_AUDIT_COUNTER_0 = TPM2_PT_VAR + 19, + TPM2_PT_AUDIT_COUNTER_1 = TPM2_PT_VAR + 20, +}; /* 128 bytes is an arbitrary cap. 
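The tpm-chip.c hunk above packs the two TPM2_PT_FIRMWARE_VERSION_* properties into one 64-bit value and compares it against the first fixed firmware of each series. A standalone illustration of that packing, with example property values standing in for tpm2_get_tpm_pt() reads:

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val1 = 0x00060000;	/* TPM2_PT_FIRMWARE_VERSION_1 (example) */
	uint32_t val2 = 0x00180006;	/* TPM2_PT_FIRMWARE_VERSION_2 (example) */
	uint64_t version = ((uint64_t)val1 << 32) | val2;
	bool fixed;

	if ((version >> 48) == 6)
		fixed = version >= 0x0006000000180006ULL;
	else if ((version >> 48) == 3)
		fixed = version >= 0x0003005700000005ULL;
	else
		fixed = true;	/* other series are not flagged */

	printf("fTPM version 0x%016" PRIx64 ": hwrng %s\n",
	       version, fixed ? "kept" : "disabled");
	return 0;
}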
This could be as large as TPM_BUFSIZE - 18 * bytes, but 128 is still a relatively large number of random bytes and diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig index cacaf9b87d26..37632a0659d8 100644 --- a/drivers/clk/renesas/Kconfig +++ b/drivers/clk/renesas/Kconfig @@ -22,7 +22,7 @@ config CLK_RENESAS select CLK_R8A7791 if ARCH_R8A7791 || ARCH_R8A7793 select CLK_R8A7792 if ARCH_R8A7792 select CLK_R8A7794 if ARCH_R8A7794 - select CLK_R8A7795 if ARCH_R8A77950 || ARCH_R8A77951 + select CLK_R8A7795 if ARCH_R8A77951 select CLK_R8A77960 if ARCH_R8A77960 select CLK_R8A77961 if ARCH_R8A77961 select CLK_R8A77965 if ARCH_R8A77965 diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c index 301475c74f50..7a585a777d38 100644 --- a/drivers/clk/renesas/r8a7795-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c @@ -128,7 +128,6 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = { }; static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = { - DEF_MOD("fdp1-2", 117, R8A7795_CLK_S2D1), /* ES1.x */ DEF_MOD("fdp1-1", 118, R8A7795_CLK_S0D1), DEF_MOD("fdp1-0", 119, R8A7795_CLK_S0D1), DEF_MOD("tmu4", 121, R8A7795_CLK_S0D6), @@ -162,7 +161,6 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = { DEF_MOD("pcie1", 318, R8A7795_CLK_S3D1), DEF_MOD("pcie0", 319, R8A7795_CLK_S3D1), DEF_MOD("usb-dmac30", 326, R8A7795_CLK_S3D1), - DEF_MOD("usb3-if1", 327, R8A7795_CLK_S3D1), /* ES1.x */ DEF_MOD("usb3-if0", 328, R8A7795_CLK_S3D1), DEF_MOD("usb-dmac31", 329, R8A7795_CLK_S3D1), DEF_MOD("usb-dmac0", 330, R8A7795_CLK_S3D1), @@ -187,28 +185,21 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = { DEF_MOD("hscif0", 520, R8A7795_CLK_S3D1), DEF_MOD("thermal", 522, R8A7795_CLK_CP), DEF_MOD("pwm", 523, R8A7795_CLK_S0D12), - DEF_MOD("fcpvd3", 600, R8A7795_CLK_S2D1), /* ES1.x */ DEF_MOD("fcpvd2", 601, R8A7795_CLK_S0D2), DEF_MOD("fcpvd1", 602, R8A7795_CLK_S0D2), DEF_MOD("fcpvd0", 603, R8A7795_CLK_S0D2), DEF_MOD("fcpvb1", 606, R8A7795_CLK_S0D1), DEF_MOD("fcpvb0", 607, R8A7795_CLK_S0D1), - DEF_MOD("fcpvi2", 609, R8A7795_CLK_S2D1), /* ES1.x */ DEF_MOD("fcpvi1", 610, R8A7795_CLK_S0D1), DEF_MOD("fcpvi0", 611, R8A7795_CLK_S0D1), - DEF_MOD("fcpf2", 613, R8A7795_CLK_S2D1), /* ES1.x */ DEF_MOD("fcpf1", 614, R8A7795_CLK_S0D1), DEF_MOD("fcpf0", 615, R8A7795_CLK_S0D1), - DEF_MOD("fcpci1", 616, R8A7795_CLK_S2D1), /* ES1.x */ - DEF_MOD("fcpci0", 617, R8A7795_CLK_S2D1), /* ES1.x */ DEF_MOD("fcpcs", 619, R8A7795_CLK_S0D1), - DEF_MOD("vspd3", 620, R8A7795_CLK_S2D1), /* ES1.x */ DEF_MOD("vspd2", 621, R8A7795_CLK_S0D2), DEF_MOD("vspd1", 622, R8A7795_CLK_S0D2), DEF_MOD("vspd0", 623, R8A7795_CLK_S0D2), DEF_MOD("vspbc", 624, R8A7795_CLK_S0D1), DEF_MOD("vspbd", 626, R8A7795_CLK_S0D1), - DEF_MOD("vspi2", 629, R8A7795_CLK_S2D1), /* ES1.x */ DEF_MOD("vspi1", 630, R8A7795_CLK_S0D1), DEF_MOD("vspi0", 631, R8A7795_CLK_S0D1), DEF_MOD("ehci3", 700, R8A7795_CLK_S3D2), @@ -221,7 +212,6 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = { DEF_MOD("cmm2", 709, R8A7795_CLK_S2D1), DEF_MOD("cmm1", 710, R8A7795_CLK_S2D1), DEF_MOD("cmm0", 711, R8A7795_CLK_S2D1), - DEF_MOD("csi21", 713, R8A7795_CLK_CSI0), /* ES1.x */ DEF_MOD("csi20", 714, R8A7795_CLK_CSI0), DEF_MOD("csi41", 715, R8A7795_CLK_CSI0), DEF_MOD("csi40", 716, R8A7795_CLK_CSI0), @@ -350,103 +340,26 @@ static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = { { 2, 192, 1, 192, 1, 32, }, }; -static const struct soc_device_attribute r8a7795es1[] __initconst = { +static const struct 
soc_device_attribute r8a7795_denylist[] __initconst = { { .soc_id = "r8a7795", .revision = "ES1.*" }, { /* sentinel */ } }; - - /* - * Fixups for R-Car H3 ES1.x - */ - -static const unsigned int r8a7795es1_mod_nullify[] __initconst = { - MOD_CLK_ID(326), /* USB-DMAC3-0 */ - MOD_CLK_ID(329), /* USB-DMAC3-1 */ - MOD_CLK_ID(700), /* EHCI/OHCI3 */ - MOD_CLK_ID(705), /* HS-USB-IF3 */ - -}; - -static const struct mssr_mod_reparent r8a7795es1_mod_reparent[] __initconst = { - { MOD_CLK_ID(118), R8A7795_CLK_S2D1 }, /* FDP1-1 */ - { MOD_CLK_ID(119), R8A7795_CLK_S2D1 }, /* FDP1-0 */ - { MOD_CLK_ID(121), R8A7795_CLK_S3D2 }, /* TMU4 */ - { MOD_CLK_ID(217), R8A7795_CLK_S3D1 }, /* SYS-DMAC2 */ - { MOD_CLK_ID(218), R8A7795_CLK_S3D1 }, /* SYS-DMAC1 */ - { MOD_CLK_ID(219), R8A7795_CLK_S3D1 }, /* SYS-DMAC0 */ - { MOD_CLK_ID(408), R8A7795_CLK_S3D1 }, /* INTC-AP */ - { MOD_CLK_ID(501), R8A7795_CLK_S3D1 }, /* AUDMAC1 */ - { MOD_CLK_ID(502), R8A7795_CLK_S3D1 }, /* AUDMAC0 */ - { MOD_CLK_ID(523), R8A7795_CLK_S3D4 }, /* PWM */ - { MOD_CLK_ID(601), R8A7795_CLK_S2D1 }, /* FCPVD2 */ - { MOD_CLK_ID(602), R8A7795_CLK_S2D1 }, /* FCPVD1 */ - { MOD_CLK_ID(603), R8A7795_CLK_S2D1 }, /* FCPVD0 */ - { MOD_CLK_ID(606), R8A7795_CLK_S2D1 }, /* FCPVB1 */ - { MOD_CLK_ID(607), R8A7795_CLK_S2D1 }, /* FCPVB0 */ - { MOD_CLK_ID(610), R8A7795_CLK_S2D1 }, /* FCPVI1 */ - { MOD_CLK_ID(611), R8A7795_CLK_S2D1 }, /* FCPVI0 */ - { MOD_CLK_ID(614), R8A7795_CLK_S2D1 }, /* FCPF1 */ - { MOD_CLK_ID(615), R8A7795_CLK_S2D1 }, /* FCPF0 */ - { MOD_CLK_ID(619), R8A7795_CLK_S2D1 }, /* FCPCS */ - { MOD_CLK_ID(621), R8A7795_CLK_S2D1 }, /* VSPD2 */ - { MOD_CLK_ID(622), R8A7795_CLK_S2D1 }, /* VSPD1 */ - { MOD_CLK_ID(623), R8A7795_CLK_S2D1 }, /* VSPD0 */ - { MOD_CLK_ID(624), R8A7795_CLK_S2D1 }, /* VSPBC */ - { MOD_CLK_ID(626), R8A7795_CLK_S2D1 }, /* VSPBD */ - { MOD_CLK_ID(630), R8A7795_CLK_S2D1 }, /* VSPI1 */ - { MOD_CLK_ID(631), R8A7795_CLK_S2D1 }, /* VSPI0 */ - { MOD_CLK_ID(804), R8A7795_CLK_S2D1 }, /* VIN7 */ - { MOD_CLK_ID(805), R8A7795_CLK_S2D1 }, /* VIN6 */ - { MOD_CLK_ID(806), R8A7795_CLK_S2D1 }, /* VIN5 */ - { MOD_CLK_ID(807), R8A7795_CLK_S2D1 }, /* VIN4 */ - { MOD_CLK_ID(808), R8A7795_CLK_S2D1 }, /* VIN3 */ - { MOD_CLK_ID(809), R8A7795_CLK_S2D1 }, /* VIN2 */ - { MOD_CLK_ID(810), R8A7795_CLK_S2D1 }, /* VIN1 */ - { MOD_CLK_ID(811), R8A7795_CLK_S2D1 }, /* VIN0 */ - { MOD_CLK_ID(812), R8A7795_CLK_S3D2 }, /* EAVB-IF */ - { MOD_CLK_ID(820), R8A7795_CLK_S2D1 }, /* IMR3 */ - { MOD_CLK_ID(821), R8A7795_CLK_S2D1 }, /* IMR2 */ - { MOD_CLK_ID(822), R8A7795_CLK_S2D1 }, /* IMR1 */ - { MOD_CLK_ID(823), R8A7795_CLK_S2D1 }, /* IMR0 */ - { MOD_CLK_ID(905), R8A7795_CLK_CP }, /* GPIO7 */ - { MOD_CLK_ID(906), R8A7795_CLK_CP }, /* GPIO6 */ - { MOD_CLK_ID(907), R8A7795_CLK_CP }, /* GPIO5 */ - { MOD_CLK_ID(908), R8A7795_CLK_CP }, /* GPIO4 */ - { MOD_CLK_ID(909), R8A7795_CLK_CP }, /* GPIO3 */ - { MOD_CLK_ID(910), R8A7795_CLK_CP }, /* GPIO2 */ - { MOD_CLK_ID(911), R8A7795_CLK_CP }, /* GPIO1 */ - { MOD_CLK_ID(912), R8A7795_CLK_CP }, /* GPIO0 */ - { MOD_CLK_ID(918), R8A7795_CLK_S3D2 }, /* I2C6 */ - { MOD_CLK_ID(919), R8A7795_CLK_S3D2 }, /* I2C5 */ - { MOD_CLK_ID(927), R8A7795_CLK_S3D2 }, /* I2C4 */ - { MOD_CLK_ID(928), R8A7795_CLK_S3D2 }, /* I2C3 */ -}; - - - /* - * Fixups for R-Car H3 ES2.x - */ - -static const unsigned int r8a7795es2_mod_nullify[] __initconst = { - MOD_CLK_ID(117), /* FDP1-2 */ - MOD_CLK_ID(327), /* USB3-IF1 */ - MOD_CLK_ID(600), /* FCPVD3 */ - MOD_CLK_ID(609), /* FCPVI2 */ - MOD_CLK_ID(613), /* FCPF2 */ - MOD_CLK_ID(616), /* FCPCI1 */ - MOD_CLK_ID(617), /* 
FCPCI0 */ - MOD_CLK_ID(620), /* VSPD3 */ - MOD_CLK_ID(629), /* VSPI2 */ - MOD_CLK_ID(713), /* CSI21 */ -}; - static int __init r8a7795_cpg_mssr_init(struct device *dev) { const struct rcar_gen3_cpg_pll_config *cpg_pll_config; u32 cpg_mode; int error; + /* + * We panic here to ensure removed SoCs and clk updates are always in + * sync to avoid overclocking damages. The panic can only be seen with + * commandline args 'earlycon keep_bootcon'. But these SoCs were for + * developers only anyhow. + */ + if (soc_device_match(r8a7795_denylist)) + panic("SoC not supported anymore!\n"); + error = rcar_rst_read_mode_pins(&cpg_mode); if (error) return error; @@ -457,25 +370,6 @@ static int __init r8a7795_cpg_mssr_init(struct device *dev) return -EINVAL; } - if (soc_device_match(r8a7795es1)) { - cpg_core_nullify_range(r8a7795_core_clks, - ARRAY_SIZE(r8a7795_core_clks), - R8A7795_CLK_S0D2, R8A7795_CLK_S0D12); - mssr_mod_nullify(r8a7795_mod_clks, - ARRAY_SIZE(r8a7795_mod_clks), - r8a7795es1_mod_nullify, - ARRAY_SIZE(r8a7795es1_mod_nullify)); - mssr_mod_reparent(r8a7795_mod_clks, - ARRAY_SIZE(r8a7795_mod_clks), - r8a7795es1_mod_reparent, - ARRAY_SIZE(r8a7795es1_mod_reparent)); - } else { - mssr_mod_nullify(r8a7795_mod_clks, - ARRAY_SIZE(r8a7795_mod_clks), - r8a7795es2_mod_nullify, - ARRAY_SIZE(r8a7795es2_mod_nullify)); - } - return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode); } diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c index e668f23c75e7..b3ef62fa612e 100644 --- a/drivers/clk/renesas/rcar-gen3-cpg.c +++ b/drivers/clk/renesas/rcar-gen3-cpg.c @@ -310,19 +310,10 @@ static unsigned int cpg_clk_extalr __initdata; static u32 cpg_mode __initdata; static u32 cpg_quirks __initdata; -#define PLL_ERRATA BIT(0) /* Missing PLL0/2/4 post-divider */ #define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */ static const struct soc_device_attribute cpg_quirks_match[] __initconst = { - { - .soc_id = "r8a7795", .revision = "ES1.0", - .data = (void *)(PLL_ERRATA | RCKCR_CKSEL), - }, - { - .soc_id = "r8a7795", .revision = "ES1.*", - .data = (void *)(RCKCR_CKSEL), - }, { .soc_id = "r8a7796", .revision = "ES1.0", .data = (void *)(RCKCR_CKSEL), @@ -355,9 +346,8 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev, * multiplier when cpufreq changes between normal and boost * modes. */ - mult = (cpg_quirks & PLL_ERRATA) ? 4 : 2; return cpg_pll_clk_register(core->name, __clk_get_name(parent), - base, mult, CPG_PLL0CR, 0); + base, 2, CPG_PLL0CR, 0); case CLK_TYPE_GEN3_PLL1: mult = cpg_pll_config->pll1_mult; @@ -370,9 +360,8 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev, * multiplier when cpufreq changes between normal and boost * modes. */ - mult = (cpg_quirks & PLL_ERRATA) ? 
4 : 2; return cpg_pll_clk_register(core->name, __clk_get_name(parent), - base, mult, CPG_PLL2CR, 2); + base, 2, CPG_PLL2CR, 2); case CLK_TYPE_GEN3_PLL3: mult = cpg_pll_config->pll3_mult; @@ -388,8 +377,6 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev, */ value = readl(base + CPG_PLL4CR); mult = (((value >> 24) & 0x7f) + 1) * 2; - if (cpg_quirks & PLL_ERRATA) - mult *= 2; break; case CLK_TYPE_GEN3_SDH: diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index 1a0cdf001b2f..523fd4523157 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -1113,19 +1113,6 @@ static int __init cpg_mssr_init(void) subsys_initcall(cpg_mssr_init); -void __init cpg_core_nullify_range(struct cpg_core_clk *core_clks, - unsigned int num_core_clks, - unsigned int first_clk, - unsigned int last_clk) -{ - unsigned int i; - - for (i = 0; i < num_core_clks; i++) - if (core_clks[i].id >= first_clk && - core_clks[i].id <= last_clk) - core_clks[i].name = NULL; -} - void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks, unsigned int num_mod_clks, const unsigned int *clks, unsigned int n) @@ -1139,19 +1126,5 @@ void __init mssr_mod_nullify(struct mssr_mod_clk *mod_clks, } } -void __init mssr_mod_reparent(struct mssr_mod_clk *mod_clks, - unsigned int num_mod_clks, - const struct mssr_mod_reparent *clks, - unsigned int n) -{ - unsigned int i, j; - - for (i = 0, j = 0; i < num_mod_clks && j < n; i++) - if (mod_clks[i].id == clks[j].clk) { - mod_clks[i].parent = clks[j].parent; - j++; - } -} - MODULE_DESCRIPTION("Renesas CPG/MSSR Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h index 1c3c057d17f5..80c5b462924a 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.h +++ b/drivers/clk/renesas/renesas-cpg-mssr.h @@ -187,21 +187,7 @@ void __init cpg_mssr_early_init(struct device_node *np, /* * Helpers for fixing up clock tables depending on SoC revision */ - -struct mssr_mod_reparent { - unsigned int clk, parent; -}; - - -extern void cpg_core_nullify_range(struct cpg_core_clk *core_clks, - unsigned int num_core_clks, - unsigned int first_clk, - unsigned int last_clk); extern void mssr_mod_nullify(struct mssr_mod_clk *mod_clks, unsigned int num_mod_clks, const unsigned int *clks, unsigned int n); -extern void mssr_mod_reparent(struct mssr_mod_clk *mod_clks, - unsigned int num_mod_clks, - const struct mssr_mod_reparent *clks, - unsigned int n); #endif diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 1e77907848ed..e65bd4d2472d 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -43,6 +43,39 @@ struct dma_buf_list { static struct dma_buf_list db_list; +/** + * dma_buf_get_each - Helps in traversing the db_list and calls the + * callback function which can extract required info out of each + * dmabuf. + * The db_list needs to be locked to prevent the db_list from being + * dynamically updated during the traversal process. + * + * @callback: [in] Handle for each dmabuf buffer in db_list. + * @private: [in] User-defined, used to pass in when callback is + * called. + * + * Returns 0 on success, otherwise returns a non-zero value for + * mutex_lock_interruptible or callback. 
+ */ +int dma_buf_get_each(int (*callback)(const struct dma_buf *dmabuf, + void *private), void *private) +{ + struct dma_buf *buf; + int ret = mutex_lock_interruptible(&db_list.lock); + + if (ret) + return ret; + + list_for_each_entry(buf, &db_list.head, list_node) { + ret = callback(buf, private); + if (ret) + break; + } + mutex_unlock(&db_list.lock); + return ret; +} +EXPORT_SYMBOL_NS_GPL(dma_buf_get_each, MINIDUMP); + static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) { struct dma_buf *dmabuf; diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c index 7882d4b3f2be..f06fdacc9bc8 100644 --- a/drivers/firmware/efi/sysfb_efi.c +++ b/drivers/firmware/efi/sysfb_efi.c @@ -264,6 +264,14 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = { "Lenovo ideapad D330-10IGM"), }, }, + { + /* Lenovo IdeaPad Duet 3 10IGL5 with 1200x1920 portrait screen */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, + "IdeaPad Duet 3 10IGL5"), + }, + }, {}, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 6853b93ac82e..df3388e8dec0 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -393,9 +393,10 @@ static int nv_read_register(struct amdgpu_device *adev, u32 se_num, *value = 0; for (i = 0; i < ARRAY_SIZE(nv_allowed_read_registers); i++) { en = &nv_allowed_read_registers[i]; - if (adev->reg_offset[en->hwip][en->inst] && - reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] - + en->reg_offset)) + if (!adev->reg_offset[en->hwip][en->inst]) + continue; + else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] + + en->reg_offset)) continue; *value = nv_get_register_value(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 7cd17dda32ce..2eddd7f6cd41 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -439,8 +439,9 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num, *value = 0; for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) { en = &soc15_allowed_read_registers[i]; - if (adev->reg_offset[en->hwip][en->inst] && - reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] + if (!adev->reg_offset[en->hwip][en->inst]) + continue; + else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] + en->reg_offset)) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 230e15fed755..9c52af500525 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -47,19 +47,31 @@ static const struct amd_ip_funcs soc21_common_ip_funcs; /* SOC21 */ -static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array[] = +static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, }; -static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode = +static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = { - .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array), - .codec_array = vcn_4_0_0_video_codecs_encode_array, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, 
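A hypothetical in-kernel caller of dma_buf_get_each(), the helper added to dma-buf.c above, could walk db_list as sketched here; the callback and wrapper are illustrative only and not part of the patch (a module outside dma-buf would also need MODULE_IMPORT_NS(MINIDUMP) for the namespaced export):

/* Count every DMA-BUF currently tracked in db_list. */
static int count_one_dmabuf(const struct dma_buf *dmabuf, void *private)
{
	(*(unsigned int *)private)++;
	return 0;	/* returning non-zero stops the walk */
}

static unsigned int count_dmabufs(void)
{
	unsigned int count = 0;

	/* dma_buf_get_each() returns non-zero if the db_list lock wait
	 * is interrupted or the callback bails out. */
	if (dma_buf_get_each(count_one_dmabuf, &count))
		return 0;

	return count;
}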
}; -static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[] = +static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 = +{ + .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn0), + .codec_array = vcn_4_0_0_video_codecs_encode_array_vcn0, +}; + +static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 = +{ + .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn1), + .codec_array = vcn_4_0_0_video_codecs_encode_array_vcn1, +}; + +static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, @@ -68,23 +80,47 @@ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; -static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode = +static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] = { - .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array), - .codec_array = vcn_4_0_0_video_codecs_decode_array, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, +}; + +static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn0 = +{ + .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn0), + .codec_array = vcn_4_0_0_video_codecs_decode_array_vcn0, +}; + +static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 = +{ + .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn1), + .codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1, }; static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode, const struct amdgpu_video_codecs **codecs) { - switch (adev->ip_versions[UVD_HWIP][0]) { + if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config)) + return -EINVAL; + switch (adev->ip_versions[UVD_HWIP][0]) { case IP_VERSION(4, 0, 0): case IP_VERSION(4, 0, 2): - if (encode) - *codecs = &vcn_4_0_0_video_codecs_encode; - else - *codecs = &vcn_4_0_0_video_codecs_decode; + case IP_VERSION(4, 0, 4): + if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) { + if (encode) + *codecs = &vcn_4_0_0_video_codecs_encode_vcn1; + else + *codecs = &vcn_4_0_0_video_codecs_decode_vcn1; + } else { + if (encode) + *codecs = &vcn_4_0_0_video_codecs_encode_vcn0; + else + *codecs = &vcn_4_0_0_video_codecs_decode_vcn0; + } return 0; default: return -EINVAL; @@ -254,9 +290,10 @@ static int soc21_read_register(struct amdgpu_device *adev, u32 se_num, *value = 0; for (i = 0; i < ARRAY_SIZE(soc21_allowed_read_registers); i++) { en = &soc21_allowed_read_registers[i]; - if (adev->reg_offset[en->hwip][en->inst] && - reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] - + en->reg_offset)) + if (!adev->reg_offset[en->hwip][en->inst]) + continue; + else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] + + en->reg_offset)) continue; *value = soc21_get_register_value(adev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index cd4e61bf0493..3ac599f74fea 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ 
b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c @@ -280,7 +280,7 @@ phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd) if (!pdd->doorbell_index) { int r = kfd_alloc_process_doorbells(pdd->dev, &pdd->doorbell_index); - if (r) + if (r < 0) return 0; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 9919c39f7ea0..d70c64a9fcb2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -2109,13 +2109,19 @@ static bool dcn32_resource_construct( dc->caps.max_cursor_size = 64; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; - dc->caps.mall_size_per_mem_channel = 0; + dc->caps.mall_size_per_mem_channel = 4; dc->caps.mall_size_total = 0; dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; dc->caps.cache_line_size = 64; dc->caps.cache_num_ways = 16; - dc->caps.max_cab_allocation_bytes = 67108864; // 64MB = 1024 * 1024 * 64 + + /* Calculate the available MALL space */ + dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall( + dc, dc->ctx->dc_bios->vram_info.num_chans) * + dc->caps.mall_size_per_mem_channel * 1024 * 1024; + dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes; + dc->caps.subvp_fw_processing_delay_us = 15; dc->caps.subvp_prefetch_end_to_mall_start_us = 15; dc->caps.subvp_swath_height_margin_lines = 16; @@ -2545,3 +2551,55 @@ struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer( return idle_pipe; } + +unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans) +{ + /* + * DCN32 and DCN321 SKUs may have different sizes for MALL + * but we may not be able to access all the MALL space. + * If the num_chans is power of 2, then we can access all + * of the available MALL space. Otherwise, we can only + * access: + * + * max_cab_size_in_bytes = total_cache_size_in_bytes * + * ((2^floor(log2(num_chans)))/num_chans) + * + * Calculating the MALL sizes for all available SKUs, we + * have come up with the following simplified check. + * - we have max_chans which provides the max MALL size. + * Each channel supports 4MB of MALL so: + * + * total_cache_size_in_bytes = max_chans * 4 MB + * + * - we have avail_chans which shows the number of channels + * we can use if we can't access the entire MALL space. + * It is generally half of max_chans + * - so we use the following checks: + * + * if (num_chans == max_chans), return max_chans + * if (num_chans < max_chans), return avail_chans + * + * - exception is GC_11_0_0 where we can't access max_chans, + * so we define max_avail_chans as the maximum available + * MALL space + * + */ + int gc_11_0_0_max_chans = 48; + int gc_11_0_0_max_avail_chans = 32; + int gc_11_0_0_avail_chans = 16; + int gc_11_0_3_max_chans = 16; + int gc_11_0_3_avail_chans = 8; + int gc_11_0_2_max_chans = 8; + int gc_11_0_2_avail_chans = 4; + + if (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev)) { + return (num_chans == gc_11_0_0_max_chans) ? + gc_11_0_0_max_avail_chans : gc_11_0_0_avail_chans; + } else if (ASICREV_IS_GC_11_0_2(dc->ctx->asic_id.hw_internal_rev)) { + return (num_chans == gc_11_0_2_max_chans) ? + gc_11_0_2_max_chans : gc_11_0_2_avail_chans; + } else { // if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev)) { + return (num_chans == gc_11_0_3_max_chans) ?
+ gc_11_0_3_max_chans : gc_11_0_3_avail_chans; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h index f76120e67c16..615244a1f95d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h @@ -142,6 +142,10 @@ void dcn32_restore_mall_state(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config); +bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe); + +unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans); + /* definitions for run time init of reg offsets */ /* CLK SRC */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index 6292ac515d1a..d320e21680da 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -1697,11 +1697,18 @@ static bool dcn321_resource_construct( dc->caps.max_cursor_size = 64; dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; - dc->caps.mall_size_per_mem_channel = 0; + dc->caps.mall_size_per_mem_channel = 4; dc->caps.mall_size_total = 0; dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; dc->caps.cache_line_size = 64; dc->caps.cache_num_ways = 16; + + /* Calculate the available MALL space */ + dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall( + dc, dc->ctx->dc_bios->vram_info.num_chans) * + dc->caps.mall_size_per_mem_channel * 1024 * 1024; + dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes; + dc->caps.max_cab_allocation_bytes = 33554432; // 32MB = 1024 * 1024 * 32 dc->caps.subvp_fw_processing_delay_us = 15; dc->caps.subvp_prefetch_end_to_mall_start_us = 15; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 04cc96e70098..e22b4b3880af 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -676,7 +676,9 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, */ if (pipe->plane_state && !pipe->top_pipe && pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface && - vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) { + (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 || + (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 && + dcn32_allow_subvp_with_active_margin(pipe)))) { while (pipe) { num_pipes++; pipe = pipe->bottom_pipe; @@ -2379,8 +2381,11 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa } /* Override from VBIOS for num_chan */ - if (dc->ctx->dc_bios->vram_info.num_chans) + if (dc->ctx->dc_bios->vram_info.num_chans) { dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; + dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc, + dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel); + } if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; @@ -2558,3 +2563,30 @@ void 
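The comment in dcn32_calc_num_avail_chans_for_mall() above gives max_cab_size_in_bytes = total_cache_size_in_bytes * ((2^floor(log2(num_chans))) / num_chans). A standalone worked example of that formula with an illustrative, non power-of-two channel count (not a real SKU):

#include <stdio.h>

int main(void)
{
	int num_chans = 12;		/* illustrative only */
	int mall_per_chan_mb = 4;	/* mall_size_per_mem_channel above */
	int pow2 = 1;

	while (pow2 * 2 <= num_chans)
		pow2 *= 2;		/* 2^floor(log2(12)) == 8 */

	printf("total MALL %d MB, addressable %d MB\n",
	       num_chans * mall_per_chan_mb,	/* 48 MB */
	       pow2 * mall_per_chan_mb);	/* 8/12 of it -> 32 MB */
	return 0;
}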
dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes, pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0; pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0; } + +bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe) +{ + bool allow = false; + uint32_t refresh_rate = 0; + + /* Allow subvp on displays that have active margin for 2560x1440@60hz displays + * only for now. There must be no scaling as well. + * + * For now we only enable on 2560x1440@60hz displays to enable 4K60 + 1440p60 configs + * for p-state switching. + */ + if (pipe->stream && pipe->plane_state) { + refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 + + pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1) + / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total); + if (pipe->stream->timing.v_addressable == 1440 && + pipe->stream->timing.h_addressable == 2560 && + refresh_rate >= 55 && refresh_rate <= 65 && + pipe->plane_state->src_rect.height == 1440 && + pipe->plane_state->src_rect.width == 2560 && + pipe->plane_state->dst_rect.height == 1440 && + pipe->plane_state->dst_rect.width == 2560) + allow = true; + } + return allow; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c index 0ea406145c1d..b80cef70fa60 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c @@ -534,8 +534,11 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p } /* Override from VBIOS for num_chan */ - if (dc->ctx->dc_bios->vram_info.num_chans) + if (dc->ctx->dc_bios->vram_info.num_chans) { dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; + dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc, + dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel); + } if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 4ca37261584a..e77c674b37ca 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -3309,8 +3309,13 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int ret; port = drm_dp_mst_topology_get_port_validated(mgr, payload->port); - if (!port) + if (!port) { + drm_dbg_kms(mgr->dev, + "VCPI %d for port %p not in topology, not creating a payload\n", + payload->vcpi, payload->port); + payload->vc_start_slot = -1; return 0; + } if (mgr->payload_count == 0) mgr->next_start_slot = mst_state->start_slot; @@ -3644,6 +3649,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); ret = 0; mgr->payload_id_table_cleared = false; + + memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv)); + memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv)); } out_unlock: @@ -3856,7 +3864,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv; if (!drm_dp_get_one_sb_msg(mgr, false, &mstb)) - goto out; + goto out_clear_reply; /* Multi-packet message transmission, don't clear the reply */ if (!msg->have_eomt) @@ -5355,27 +5363,52 @@ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_a 
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state); /** - * drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any + * drm_atomic_get_old_mst_topology_state: get old MST topology state in atomic state, if any * @state: global atomic state * @mgr: MST topology manager, also the private object in this case * - * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic + * This function wraps drm_atomic_get_old_private_obj_state() passing in the MST atomic * state vtable so that the private object state returned is that of a MST * topology object. * * Returns: * - * The MST topology state, or NULL if there's no topology state for this MST mgr + * The old MST topology state, or NULL if there's no topology state for this MST mgr + * in the global atomic state + */ +struct drm_dp_mst_topology_state * +drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr) +{ + struct drm_private_state *old_priv_state = + drm_atomic_get_old_private_obj_state(state, &mgr->base); + + return old_priv_state ? to_dp_mst_topology_state(old_priv_state) : NULL; +} +EXPORT_SYMBOL(drm_atomic_get_old_mst_topology_state); + +/** + * drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any + * @state: global atomic state + * @mgr: MST topology manager, also the private object in this case + * + * This function wraps drm_atomic_get_new_private_obj_state() passing in the MST atomic + * state vtable so that the private object state returned is that of a MST + * topology object. + * + * Returns: + * + * The new MST topology state, or NULL if there's no topology state for this MST mgr * in the global atomic state */ struct drm_dp_mst_topology_state * drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr) { - struct drm_private_state *priv_state = + struct drm_private_state *new_priv_state = drm_atomic_get_new_private_obj_state(state, &mgr->base); - return priv_state ? to_dp_mst_topology_state(priv_state) : NULL; + return new_priv_state ? to_dp_mst_topology_state(new_priv_state) : NULL; } EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state); diff --git a/drivers/gpu/drm/display/drm_hdmi_helper.c b/drivers/gpu/drm/display/drm_hdmi_helper.c index 0264abe55278..faf5e9efa7d3 100644 --- a/drivers/gpu/drm/display/drm_hdmi_helper.c +++ b/drivers/gpu/drm/display/drm_hdmi_helper.c @@ -44,10 +44,8 @@ int drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame, /* Sink EOTF is Bit map while infoframe is absolute values */ if (!is_eotf_supported(hdr_metadata->hdmi_metadata_type1.eotf, - connector->hdr_sink_metadata.hdmi_type1.eotf)) { - DRM_DEBUG_KMS("EOTF Not Supported\n"); - return -EINVAL; - } + connector->hdr_sink_metadata.hdmi_type1.eotf)) + DRM_DEBUG_KMS("Unknown EOTF %d\n", hdr_metadata->hdmi_metadata_type1.eotf); err = hdmi_drm_infoframe_init(frame); if (err < 0) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index f197f59f6d99..c0dc5858a723 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1070,6 +1070,7 @@ static void drm_atomic_connector_print_state(struct drm_printer *p, drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name); drm_printf(p, "\tcrtc=%s\n", state->crtc ? 
state->crtc->name : "(null)"); drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware); + drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc); if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) if (state->writeback_job && state->writeback_job->fb) diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 3efce05d7b57..3a6e176d77aa 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -107,9 +107,6 @@ config DRM_I915_USERPTR If in doubt, say "Y". -config DRM_I915_GVT - bool - config DRM_I915_GVT_KVMGT tristate "Enable KVM host support Intel GVT-g graphics virtualization" depends on DRM_I915 @@ -160,3 +157,6 @@ menu "drm/i915 Unstable Evolution" depends on DRM_I915 source "drivers/gpu/drm/i915/Kconfig.unstable" endmenu + +config DRM_I915_GVT + bool diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index ed4d93942dbd..ecd6c5c3f4de 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -2053,7 +2053,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) /* attach connector to encoder */ intel_connector_attach_encoder(intel_connector, encoder); - intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL); + encoder->devdata = intel_bios_encoder_data_lookup(dev_priv, port); + intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, NULL); mutex_lock(&dev->mode_config.mutex); intel_panel_add_vbt_lfp_fixed_mode(intel_connector); diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 178a8cbb7583..a70b7061742a 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -620,14 +620,14 @@ static void dump_pnp_id(struct drm_i915_private *i915, static int opregion_get_panel_type(struct drm_i915_private *i915, const struct intel_bios_encoder_data *devdata, - const struct edid *edid) + const struct edid *edid, bool use_fallback) { return intel_opregion_get_panel_type(i915); } static int vbt_get_panel_type(struct drm_i915_private *i915, const struct intel_bios_encoder_data *devdata, - const struct edid *edid) + const struct edid *edid, bool use_fallback) { const struct bdb_lvds_options *lvds_options; @@ -652,7 +652,7 @@ static int vbt_get_panel_type(struct drm_i915_private *i915, static int pnpid_get_panel_type(struct drm_i915_private *i915, const struct intel_bios_encoder_data *devdata, - const struct edid *edid) + const struct edid *edid, bool use_fallback) { const struct bdb_lvds_lfp_data *data; const struct bdb_lvds_lfp_data_ptrs *ptrs; @@ -701,9 +701,9 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915, static int fallback_get_panel_type(struct drm_i915_private *i915, const struct intel_bios_encoder_data *devdata, - const struct edid *edid) + const struct edid *edid, bool use_fallback) { - return 0; + return use_fallback ? 
0 : -1; } enum panel_type { @@ -715,13 +715,13 @@ enum panel_type { static int get_panel_type(struct drm_i915_private *i915, const struct intel_bios_encoder_data *devdata, - const struct edid *edid) + const struct edid *edid, bool use_fallback) { struct { const char *name; int (*get_panel_type)(struct drm_i915_private *i915, const struct intel_bios_encoder_data *devdata, - const struct edid *edid); + const struct edid *edid, bool use_fallback); int panel_type; } panel_types[] = { [PANEL_TYPE_OPREGION] = { @@ -744,7 +744,8 @@ static int get_panel_type(struct drm_i915_private *i915, int i; for (i = 0; i < ARRAY_SIZE(panel_types); i++) { - panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata, edid); + panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata, + edid, use_fallback); drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf && panel_types[i].panel_type != 0xff); @@ -2592,6 +2593,12 @@ intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata) devdata->child.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR; } +static bool +intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata) +{ + return devdata->child.device_type & DEVICE_TYPE_MIPI_OUTPUT; +} + static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->i915->display.vbt.version < 158) @@ -2642,7 +2649,7 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata, { struct drm_i915_private *i915 = devdata->i915; const struct child_device_config *child = &devdata->child; - bool is_dvi, is_hdmi, is_dp, is_edp, is_crt, supports_typec_usb, supports_tbt; + bool is_dvi, is_hdmi, is_dp, is_edp, is_dsi, is_crt, supports_typec_usb, supports_tbt; int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock; is_dvi = intel_bios_encoder_supports_dvi(devdata); @@ -2650,13 +2657,14 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata, is_crt = intel_bios_encoder_supports_crt(devdata); is_hdmi = intel_bios_encoder_supports_hdmi(devdata); is_edp = intel_bios_encoder_supports_edp(devdata); + is_dsi = intel_bios_encoder_supports_dsi(devdata); supports_typec_usb = intel_bios_encoder_supports_typec_usb(devdata); supports_tbt = intel_bios_encoder_supports_tbt(devdata); drm_dbg_kms(&i915->drm, - "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n", - port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, + "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n", + port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, is_dsi, HAS_LSPCON(i915) && child->lspcon, supports_typec_usb, supports_tbt, devdata->dsc != NULL); @@ -2701,6 +2709,8 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata) enum port port; port = dvo_port_to_port(i915, child->dvo_port); + if (port == PORT_NONE && DISPLAY_VER(i915) >= 11) + port = dsi_dvo_port_to_port(i915, child->dvo_port); if (port == PORT_NONE) return; @@ -3191,14 +3201,26 @@ out: kfree(oprom_vbt); } -void intel_bios_init_panel(struct drm_i915_private *i915, - struct intel_panel *panel, - const struct intel_bios_encoder_data *devdata, - const struct edid *edid) +static void intel_bios_init_panel(struct drm_i915_private *i915, + struct intel_panel *panel, + const struct intel_bios_encoder_data *devdata, + const struct edid *edid, + bool use_fallback) { - init_vbt_panel_defaults(panel); + /* already have it? 
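Illustrative aside: the get_panel_type() table above tries each source (OpRegion, VBT, PnP ID, then the explicit fallback) with the new use_fallback flag threaded through to every callback. A simplified standalone model of that priority-ordered probing follows; the probe functions and their return values are made up, and the driver performs additional validity checks that are omitted here:

#include <stdio.h>

struct panel_probe {
        const char *name;
        int (*get)(int use_fallback);
};

static int probe_opregion(int use_fallback) { (void)use_fallback; return -1; }
static int probe_vbt(int use_fallback)      { (void)use_fallback; return 2; }
static int probe_fallback(int use_fallback) { return use_fallback ? 0 : -1; }

static int pick_panel_type(int use_fallback)
{
        static const struct panel_probe probes[] = {
                { "OpRegion", probe_opregion },
                { "VBT",      probe_vbt },
                { "fallback", probe_fallback },
        };
        unsigned int i;

        for (i = 0; i < sizeof(probes) / sizeof(probes[0]); i++) {
                int type = probes[i].get(use_fallback);

                if (type >= 0) {
                        printf("panel type %d from %s\n", type, probes[i].name);
                        return type;
                }
        }
        return -1;      /* nothing yet; a later call may allow the fallback */
}

int main(void)
{
        pick_panel_type(0);     /* early pass, fallback not allowed */
        pick_panel_type(1);     /* late pass, fallback allowed */
        return 0;
}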
*/ + if (panel->vbt.panel_type >= 0) { + drm_WARN_ON(&i915->drm, !use_fallback); + return; + } - panel->vbt.panel_type = get_panel_type(i915, devdata, edid); + panel->vbt.panel_type = get_panel_type(i915, devdata, + edid, use_fallback); + if (panel->vbt.panel_type < 0) { + drm_WARN_ON(&i915->drm, use_fallback); + return; + } + + init_vbt_panel_defaults(panel); parse_panel_options(i915, panel); parse_generic_dtd(i915, panel); @@ -3213,6 +3235,21 @@ void intel_bios_init_panel(struct drm_i915_private *i915, parse_mipi_sequence(i915, panel); } +void intel_bios_init_panel_early(struct drm_i915_private *i915, + struct intel_panel *panel, + const struct intel_bios_encoder_data *devdata) +{ + intel_bios_init_panel(i915, panel, devdata, NULL, false); +} + +void intel_bios_init_panel_late(struct drm_i915_private *i915, + struct intel_panel *panel, + const struct intel_bios_encoder_data *devdata, + const struct edid *edid) +{ + intel_bios_init_panel(i915, panel, devdata, edid, true); +} + /** * intel_bios_driver_remove - Free any resources allocated by intel_bios_init() * @i915: i915 device instance diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index e375405a7828..ff1fdd2e0c1c 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -232,10 +232,13 @@ struct mipi_pps_data { } __packed; void intel_bios_init(struct drm_i915_private *dev_priv); -void intel_bios_init_panel(struct drm_i915_private *dev_priv, - struct intel_panel *panel, - const struct intel_bios_encoder_data *devdata, - const struct edid *edid); +void intel_bios_init_panel_early(struct drm_i915_private *dev_priv, + struct intel_panel *panel, + const struct intel_bios_encoder_data *devdata); +void intel_bios_init_panel_late(struct drm_i915_private *dev_priv, + struct intel_panel *panel, + const struct intel_bios_encoder_data *devdata, + const struct edid *edid); void intel_bios_fini_panel(struct intel_panel *panel); void intel_bios_driver_remove(struct drm_i915_private *dev_priv); bool intel_bios_is_valid_vbt(const void *buf, size_t size); diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c index 6d5cbeb8df4d..8bb296f3d625 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -54,7 +54,7 @@ int intel_connector_init(struct intel_connector *connector) __drm_atomic_helper_connector_reset(&connector->base, &conn_state->base); - INIT_LIST_HEAD(&connector->panel.fixed_modes); + intel_panel_init_alloc(connector); return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index de77054195c6..4bbb84847ecb 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -5969,6 +5969,10 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state) if (ret) return ret; + ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc); + if (ret) + return ret; + ret = intel_atomic_add_affected_planes(state, crtc); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 298d00a11f47..135dbcab62b2 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -291,7 +291,7 @@ struct intel_vbt_panel_data { struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ /* Feature bits */ - 
unsigned int panel_type:4; + int panel_type; unsigned int lvds_dither:1; unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index b94bcceeff70..2e09899f2f92 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -5179,6 +5179,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, return false; } + intel_bios_init_panel_early(dev_priv, &intel_connector->panel, + encoder->devdata); + intel_pps_init(intel_dp); /* Cache DPCD and EDID for edp. */ @@ -5213,8 +5216,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, } intel_connector->edid = edid; - intel_bios_init_panel(dev_priv, &intel_connector->panel, - encoder->devdata, IS_ERR(edid) ? NULL : edid); + intel_bios_init_panel_late(dev_priv, &intel_connector->panel, + encoder->devdata, IS_ERR(edid) ? NULL : edid); intel_panel_add_edid_fixed_modes(intel_connector, true); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 03604a37931c..27c2098dd707 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -1003,3 +1003,64 @@ bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state) return crtc_state->mst_master_transcoder != INVALID_TRANSCODER && crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder; } + +/** + * intel_dp_mst_add_topology_state_for_connector - add MST topology state for a connector + * @state: atomic state + * @connector: connector to add the state for + * @crtc: the CRTC @connector is attached to + * + * Add the MST topology state for @connector to @state. + * + * Returns 0 on success, negative error code on failure. + */ +static int +intel_dp_mst_add_topology_state_for_connector(struct intel_atomic_state *state, + struct intel_connector *connector, + struct intel_crtc *crtc) +{ + struct drm_dp_mst_topology_state *mst_state; + + if (!connector->mst_port) + return 0; + + mst_state = drm_atomic_get_mst_topology_state(&state->base, + &connector->mst_port->mst_mgr); + if (IS_ERR(mst_state)) + return PTR_ERR(mst_state); + + mst_state->pending_crtc_mask |= drm_crtc_mask(&crtc->base); + + return 0; +} + +/** + * intel_dp_mst_add_topology_state_for_crtc - add MST topology state for a CRTC + * @state: atomic state + * @crtc: CRTC to add the state for + * + * Add the MST topology state for @crtc to @state. + * + * Returns 0 on success, negative error code on failure. 
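Illustrative aside: panel_type changes from a 4-bit unsigned bit-field to a plain int because the early/late init split needs -1 as a "not parsed yet" marker (intel_panel_init_alloc() seeds it, and intel_bios_init_panel() returns early once it is non-negative); an unsigned 4-bit field would silently store -1 as 15. A small sketch of the guard, using a hypothetical struct rather than the i915 types:

#include <stdio.h>

struct panel {
        int panel_type;         /* -1 until a probe succeeds */
};

static void panel_init_alloc(struct panel *p)
{
        p->panel_type = -1;
}

static void panel_init(struct panel *p, int probed_type)
{
        if (p->panel_type >= 0)         /* an earlier pass already found it */
                return;
        if (probed_type < 0)            /* nothing found, try again later */
                return;
        p->panel_type = probed_type;
}

int main(void)
{
        struct panel p;

        panel_init_alloc(&p);
        panel_init(&p, -1);     /* "early" call without EDID: no result yet */
        panel_init(&p, 2);      /* "late" call with EDID: takes effect */
        panel_init(&p, 7);      /* ignored: already initialised */
        printf("panel_type=%d\n", p.panel_type);        /* prints 2 */
        return 0;
}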
+ */ +int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_connector *_connector; + struct drm_connector_state *conn_state; + int i; + + for_each_new_connector_in_state(&state->base, _connector, conn_state, i) { + struct intel_connector *connector = to_intel_connector(_connector); + int ret; + + if (conn_state->crtc != &crtc->base) + continue; + + ret = intel_dp_mst_add_topology_state_for_connector(state, connector, crtc); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h index f7301de6cdfb..f1815bb72267 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.h +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h @@ -8,6 +8,8 @@ #include +struct intel_atomic_state; +struct intel_crtc; struct intel_crtc_state; struct intel_digital_port; struct intel_dp; @@ -18,5 +20,7 @@ int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port); bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state); bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state); bool intel_dp_mst_source_support(struct intel_dp *intel_dp); +int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc); #endif /* __INTEL_DP_MST_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index 112aa0447a0d..9899b5dcd291 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -624,7 +624,13 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev; struct fb_info *info; - if (!ifbdev || !ifbdev->vma) + if (!ifbdev) + return; + + if (drm_WARN_ON(&dev_priv->drm, !HAS_DISPLAY(dev_priv))) + return; + + if (!ifbdev->vma) goto set_suspend; info = ifbdev->helper.fbdev; diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index e5352239b2a2..a749a5a66d62 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -967,8 +967,8 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) } intel_connector->edid = edid; - intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, - IS_ERR(edid) ? NULL : edid); + intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL, + IS_ERR(edid) ? 
NULL : edid); /* Try EDID first */ intel_panel_add_edid_fixed_modes(intel_connector, diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index f72f4646c0d7..b50db0dd20fc 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -648,6 +648,14 @@ intel_panel_mode_valid(struct intel_connector *connector, return MODE_OK; } +void intel_panel_init_alloc(struct intel_connector *connector) +{ + struct intel_panel *panel = &connector->panel; + + connector->panel.vbt.panel_type = -1; + INIT_LIST_HEAD(&panel->fixed_modes); +} + int intel_panel_init(struct intel_connector *connector) { struct intel_panel *panel = &connector->panel; diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h index 5c5b5b7f95b6..4b51e1c51da6 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.h +++ b/drivers/gpu/drm/i915/display/intel_panel.h @@ -18,6 +18,7 @@ struct intel_connector; struct intel_crtc_state; struct intel_encoder; +void intel_panel_init_alloc(struct intel_connector *connector); int intel_panel_init(struct intel_connector *connector); void intel_panel_fini(struct intel_connector *connector); enum drm_connector_status diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 774c1dc31a52..a15e09b55170 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -2891,7 +2891,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) goto err; - intel_bios_init_panel(i915, &intel_connector->panel, NULL, NULL); + intel_bios_init_panel_late(i915, &intel_connector->panel, NULL, NULL); /* * Fetch modes from VBT. 
For SDVO prefer the VBT mode since some diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index b3f5ca280ef2..90e3e41095b3 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -1925,7 +1925,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) intel_dsi->panel_power_off_time = ktime_get_boottime(); - intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL); + intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL, NULL); if (intel_connector->panel.vbt.dsi.config->dual_link) intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 3dcec7acb384..4f0dbeebb79f 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -151,8 +151,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) OUT_RING(ring, 1); /* Enable local preemption for finegrain preemption */ - OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1); - OUT_RING(ring, 0x02); + OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1); + OUT_RING(ring, 0x1); /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */ OUT_PKT7(ring, CP_YIELD_ENABLE, 1); @@ -808,7 +808,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu) gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F); /* Set the highest bank bit */ - if (adreno_is_a540(adreno_gpu)) + if (adreno_is_a540(adreno_gpu) || adreno_is_a530(adreno_gpu)) regbit = 2; else regbit = 1; diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c index 8abc9a2b114a..e0eef47dae63 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c @@ -63,7 +63,7 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu) struct msm_ringbuffer *ring = gpu->rb[i]; spin_lock_irqsave(&ring->preempt_lock, flags); - empty = (get_wptr(ring) == ring->memptrs->rptr); + empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring)); spin_unlock_irqrestore(&ring->preempt_lock, flags); if (!empty) @@ -208,6 +208,7 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu) a5xx_gpu->preempt[i]->wptr = 0; a5xx_gpu->preempt[i]->rptr = 0; a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova; + a5xx_gpu->preempt[i]->rptr_addr = shadowptr(a5xx_gpu, gpu->rb[i]); } /* Write a 0 to signal that we aren't switching pagetables */ @@ -259,7 +260,6 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu, ptr->data = 0; ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE; - ptr->rptr_addr = shadowptr(a5xx_gpu, ring); ptr->counter = counters_iova; return 0; diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 628806423f7d..c5c4c93b3689 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -551,13 +551,15 @@ static int adreno_bind(struct device *dev, struct device *master, void *data) return 0; } +static int adreno_system_suspend(struct device *dev); static void adreno_unbind(struct device *dev, struct device *master, void *data) { struct msm_drm_private *priv = dev_get_drvdata(master); struct msm_gpu *gpu = dev_to_gpu(dev); - pm_runtime_force_suspend(dev); + if (pm_runtime_enabled(dev)) + WARN_ON_ONCE(adreno_system_suspend(dev)); gpu->funcs->destroy(gpu); priv->gpu_pdev = NULL; @@ -609,7 +611,7 @@ static int adreno_remove(struct platform_device *pdev) static void adreno_shutdown(struct platform_device *pdev) { 
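Illustrative aside: in the a5xx preemption hunk above, get_next_ring() now compares the write pointer against the read pointer the GPU actually reports instead of a cached shadow value. A toy ring model (not the MSM driver structures) showing why a stale cached rptr gives the wrong emptiness answer:

#include <stdint.h>
#include <stdio.h>

struct ring {
        uint32_t wptr;          /* producer: next slot to write */
        uint32_t hw_rptr;       /* what the consumer has actually processed */
        uint32_t cached_rptr;   /* stale shadow copy */
};

/* a ring is idle only when the consumer has caught up with the producer */
static int ring_empty(const struct ring *r, uint32_t rptr)
{
        return r->wptr == rptr;
}

int main(void)
{
        struct ring r = { .wptr = 8, .hw_rptr = 8, .cached_rptr = 5 };

        printf("cached view: empty=%d\n", ring_empty(&r, r.cached_rptr)); /* 0 */
        printf("hw view:     empty=%d\n", ring_empty(&r, r.hw_rptr));     /* 1 */
        return 0;
}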
- pm_runtime_force_suspend(&pdev->dev); + WARN_ON_ONCE(adreno_system_suspend(&pdev->dev)); } static const struct of_device_id dt_match[] = { diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c index 365738f40976..41c93a18d5cb 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c @@ -12,11 +12,15 @@ #include "dpu_hw_catalog.h" #include "dpu_kms.h" -#define VIG_MASK \ +#define VIG_BASE_MASK \ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\ - BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) |\ + BIT(DPU_SSPP_CDP) |\ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT)) +#define VIG_MASK \ + (VIG_BASE_MASK | \ + BIT(DPU_SSPP_CSC_10BIT)) + #define VIG_MSM8998_MASK \ (VIG_MASK | BIT(DPU_SSPP_SCALER_QSEED3)) @@ -29,7 +33,7 @@ #define VIG_SM8250_MASK \ (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL) | BIT(DPU_SSPP_SCALER_QSEED3LITE)) -#define VIG_QCM2290_MASK (VIG_MASK | BIT(DPU_SSPP_QOS_8LVL)) +#define VIG_QCM2290_MASK (VIG_BASE_MASK | BIT(DPU_SSPP_QOS_8LVL)) #define DMA_MSM8998_MASK \ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) |\ @@ -51,7 +55,7 @@ (DMA_MSM8998_MASK | BIT(DPU_SSPP_CURSOR)) #define MIXER_MSM8998_MASK \ - (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER)) + (BIT(DPU_MIXER_SOURCESPLIT)) #define MIXER_SDM845_MASK \ (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA)) @@ -283,7 +287,6 @@ static const struct dpu_caps qcm2290_dpu_caps = { .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH, .max_mixer_blendstages = 0x4, .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, - .ubwc_version = DPU_HW_UBWC_VER_20, .has_dim_layer = true, .has_idle_pc = true, .max_linewidth = 2160, @@ -604,19 +607,19 @@ static const struct dpu_ctl_cfg sdm845_ctl[] = { static const struct dpu_ctl_cfg sc7180_ctl[] = { { .name = "ctl_0", .id = CTL_0, - .base = 0x1000, .len = 0xE4, + .base = 0x1000, .len = 0x1dc, .features = BIT(DPU_CTL_ACTIVE_CFG), .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9), }, { .name = "ctl_1", .id = CTL_1, - .base = 0x1200, .len = 0xE4, + .base = 0x1200, .len = 0x1dc, .features = BIT(DPU_CTL_ACTIVE_CFG), .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10), }, { .name = "ctl_2", .id = CTL_2, - .base = 0x1400, .len = 0xE4, + .base = 0x1400, .len = 0x1dc, .features = BIT(DPU_CTL_ACTIVE_CFG), .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11), }, @@ -810,9 +813,9 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = { SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_MSM8998_MASK, sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1), SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_MSM8998_MASK, - sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0), + sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2), SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_MSM8998_MASK, - sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1), + sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3), }; static const struct dpu_sspp_cfg sdm845_sspp[] = { @@ -1918,8 +1921,6 @@ static const struct dpu_mdss_cfg qcm2290_dpu_cfg = { .intf = qcm2290_intf, .vbif_count = ARRAY_SIZE(sdm845_vbif), .vbif = sdm845_vbif, - .reg_dma_count = 1, - .dma_cfg = &sdm845_regdma, .perf = &qcm2290_perf_data, .mdss_irqs = IRQ_SC7180_MASK, }; diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c index 7ada957adbbb..58abf5fe97e2 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c @@ -572,6 +572,8 @@ void dpu_rm_release(struct dpu_global_state 
*global_state, ARRAY_SIZE(global_state->ctl_to_enc_id), enc->base.id); _dpu_rm_clear_mapping(global_state->dsc_to_enc_id, ARRAY_SIZE(global_state->dsc_to_enc_id), enc->base.id); + _dpu_rm_clear_mapping(global_state->dspp_to_enc_id, + ARRAY_SIZE(global_state->dspp_to_enc_id), enc->base.id); } int dpu_rm_reserve( diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 7c2cc1262c05..d8c9d184190b 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -627,8 +627,8 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev, int ret = 0; uint32_t i, j; - post_deps = kmalloc_array(nr_syncobjs, sizeof(*post_deps), - GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); + post_deps = kcalloc(nr_syncobjs, sizeof(*post_deps), + GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); if (!post_deps) return ERR_PTR(-ENOMEM); @@ -643,7 +643,6 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev, } post_deps[i].point = syncobj_desc.point; - post_deps[i].chain = NULL; if (syncobj_desc.flags) { ret = -EINVAL; diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h index 591c852f326b..76a6ae5d5652 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h @@ -35,8 +35,9 @@ struct nv50_wndw { int nv50_wndw_new_(const struct nv50_wndw_func *, struct drm_device *, enum drm_plane_type, const char *name, int index, - const u32 *format, enum nv50_disp_interlock_type, - u32 interlock_data, u32 heads, struct nv50_wndw **); + const u32 *format, u32 heads, + enum nv50_disp_interlock_type, u32 interlock_data, + struct nv50_wndw **); void nv50_wndw_flush_set(struct nv50_wndw *, u32 *interlock, struct nv50_wndw_atom *); void nv50_wndw_flush_clr(struct nv50_wndw *, u32 *interlock, bool flush, diff --git a/drivers/iio/accel/mma9551_core.c b/drivers/iio/accel/mma9551_core.c index 64ca7d7a9673..b898f865fb87 100644 --- a/drivers/iio/accel/mma9551_core.c +++ b/drivers/iio/accel/mma9551_core.c @@ -296,9 +296,12 @@ int mma9551_read_config_word(struct i2c_client *client, u8 app_id, ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_CONFIG, reg, NULL, 0, (u8 *)&v, 2); + if (ret < 0) + return ret; + *val = be16_to_cpu(v); - return ret; + return 0; } EXPORT_SYMBOL_NS(mma9551_read_config_word, IIO_MMA9551); @@ -354,9 +357,12 @@ int mma9551_read_status_word(struct i2c_client *client, u8 app_id, ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_STATUS, reg, NULL, 0, (u8 *)&v, 2); + if (ret < 0) + return ret; + *val = be16_to_cpu(v); - return ret; + return 0; } EXPORT_SYMBOL_NS(mma9551_read_status_word, IIO_MMA9551); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 26d1772179b8..8730674ceb2e 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -479,13 +479,20 @@ static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa, if (sa->sa_family != sb->sa_family) return sa->sa_family - sb->sa_family; - if (sa->sa_family == AF_INET) - return memcmp((char *)&((struct sockaddr_in *)sa)->sin_addr, - (char *)&((struct sockaddr_in *)sb)->sin_addr, + if (sa->sa_family == AF_INET && + __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in)) { + return memcmp(&((struct sockaddr_in *)sa)->sin_addr, + &((struct sockaddr_in *)sb)->sin_addr, sizeof(((struct sockaddr_in *)sa)->sin_addr)); + } - return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr, - &((struct sockaddr_in6 
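Illustrative aside: the mma9551 read helpers above now check the transfer result before byte-swapping and return 0 on success instead of the raw transfer return value. The same pattern in a standalone sketch, with fake_transfer() standing in for the real I2C transaction:

#include <stdint.h>
#include <stdio.h>

static int fake_transfer(uint8_t *buf, int fail)
{
        if (fail)
                return -5;      /* transport error */
        buf[0] = 0x12;          /* big-endian register contents */
        buf[1] = 0x34;
        return 2;               /* bytes transferred */
}

static int read_config_word(uint16_t *val, int fail)
{
        uint8_t raw[2];
        int ret = fake_transfer(raw, fail);

        if (ret < 0)
                return ret;     /* do not touch *val on error */

        *val = (uint16_t)(raw[0] << 8 | raw[1]);        /* be16_to_cpu */
        return 0;               /* success is 0, not the byte count */
}

int main(void)
{
        uint16_t v = 0;

        printf("ok:   ret=%d val=0x%04x\n", read_config_word(&v, 0), v);
        printf("fail: ret=%d\n", read_config_word(&v, 1));
        return 0;
}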
*)sb)->sin6_addr); + if (sa->sa_family == AF_INET6 && + __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in6)) { + return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr, + &((struct sockaddr_in6 *)sb)->sin6_addr); + } + + return -1; } static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv) diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index ebe970f76232..90b672feed83 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -1056,7 +1056,7 @@ static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr); static void handle_temp_err(struct hfi1_devdata *dd); static void dc_shutdown(struct hfi1_devdata *dd); static void dc_start(struct hfi1_devdata *dd); -static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, +static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp, unsigned int *np); static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd); static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms); @@ -13362,7 +13362,6 @@ static int set_up_context_variables(struct hfi1_devdata *dd) int ret; unsigned ngroups; int rmt_count; - int user_rmt_reduced; u32 n_usr_ctxts; u32 send_contexts = chip_send_contexts(dd); u32 rcv_contexts = chip_rcv_contexts(dd); @@ -13421,28 +13420,34 @@ static int set_up_context_variables(struct hfi1_devdata *dd) (num_kernel_contexts + n_usr_ctxts), &node_affinity.real_cpu_mask); /* - * The RMT entries are currently allocated as shown below: - * 1. QOS (0 to 128 entries); - * 2. FECN (num_kernel_context - 1 + num_user_contexts + - * num_netdev_contexts); - * 3. netdev (num_netdev_contexts). - * It should be noted that FECN oversubscribe num_netdev_contexts - * entries of RMT because both netdev and PSM could allocate any receive - * context between dd->first_dyn_alloc_text and dd->num_rcv_contexts, - * and PSM FECN must reserve an RMT entry for each possible PSM receive - * context. + * RMT entries are allocated as follows: + * 1. QOS (0 to 128 entries) + * 2. FECN (num_kernel_context - 1 [a] + num_user_contexts + + * num_netdev_contexts [b]) + * 3. netdev (NUM_NETDEV_MAP_ENTRIES) + * + * Notes: + * [a] Kernel contexts (except control) are included in FECN if kernel + * TID_RDMA is active. + * [b] Netdev and user contexts are randomly allocated from the same + * context pool, so FECN must cover all contexts in the pool. */ - rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_netdev_contexts * 2); - if (HFI1_CAP_IS_KSET(TID_RDMA)) - rmt_count += num_kernel_contexts - 1; - if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) { - user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count; - dd_dev_err(dd, - "RMT size is reducing the number of user receive contexts from %u to %d\n", - n_usr_ctxts, - user_rmt_reduced); - /* recalculate */ - n_usr_ctxts = user_rmt_reduced; + rmt_count = qos_rmt_entries(num_kernel_contexts - 1, NULL, NULL) + + (HFI1_CAP_IS_KSET(TID_RDMA) ? 
num_kernel_contexts - 1 + : 0) + + n_usr_ctxts + + num_netdev_contexts + + NUM_NETDEV_MAP_ENTRIES; + if (rmt_count > NUM_MAP_ENTRIES) { + int over = rmt_count - NUM_MAP_ENTRIES; + /* try to squish user contexts, minimum of 1 */ + if (over >= n_usr_ctxts) { + dd_dev_err(dd, "RMT overflow: reduce the requested number of contexts\n"); + return -EINVAL; + } + dd_dev_err(dd, "RMT overflow: reducing # user contexts from %u to %u\n", + n_usr_ctxts, n_usr_ctxts - over); + n_usr_ctxts -= over; } /* the first N are kernel contexts, the rest are user/netdev contexts */ @@ -14299,15 +14304,15 @@ static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index) } /* return the number of RSM map table entries that will be used for QOS */ -static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, +static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp, unsigned int *np) { int i; unsigned int m, n; - u8 max_by_vl = 0; + uint max_by_vl = 0; /* is QOS active at all? */ - if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS || + if (n_krcv_queues < MIN_KERNEL_KCTXTS || num_vls == 1 || krcvqsset <= 1) goto no_qos; @@ -14365,7 +14370,7 @@ static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt) if (!rmt) goto bail; - rmt_entries = qos_rmt_entries(dd, &m, &n); + rmt_entries = qos_rmt_entries(dd->n_krcv_queues - 1, &m, &n); if (rmt_entries == 0) goto bail; qpns_per_vl = 1 << m; diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c index 4b7eee01c6aa..615646a03039 100644 --- a/drivers/input/touchscreen/exc3000.c +++ b/drivers/input/touchscreen/exc3000.c @@ -109,6 +109,11 @@ static inline void exc3000_schedule_timer(struct exc3000_data *data) mod_timer(&data->timer, jiffies + msecs_to_jiffies(EXC3000_TIMEOUT_MS)); } +static void exc3000_shutdown_timer(void *timer) +{ + del_timer_sync(timer); +} + static int exc3000_read_frame(struct exc3000_data *data, u8 *buf) { struct i2c_client *client = data->client; @@ -386,6 +391,11 @@ static int exc3000_probe(struct i2c_client *client) if (error) return error; + error = devm_add_action_or_reset(&client->dev, exc3000_shutdown_timer, + &data->timer); + if (error) + return error; + error = devm_request_threaded_irq(&client->dev, client->irq, NULL, exc3000_interrupt, IRQF_ONESHOT, client->name, data); diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 968e5e6668b2..20adb9b323d8 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -1712,27 +1712,29 @@ static int pdev_pri_ats_enable(struct pci_dev *pdev) /* Only allow access to user-accessible pages */ ret = pci_enable_pasid(pdev, 0); if (ret) - goto out_err; + return ret; /* First reset the PRI state of the device */ ret = pci_reset_pri(pdev); if (ret) - goto out_err; + goto out_err_pasid; /* Enable PRI */ /* FIXME: Hardcode number of outstanding requests for now */ ret = pci_enable_pri(pdev, 32); if (ret) - goto out_err; + goto out_err_pasid; ret = pci_enable_ats(pdev, PAGE_SHIFT); if (ret) - goto out_err; + goto out_err_pri; return 0; -out_err: +out_err_pri: pci_disable_pri(pdev); + +out_err_pasid: pci_disable_pasid(pdev); return ret; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index fd8c8aeb3c50..bfb2f163c691 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2089,8 +2089,22 @@ static int __iommu_attach_group(struct iommu_domain *domain, ret = __iommu_group_for_each_dev(group, domain, iommu_group_do_attach_device); - if (ret == 0) + if (ret == 0) { group->domain = domain; + } else { + 
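Illustrative aside: the reworked hfi1 accounting above sums all fixed RMT consumers first and only then squeezes the user-context request by the overflow, failing cleanly if that would leave no user contexts at all. A toy version of that arithmetic; the table size and entry counts below are illustrative, not the real hfi1 numbers:

#include <stdio.h>

#define NUM_MAP_ENTRIES 256     /* illustrative map-table size */

static int fit_contexts(int fixed_entries, int *n_usr_ctxts)
{
        int rmt_count = fixed_entries + *n_usr_ctxts;
        int over;

        if (rmt_count <= NUM_MAP_ENTRIES)
                return 0;

        over = rmt_count - NUM_MAP_ENTRIES;
        if (over >= *n_usr_ctxts)
                return -1;              /* cannot be satisfied, fail cleanly */

        *n_usr_ctxts -= over;           /* squeeze user contexts instead */
        return 0;
}

int main(void)
{
        int usr = 64;

        if (fit_contexts(224, &usr) == 0)
                printf("user contexts reduced to %d\n", usr);   /* 32 */
        return 0;
}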
/* + * To recover from the case when certain device within the + * group fails to attach to the new domain, we need force + * attaching all devices back to the old domain. The old + * domain is compatible for all devices in the group, + * hence the iommu driver should always return success. + */ + struct iommu_domain *old_domain = group->domain; + + group->domain = NULL; + WARN(__iommu_group_set_domain(group, old_domain), + "iommu driver failed to attach a compatible domain"); + } return ret; } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index d7386851569c..f351c15110a7 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -18,7 +18,9 @@ #include #include #include +#include #include +#include #include #include @@ -45,21 +47,7 @@ struct redist_region { bool single_redist; }; -struct gic_chip_data { - struct fwnode_handle *fwnode; - void __iomem *dist_base; - struct redist_region *redist_regions; - struct rdists rdists; - struct irq_domain *domain; - u64 redist_stride; - u32 nr_redist_regions; - u64 flags; - bool has_rss; - unsigned int ppi_nr; - struct partition_desc **ppi_descs; -}; - -static struct gic_chip_data gic_data __read_mostly; +static struct gic_chip_data_v3 gic_data __read_mostly; static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); #define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer)) @@ -1407,6 +1395,26 @@ static void gic_cpu_pm_init(void) static inline void gic_cpu_pm_init(void) { } #endif /* CONFIG_CPU_PM */ +#ifdef CONFIG_PM +static void gic_resume(void) +{ + trace_android_vh_gic_resume(&gic_data); +} + +static struct syscore_ops gic_syscore_ops = { + .resume = gic_resume, +}; + +static void gic_syscore_init(void) +{ + register_syscore_ops(&gic_syscore_ops); +} + +#else +static inline void gic_syscore_init(void) { } +#endif + + static struct irq_chip gic_chip = { .name = "GICv3", .irq_mask = gic_mask_irq, @@ -1690,7 +1698,7 @@ static const struct irq_domain_ops partition_domain_ops = { static bool gic_enable_quirk_msm8996(void *data) { - struct gic_chip_data *d = data; + struct gic_chip_data_v3 *d = data; d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996; @@ -1699,7 +1707,7 @@ static bool gic_enable_quirk_msm8996(void *data) static bool gic_enable_quirk_cavium_38539(void *data) { - struct gic_chip_data *d = data; + struct gic_chip_data_v3 *d = data; d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539; @@ -1708,7 +1716,7 @@ static bool gic_enable_quirk_cavium_38539(void *data) static bool gic_enable_quirk_hip06_07(void *data) { - struct gic_chip_data *d = data; + struct gic_chip_data_v3 *d = data; /* * HIP06 GICD_IIDR clashes with GIC-600 product number (despite @@ -1895,6 +1903,7 @@ static int __init gic_init_bases(void __iomem *dist_base, gic_cpu_init(); gic_smp_init(); gic_cpu_pm_init(); + gic_syscore_init(); if (gic_dist_supports_lpis()) { its_init(handle, &gic_data.rdists, gic_data.domain); diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c index 204661c8e918..9dd80837a759 100644 --- a/drivers/macintosh/windfarm_lm75_sensor.c +++ b/drivers/macintosh/windfarm_lm75_sensor.c @@ -33,8 +33,8 @@ #endif struct wf_lm75_sensor { - int ds1775 : 1; - int inited : 1; + unsigned int ds1775 : 1; + unsigned int inited : 1; struct i2c_client *i2c; struct wf_sensor sens; }; diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c index 00c6fe25fcba..2bdb73b34d29 100644 --- a/drivers/macintosh/windfarm_smu_sensors.c +++ 
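Illustrative aside: the windfarm hunks here swap "int x:1" bit-fields for "unsigned int x:1". With a compiler that treats plain int bit-fields as signed, as GCC does for the kernel, a one-bit field can only hold 0 and -1, so storing 1 and then testing against 1 does not do what it looks like. A short demonstration:

#include <stdio.h>

struct flags_signed   { int          inited : 1; };
struct flags_unsigned { unsigned int inited : 1; };

int main(void)
{
        struct flags_signed   s = { 0 };
        struct flags_unsigned u = { 0 };

        s.inited = 1;   /* stored bit pattern 1 reads back as -1 */
        u.inited = 1;

        printf("signed:   value=%d, (inited == 1) is %s\n",
               s.inited, s.inited == 1 ? "true" : "false");
        printf("unsigned: value=%u, (inited == 1) is %s\n",
               u.inited, u.inited == 1 ? "true" : "false");
        return 0;
}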
b/drivers/macintosh/windfarm_smu_sensors.c @@ -274,8 +274,8 @@ struct smu_cpu_power_sensor { struct list_head link; struct wf_sensor *volts; struct wf_sensor *amps; - int fake_volts : 1; - int quadratic : 1; + unsigned int fake_volts : 1; + unsigned int quadratic : 1; struct wf_sensor sens; }; #define to_smu_cpu_power(c) container_of(c, struct smu_cpu_power_sensor, sens) diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c index 873087e18056..267f514023e7 100644 --- a/drivers/media/i2c/ov5640.c +++ b/drivers/media/i2c/ov5640.c @@ -3482,7 +3482,7 @@ static int ov5640_init_controls(struct ov5640_dev *sensor) /* Auto/manual gain */ ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN, 0, 1, 1, 1); - ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN, + ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_ANALOGUE_GAIN, 0, 1023, 1, 0); ctrls->saturation = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_SATURATION, diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c index 22e524b69806..a56c844d7f81 100644 --- a/drivers/media/rc/gpio-ir-recv.c +++ b/drivers/media/rc/gpio-ir-recv.c @@ -130,6 +130,23 @@ static int gpio_ir_recv_probe(struct platform_device *pdev) "gpio-ir-recv-irq", gpio_dev); } +static int gpio_ir_recv_remove(struct platform_device *pdev) +{ + struct gpio_rc_dev *gpio_dev = platform_get_drvdata(pdev); + struct device *pmdev = gpio_dev->pmdev; + + if (pmdev) { + pm_runtime_get_sync(pmdev); + cpu_latency_qos_remove_request(&gpio_dev->qos); + + pm_runtime_disable(pmdev); + pm_runtime_put_noidle(pmdev); + pm_runtime_set_suspended(pmdev); + } + + return 0; +} + #ifdef CONFIG_PM static int gpio_ir_recv_suspend(struct device *dev) { @@ -189,6 +206,7 @@ MODULE_DEVICE_TABLE(of, gpio_ir_recv_of_match); static struct platform_driver gpio_ir_recv_driver = { .probe = gpio_ir_recv_probe, + .remove = gpio_ir_recv_remove, .driver = { .name = KBUILD_MODNAME, .of_match_table = of_match_ptr(gpio_ir_recv_of_match), diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c index 44b0cfb8ee1c..067b43a1cb3e 100644 --- a/drivers/media/usb/uvc/uvc_ctrl.c +++ b/drivers/media/usb/uvc/uvc_ctrl.c @@ -6,6 +6,7 @@ * Laurent Pinchart (laurent.pinchart@ideasonboard.com) */ +#include #include #include #include @@ -1509,6 +1510,10 @@ static void uvc_ctrl_status_event_work(struct work_struct *work) uvc_ctrl_status_event(w->chain, w->ctrl, w->data); + /* The barrier is needed to synchronize with uvc_status_stop(). */ + if (smp_load_acquire(&dev->flush_status)) + return; + /* Resubmit the URB. 
*/ w->urb->interval = dev->int_ep->desc.bInterval; ret = usb_submit_urb(w->urb, GFP_KERNEL); diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index abfe735f6ea3..a9cdef07e6b1 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -252,14 +252,10 @@ static int uvc_parse_format(struct uvc_device *dev, fmtdesc = uvc_format_by_guid(&buffer[5]); if (fmtdesc != NULL) { - strscpy(format->name, fmtdesc->name, - sizeof(format->name)); format->fcc = fmtdesc->fcc; } else { dev_info(&streaming->intf->dev, "Unknown video format %pUl\n", &buffer[5]); - snprintf(format->name, sizeof(format->name), "%pUl\n", - &buffer[5]); format->fcc = 0; } @@ -271,8 +267,6 @@ static int uvc_parse_format(struct uvc_device *dev, */ if (dev->quirks & UVC_QUIRK_FORCE_Y8) { if (format->fcc == V4L2_PIX_FMT_YUYV) { - strscpy(format->name, "Greyscale 8-bit (Y8 )", - sizeof(format->name)); format->fcc = V4L2_PIX_FMT_GREY; format->bpp = 8; width_multiplier = 2; @@ -313,7 +307,6 @@ static int uvc_parse_format(struct uvc_device *dev, return -EINVAL; } - strscpy(format->name, "MJPEG", sizeof(format->name)); format->fcc = V4L2_PIX_FMT_MJPEG; format->flags = UVC_FMT_FLAG_COMPRESSED; format->bpp = 0; @@ -329,17 +322,7 @@ static int uvc_parse_format(struct uvc_device *dev, return -EINVAL; } - switch (buffer[8] & 0x7f) { - case 0: - strscpy(format->name, "SD-DV", sizeof(format->name)); - break; - case 1: - strscpy(format->name, "SDL-DV", sizeof(format->name)); - break; - case 2: - strscpy(format->name, "HD-DV", sizeof(format->name)); - break; - default: + if ((buffer[8] & 0x7f) > 2) { uvc_dbg(dev, DESCR, "device %d videostreaming interface %d: unknown DV format %u\n", dev->udev->devnum, @@ -347,9 +330,6 @@ static int uvc_parse_format(struct uvc_device *dev, return -EINVAL; } - strlcat(format->name, buffer[8] & (1 << 7) ? 
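Illustrative aside: with the per-format name strings removed, uvc_parse_format() identifies formats purely by their fourcc and logs them via the %p4cc printk specifier. A userspace stand-in showing that the readable name is always recoverable from the pixel format code, since a V4L2 fourcc is four ASCII characters packed little-endian:

#include <stdint.h>
#include <stdio.h>

#define FOURCC(a, b, c, d) \
        ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
         ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))

static void print_fourcc(uint32_t fcc)
{
        printf("%c%c%c%c (0x%08x)\n",
               (char)(fcc & 0xff), (char)((fcc >> 8) & 0xff),
               (char)((fcc >> 16) & 0xff), (char)((fcc >> 24) & 0xff), fcc);
}

int main(void)
{
        print_fourcc(FOURCC('Y', 'U', 'Y', 'V'));       /* V4L2_PIX_FMT_YUYV */
        print_fourcc(FOURCC('M', 'J', 'P', 'G'));       /* V4L2_PIX_FMT_MJPEG */
        return 0;
}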
" 60Hz" : " 50Hz", - sizeof(format->name)); - format->fcc = V4L2_PIX_FMT_DV; format->flags = UVC_FMT_FLAG_COMPRESSED | UVC_FMT_FLAG_STREAM; format->bpp = 0; @@ -376,7 +356,7 @@ static int uvc_parse_format(struct uvc_device *dev, return -EINVAL; } - uvc_dbg(dev, DESCR, "Found format %s\n", format->name); + uvc_dbg(dev, DESCR, "Found format %p4cc", &format->fcc); buflen -= buffer[0]; buffer += buffer[0]; @@ -880,10 +860,8 @@ static int uvc_parse_vendor_control(struct uvc_device *dev, + n; memcpy(unit->extension.bmControls, &buffer[23+p], 2*n); - if (buffer[24+p+2*n] != 0) - usb_string(udev, buffer[24+p+2*n], unit->name, - sizeof(unit->name)); - else + if (buffer[24+p+2*n] == 0 || + usb_string(udev, buffer[24+p+2*n], unit->name, sizeof(unit->name)) < 0) sprintf(unit->name, "Extension %u", buffer[3]); list_add_tail(&unit->list, &dev->entities); @@ -1007,15 +985,15 @@ static int uvc_parse_standard_control(struct uvc_device *dev, memcpy(term->media.bmTransportModes, &buffer[10+n], p); } - if (buffer[7] != 0) - usb_string(udev, buffer[7], term->name, - sizeof(term->name)); - else if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) - sprintf(term->name, "Camera %u", buffer[3]); - else if (UVC_ENTITY_TYPE(term) == UVC_ITT_MEDIA_TRANSPORT_INPUT) - sprintf(term->name, "Media %u", buffer[3]); - else - sprintf(term->name, "Input %u", buffer[3]); + if (buffer[7] == 0 || + usb_string(udev, buffer[7], term->name, sizeof(term->name)) < 0) { + if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA) + sprintf(term->name, "Camera %u", buffer[3]); + if (UVC_ENTITY_TYPE(term) == UVC_ITT_MEDIA_TRANSPORT_INPUT) + sprintf(term->name, "Media %u", buffer[3]); + else + sprintf(term->name, "Input %u", buffer[3]); + } list_add_tail(&term->list, &dev->entities); break; @@ -1048,10 +1026,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev, memcpy(term->baSourceID, &buffer[7], 1); - if (buffer[8] != 0) - usb_string(udev, buffer[8], term->name, - sizeof(term->name)); - else + if (buffer[8] == 0 || + usb_string(udev, buffer[8], term->name, sizeof(term->name)) < 0) sprintf(term->name, "Output %u", buffer[3]); list_add_tail(&term->list, &dev->entities); @@ -1073,10 +1049,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev, memcpy(unit->baSourceID, &buffer[5], p); - if (buffer[5+p] != 0) - usb_string(udev, buffer[5+p], unit->name, - sizeof(unit->name)); - else + if (buffer[5+p] == 0 || + usb_string(udev, buffer[5+p], unit->name, sizeof(unit->name)) < 0) sprintf(unit->name, "Selector %u", buffer[3]); list_add_tail(&unit->list, &dev->entities); @@ -1106,10 +1080,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev, if (dev->uvc_version >= 0x0110) unit->processing.bmVideoStandards = buffer[9+n]; - if (buffer[8+n] != 0) - usb_string(udev, buffer[8+n], unit->name, - sizeof(unit->name)); - else + if (buffer[8+n] == 0 || + usb_string(udev, buffer[8+n], unit->name, sizeof(unit->name)) < 0) sprintf(unit->name, "Processing %u", buffer[3]); list_add_tail(&unit->list, &dev->entities); @@ -1137,10 +1109,8 @@ static int uvc_parse_standard_control(struct uvc_device *dev, unit->extension.bmControls = (u8 *)unit + sizeof(*unit); memcpy(unit->extension.bmControls, &buffer[23+p], n); - if (buffer[23+p+n] != 0) - usb_string(udev, buffer[23+p+n], unit->name, - sizeof(unit->name)); - else + if (buffer[23+p+n] == 0 || + usb_string(udev, buffer[23+p+n], unit->name, sizeof(unit->name)) < 0) sprintf(unit->name, "Extension %u", buffer[3]); list_add_tail(&unit->list, &dev->entities); @@ -2483,6 +2453,24 @@ static const struct 
usb_device_id uvc_ids[] = { .bInterfaceSubClass = 1, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, + /* Logitech, Webcam C910 */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x046d, + .idProduct = 0x0821, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND)}, + /* Logitech, Webcam B910 */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x046d, + .idProduct = 0x0823, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND)}, /* Logitech Quickcam Fusion */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c index 7c4d2f93d351..cc68dd24eb42 100644 --- a/drivers/media/usb/uvc/uvc_entity.c +++ b/drivers/media/usb/uvc/uvc_entity.c @@ -37,7 +37,7 @@ static int uvc_mc_create_links(struct uvc_video_chain *chain, continue; remote = uvc_entity_by_id(chain->dev, entity->baSourceID[i]); - if (remote == NULL) + if (remote == NULL || remote->num_pads == 0) return -EINVAL; source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING) diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c index 7518ffce22ed..4a92c989cf33 100644 --- a/drivers/media/usb/uvc/uvc_status.c +++ b/drivers/media/usb/uvc/uvc_status.c @@ -6,6 +6,7 @@ * Laurent Pinchart (laurent.pinchart@ideasonboard.com) */ +#include #include #include #include @@ -309,5 +310,41 @@ int uvc_status_start(struct uvc_device *dev, gfp_t flags) void uvc_status_stop(struct uvc_device *dev) { + struct uvc_ctrl_work *w = &dev->async_ctrl; + + /* + * Prevent the asynchronous control handler from requeing the URB. The + * barrier is needed so the flush_status change is visible to other + * CPUs running the asynchronous handler before usb_kill_urb() is + * called below. + */ + smp_store_release(&dev->flush_status, true); + + /* + * Cancel any pending asynchronous work. If any status event was queued, + * process it synchronously. + */ + if (cancel_work_sync(&w->work)) + uvc_ctrl_status_event(w->chain, w->ctrl, w->data); + + /* Kill the urb. */ usb_kill_urb(dev->int_urb); + + /* + * The URB completion handler may have queued asynchronous work. This + * won't resubmit the URB as flush_status is set, but it needs to be + * cancelled before returning or it could then race with a future + * uvc_status_start() call. + */ + if (cancel_work_sync(&w->work)) + uvc_ctrl_status_event(w->chain, w->ctrl, w->data); + + /* + * From this point, there are no events on the queue and the status URB + * is dead. No events will be queued until uvc_status_start() is called. + * The barrier is needed to make sure that flush_status is visible to + * uvc_ctrl_status_event_work() when uvc_status_start() will be called + * again. 
+ */ + smp_store_release(&dev->flush_status, false); } diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index 0774a11360c0..950b42d78a10 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c @@ -661,8 +661,6 @@ static int uvc_ioctl_enum_fmt(struct uvc_streaming *stream, fmt->flags = 0; if (format->flags & UVC_FMT_FLAG_COMPRESSED) fmt->flags |= V4L2_FMT_FLAG_COMPRESSED; - strscpy(fmt->description, format->name, sizeof(fmt->description)); - fmt->description[sizeof(fmt->description) - 1] = 0; fmt->pixelformat = format->fcc; return 0; } diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c index d2eb9066e4dc..0d3a3b697b2d 100644 --- a/drivers/media/usb/uvc/uvc_video.c +++ b/drivers/media/usb/uvc/uvc_video.c @@ -1352,7 +1352,9 @@ static void uvc_video_decode_meta(struct uvc_streaming *stream, if (has_scr) memcpy(stream->clock.last_scr, scr, 6); - memcpy(&meta->length, mem, length); + meta->length = mem[0]; + meta->flags = mem[1]; + memcpy(meta->buf, &mem[2], length - 2); meta_buf->bytesused += length + sizeof(meta->ns) + sizeof(meta->sof); uvc_dbg(stream->dev, FRAME, @@ -1965,6 +1967,17 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream, "Selecting alternate setting %u (%u B/frame bandwidth)\n", altsetting, best_psize); + /* + * Some devices, namely the Logitech C910 and B910, are unable + * to recover from a USB autosuspend, unless the alternate + * setting of the streaming interface is toggled. + */ + if (stream->dev->quirks & UVC_QUIRK_WAKE_AUTOSUSPEND) { + usb_set_interface(stream->dev->udev, intfnum, + altsetting); + usb_set_interface(stream->dev->udev, intfnum, 0); + } + ret = usb_set_interface(stream->dev->udev, intfnum, altsetting); if (ret < 0) return ret; diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h index 1227ae63f85b..33e7475d4e64 100644 --- a/drivers/media/usb/uvc/uvcvideo.h +++ b/drivers/media/usb/uvc/uvcvideo.h @@ -74,6 +74,7 @@ #define UVC_QUIRK_RESTORE_CTRLS_ON_INIT 0x00000400 #define UVC_QUIRK_FORCE_Y8 0x00000800 #define UVC_QUIRK_FORCE_BPP 0x00001000 +#define UVC_QUIRK_WAKE_AUTOSUSPEND 0x00002000 /* Format flags */ #define UVC_FMT_FLAG_COMPRESSED 0x00000001 @@ -264,8 +265,6 @@ struct uvc_format { u32 fcc; u32 flags; - char name[32]; - unsigned int nframes; struct uvc_frame *frame; }; @@ -559,6 +558,7 @@ struct uvc_device { /* Status Interrupt Endpoint */ struct usb_host_endpoint *int_ep; struct urb *int_urb; + bool flush_status; u8 *status; struct input_dev *input; char input_phys[64]; diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c index 61c288d40375..7343dbb79c9f 100644 --- a/drivers/memory/renesas-rpc-if.c +++ b/drivers/memory/renesas-rpc-if.c @@ -162,14 +162,36 @@ static const struct regmap_access_table rpcif_volatile_table = { .n_yes_ranges = ARRAY_SIZE(rpcif_volatile_ranges), }; +struct rpcif_priv { + struct device *dev; + void __iomem *base; + void __iomem *dirmap; + struct regmap *regmap; + struct reset_control *rstc; + struct platform_device *vdev; + size_t size; + enum rpcif_type type; + enum rpcif_data_dir dir; + u8 bus_size; + u8 xfer_size; + void *buffer; + u32 xferlen; + u32 smcr; + u32 smadr; + u32 command; /* DRCMR or SMCMR */ + u32 option; /* DROPR or SMOPR */ + u32 enable; /* DRENR or SMENR */ + u32 dummy; /* DRDMCR or SMDMCR */ + u32 ddr; /* DRDRENR or SMDRENR */ +}; /* * Custom accessor functions to ensure SM[RW]DR[01] are always accessed with - * proper width. 
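Illustrative aside: uvc_status_stop() and uvc_ctrl_status_event_work() pair smp_store_release() with smp_load_acquire() on flush_status, so a work item that observes the flag also observes everything written before it and never requeues the URB during teardown. The same handshake sketched with C11 atomics; this shows only the shape of the pattern, not the kernel code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool flush_status;

static void status_event_work(void)
{
        /* process the queued event here ... */

        if (atomic_load_explicit(&flush_status, memory_order_acquire)) {
                printf("stopping: do not resubmit\n");
                return;
        }
        printf("resubmitting URB\n");
}

static void status_stop(void)
{
        /* raise the flag before killing the URB / flushing pending work */
        atomic_store_explicit(&flush_status, true, memory_order_release);
}

int main(void)
{
        status_event_work();    /* normal path: resubmits */
        status_stop();
        status_event_work();    /* work racing with stop: bails out */
        return 0;
}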
Requires rpcif.xfer_size to be correctly set before! + * proper width. Requires rpcif_priv.xfer_size to be correctly set before! */ static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val) { - struct rpcif *rpc = context; + struct rpcif_priv *rpc = context; switch (reg) { case RPCIF_SMRDR0: @@ -205,7 +227,7 @@ static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val) static int rpcif_reg_write(void *context, unsigned int reg, unsigned int val) { - struct rpcif *rpc = context; + struct rpcif_priv *rpc = context; switch (reg) { case RPCIF_SMWDR0: @@ -252,39 +274,18 @@ static const struct regmap_config rpcif_regmap_config = { .volatile_table = &rpcif_volatile_table, }; -int rpcif_sw_init(struct rpcif *rpc, struct device *dev) +int rpcif_sw_init(struct rpcif *rpcif, struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct resource *res; + struct rpcif_priv *rpc = dev_get_drvdata(dev); - rpc->dev = dev; - - rpc->base = devm_platform_ioremap_resource_byname(pdev, "regs"); - if (IS_ERR(rpc->base)) - return PTR_ERR(rpc->base); - - rpc->regmap = devm_regmap_init(&pdev->dev, NULL, rpc, &rpcif_regmap_config); - if (IS_ERR(rpc->regmap)) { - dev_err(&pdev->dev, - "failed to init regmap for rpcif, error %ld\n", - PTR_ERR(rpc->regmap)); - return PTR_ERR(rpc->regmap); - } - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap"); - rpc->dirmap = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(rpc->dirmap)) - return PTR_ERR(rpc->dirmap); - rpc->size = resource_size(res); - - rpc->type = (uintptr_t)of_device_get_match_data(dev); - rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); - - return PTR_ERR_OR_ZERO(rpc->rstc); + rpcif->dev = dev; + rpcif->dirmap = rpc->dirmap; + rpcif->size = rpc->size; + return 0; } EXPORT_SYMBOL(rpcif_sw_init); -static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif *rpc) +static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif_priv *rpc) { regmap_write(rpc->regmap, RPCIF_PHYWR, 0xa5390000); regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000000); @@ -298,8 +299,9 @@ static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif *rpc) regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000032); } -int rpcif_hw_init(struct rpcif *rpc, bool hyperflash) +int rpcif_hw_init(struct rpcif *rpcif, bool hyperflash) { + struct rpcif_priv *rpc = dev_get_drvdata(rpcif->dev); u32 dummy; pm_runtime_get_sync(rpc->dev); @@ -360,7 +362,7 @@ int rpcif_hw_init(struct rpcif *rpc, bool hyperflash) } EXPORT_SYMBOL(rpcif_hw_init); -static int wait_msg_xfer_end(struct rpcif *rpc) +static int wait_msg_xfer_end(struct rpcif_priv *rpc) { u32 sts; @@ -369,7 +371,7 @@ static int wait_msg_xfer_end(struct rpcif *rpc) USEC_PER_SEC); } -static u8 rpcif_bits_set(struct rpcif *rpc, u32 nbytes) +static u8 rpcif_bits_set(struct rpcif_priv *rpc, u32 nbytes) { if (rpc->bus_size == 2) nbytes /= 2; @@ -382,9 +384,11 @@ static u8 rpcif_bit_size(u8 buswidth) return buswidth > 4 ? 
2 : ilog2(buswidth); } -void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs, +void rpcif_prepare(struct rpcif *rpcif, const struct rpcif_op *op, u64 *offs, size_t *len) { + struct rpcif_priv *rpc = dev_get_drvdata(rpcif->dev); + rpc->smcr = 0; rpc->smadr = 0; rpc->enable = 0; @@ -468,8 +472,9 @@ void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs, } EXPORT_SYMBOL(rpcif_prepare); -int rpcif_manual_xfer(struct rpcif *rpc) +int rpcif_manual_xfer(struct rpcif *rpcif) { + struct rpcif_priv *rpc = dev_get_drvdata(rpcif->dev); u32 smenr, smcr, pos = 0, max = rpc->bus_size == 2 ? 8 : 4; int ret = 0; @@ -589,7 +594,7 @@ exit: err_out: if (reset_control_reset(rpc->rstc)) dev_err(rpc->dev, "Failed to reset HW\n"); - rpcif_hw_init(rpc, rpc->bus_size == 2); + rpcif_hw_init(rpcif, rpc->bus_size == 2); goto exit; } EXPORT_SYMBOL(rpcif_manual_xfer); @@ -636,8 +641,9 @@ static void memcpy_fromio_readw(void *to, } } -ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf) +ssize_t rpcif_dirmap_read(struct rpcif *rpcif, u64 offs, size_t len, void *buf) { + struct rpcif_priv *rpc = dev_get_drvdata(rpcif->dev); loff_t from = offs & (rpc->size - 1); size_t size = rpc->size - from; @@ -670,8 +676,11 @@ EXPORT_SYMBOL(rpcif_dirmap_read); static int rpcif_probe(struct platform_device *pdev) { + struct device *dev = &pdev->dev; struct platform_device *vdev; struct device_node *flash; + struct rpcif_priv *rpc; + struct resource *res; const char *name; int ret; @@ -692,11 +701,40 @@ static int rpcif_probe(struct platform_device *pdev) } of_node_put(flash); + rpc = devm_kzalloc(&pdev->dev, sizeof(*rpc), GFP_KERNEL); + if (!rpc) + return -ENOMEM; + + rpc->base = devm_platform_ioremap_resource_byname(pdev, "regs"); + if (IS_ERR(rpc->base)) + return PTR_ERR(rpc->base); + + rpc->regmap = devm_regmap_init(dev, NULL, rpc, &rpcif_regmap_config); + if (IS_ERR(rpc->regmap)) { + dev_err(dev, "failed to init regmap for rpcif, error %ld\n", + PTR_ERR(rpc->regmap)); + return PTR_ERR(rpc->regmap); + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap"); + rpc->dirmap = devm_ioremap_resource(dev, res); + if (IS_ERR(rpc->dirmap)) + return PTR_ERR(rpc->dirmap); + rpc->size = resource_size(res); + + rpc->type = (uintptr_t)of_device_get_match_data(dev); + rpc->rstc = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(rpc->rstc)) + return PTR_ERR(rpc->rstc); + vdev = platform_device_alloc(name, pdev->id); if (!vdev) return -ENOMEM; vdev->dev.parent = &pdev->dev; - platform_set_drvdata(pdev, vdev); + + rpc->dev = &pdev->dev; + rpc->vdev = vdev; + platform_set_drvdata(pdev, rpc); ret = platform_device_add(vdev); if (ret) { @@ -709,9 +747,9 @@ static int rpcif_probe(struct platform_device *pdev) static int rpcif_remove(struct platform_device *pdev) { - struct platform_device *vdev = platform_get_drvdata(pdev); + struct rpcif_priv *rpc = platform_get_drvdata(pdev); - platform_device_unregister(vdev); + platform_device_unregister(rpc->vdev); return 0; } diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index cbf1dd90b70d..b1c53e040771 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -45,7 +45,7 @@ int arizona_clk32k_enable(struct arizona *arizona) if (arizona->clk32k_ref == 1) { switch (arizona->pdata.clk32k_src) { case ARIZONA_32KZ_MCLK1: - ret = pm_runtime_get_sync(arizona->dev); + ret = pm_runtime_resume_and_get(arizona->dev); if (ret != 0) goto err_ref; ret = 
clk_prepare_enable(arizona->mclk[ARIZONA_MCLK1]); diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 71fbf0bc8453..5eeb4d04c096 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c @@ -151,7 +151,7 @@ static int mei_fwver(struct mei_cl_device *cldev) ret = __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0, MEI_CL_IO_TX_BLOCKING); if (ret < 0) { - dev_err(&cldev->dev, "Could not send ReqFWVersion cmd\n"); + dev_err(&cldev->dev, "Could not send ReqFWVersion cmd ret = %d\n", ret); return ret; } @@ -163,7 +163,7 @@ static int mei_fwver(struct mei_cl_device *cldev) * Should be at least one version block, * error out if nothing found */ - dev_err(&cldev->dev, "Could not read FW version\n"); + dev_err(&cldev->dev, "Could not read FW version ret = %d\n", bytes_recv); return -EIO; } @@ -376,7 +376,7 @@ static int mei_nfc_if_version(struct mei_cl *cl, ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(cmd), 0, MEI_CL_IO_TX_BLOCKING); if (ret < 0) { - dev_err(bus->dev, "Could not send IF version cmd\n"); + dev_err(bus->dev, "Could not send IF version cmd ret = %d\n", ret); return ret; } @@ -391,7 +391,7 @@ static int mei_nfc_if_version(struct mei_cl *cl, bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, &vtag, 0, 0); if (bytes_recv < 0 || (size_t)bytes_recv < if_version_length) { - dev_err(bus->dev, "Could not read IF version\n"); + dev_err(bus->dev, "Could not read IF version ret = %d\n", bytes_recv); ret = -EIO; goto err; } diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 61a2be712bf7..9ce9b9e0e9b6 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -1709,7 +1709,7 @@ static void __init vmballoon_debugfs_init(struct vmballoon *b) static void __exit vmballoon_debugfs_exit(struct vmballoon *b) { static_key_disable(&balloon_stat_enabled.key); - debugfs_remove(debugfs_lookup("vmmemctl", NULL)); + debugfs_lookup_and_remove("vmmemctl", NULL); kfree(b->stats); b->stats = NULL; } diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index a901f8edfa41..7f65af169751 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -468,6 +468,7 @@ static int uif_init(struct ubi_device *ubi) err = ubi_add_volume(ubi, ubi->volumes[i]); if (err) { ubi_err(ubi, "cannot add volume %d", i); + ubi->volumes[i] = NULL; goto out_volumes; } } @@ -663,6 +664,12 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024) ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size); ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size); + if (ubi->vid_hdr_offset && ((ubi->vid_hdr_offset + UBI_VID_HDR_SIZE) > + ubi->vid_hdr_alsize)) { + ubi_err(ubi, "VID header offset %d too large.", ubi->vid_hdr_offset); + return -EINVAL; + } + dbg_gen("min_io_size %d", ubi->min_io_size); dbg_gen("max_write_size %d", ubi->max_write_size); dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size); diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c index 0ee452275578..863f571f1adb 100644 --- a/drivers/mtd/ubi/fastmap-wl.c +++ b/drivers/mtd/ubi/fastmap-wl.c @@ -146,13 +146,15 @@ void ubi_refill_pools(struct ubi_device *ubi) if (ubi->fm_anchor) { wl_tree_add(ubi->fm_anchor, &ubi->free); ubi->free_count++; + ubi->fm_anchor = NULL; } - /* - * All available PEBs are in ubi->free, now is the time to get - * the best anchor PEBs. 
- */ - ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1); + if (!ubi->fm_disabled) + /* + * All available PEBs are in ubi->free, now is the time to get + * the best anchor PEBs. + */ + ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1); for (;;) { enough = 0; diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 8fcc0bdf0635..2c867d16f89f 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -464,7 +464,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) for (i = 0; i < -pebs; i++) { err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i); if (err) - goto out_acc; + goto out_free; } spin_lock(&ubi->volumes_lock); ubi->rsvd_pebs += pebs; @@ -512,8 +512,10 @@ out_acc: ubi->avail_pebs += pebs; spin_unlock(&ubi->volumes_lock); } + return err; + out_free: - kfree(new_eba_tbl); + ubi_eba_destroy_table(new_eba_tbl); return err; } @@ -580,6 +582,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol) if (err) { ubi_err(ubi, "cannot add character device for volume %d, error %d", vol_id, err); + vol_release(&vol->dev); return err; } @@ -590,15 +593,14 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol) vol->dev.groups = volume_dev_groups; dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id); err = device_register(&vol->dev); - if (err) - goto out_cdev; + if (err) { + cdev_del(&vol->cdev); + put_device(&vol->dev); + return err; + } self_check_volumes(ubi); return err; - -out_cdev: - cdev_del(&vol->cdev); - return err; } /** diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 68eb0f21b3fe..9e14319225c9 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -890,8 +890,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, err = do_sync_erase(ubi, e1, vol_id, lnum, 0); if (err) { - if (e2) + if (e2) { + spin_lock(&ubi->wl_lock); wl_entry_destroy(ubi, e2); + spin_unlock(&ubi->wl_lock); + } goto out_ro; } @@ -973,11 +976,11 @@ out_error: spin_lock(&ubi->wl_lock); ubi->move_from = ubi->move_to = NULL; ubi->move_to_put = ubi->wl_scheduled = 0; + wl_entry_destroy(ubi, e1); + wl_entry_destroy(ubi, e2); spin_unlock(&ubi->wl_lock); ubi_free_vid_buf(vidb); - wl_entry_destroy(ubi, e1); - wl_entry_destroy(ubi, e2); out_ro: ubi_ro_mode(ubi); @@ -1130,14 +1133,18 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) /* Re-schedule the LEB for erasure */ err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false); if (err1) { + spin_lock(&ubi->wl_lock); wl_entry_destroy(ubi, e); + spin_unlock(&ubi->wl_lock); err = err1; goto out_ro; } return err; } + spin_lock(&ubi->wl_lock); wl_entry_destroy(ubi, e); + spin_unlock(&ubi->wl_lock); if (err != -EIO) /* * If this is not %-EIO, we have no idea what to do. Scheduling @@ -1253,6 +1260,18 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, retry: spin_lock(&ubi->wl_lock); e = ubi->lookuptbl[pnum]; + if (!e) { + /* + * This wl entry has been removed for some errors by other + * process (eg. wear leveling worker), corresponding process + * (except __erase_worker, which cannot concurrent with + * ubi_wl_put_peb) will set ubi ro_mode at the same time, + * just ignore this wl entry. 
+ */ + spin_unlock(&ubi->wl_lock); + up_read(&ubi->fm_protect); + return 0; + } if (e == ubi->move_from) { /* * User is putting the physical eraseblock which was selected to diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index a884f6f6a8c2..1e0b8bcd59e6 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -393,6 +393,24 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid, mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]); } +/* Set up switch core clock for MT7530 */ +static void mt7530_pll_setup(struct mt7530_priv *priv) +{ + /* Disable PLL */ + core_write(priv, CORE_GSWPLL_GRP1, 0); + + /* Set core clock into 500Mhz */ + core_write(priv, CORE_GSWPLL_GRP2, + RG_GSWPLL_POSDIV_500M(1) | + RG_GSWPLL_FBKDIV_500M(25)); + + /* Enable PLL */ + core_write(priv, CORE_GSWPLL_GRP1, + RG_GSWPLL_EN_PRE | + RG_GSWPLL_POSDIV_200M(2) | + RG_GSWPLL_FBKDIV_200M(32)); +} + /* Setup TX circuit including relevant PAD and driving */ static int mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface) @@ -453,21 +471,6 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface) core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN | REG_TRGMIICK_EN); - /* Setup core clock for MT7530 */ - /* Disable PLL */ - core_write(priv, CORE_GSWPLL_GRP1, 0); - - /* Set core clock into 500Mhz */ - core_write(priv, CORE_GSWPLL_GRP2, - RG_GSWPLL_POSDIV_500M(1) | - RG_GSWPLL_FBKDIV_500M(25)); - - /* Enable PLL */ - core_write(priv, CORE_GSWPLL_GRP1, - RG_GSWPLL_EN_PRE | - RG_GSWPLL_POSDIV_200M(2) | - RG_GSWPLL_FBKDIV_200M(32)); - /* Setup the MT7530 TRGMII Tx Clock */ core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1)); core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0)); @@ -2201,6 +2204,8 @@ mt7530_setup(struct dsa_switch *ds) SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST | SYS_CTRL_REG_RST); + mt7530_pll_setup(priv); + /* Enable Port 6 only; P5 as GMAC5 which currently is not supported */ val = mt7530_read(priv, MT7530_MHWTRAP); val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS; diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 26a35ae322d1..cc89cff029e1 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -513,7 +513,7 @@ static const char * const vsc9959_resource_names[TARGET_MAX] = { * SGMII/QSGMII MAC PCS can be found. 
*/ static const struct resource vsc9959_imdio_res = - DEFINE_RES_MEM_NAMED(0x8030, 0x8040, "imdio"); + DEFINE_RES_MEM_NAMED(0x8030, 0x10, "imdio"); static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = { [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6), diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c index 7af33b2c685d..c2863d6d870f 100644 --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -923,8 +923,8 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot) rc = mscc_miim_setup(dev, &bus, "VSC9953 internal MDIO bus", ocelot->targets[GCB], - ocelot->map[GCB][GCB_MIIM_MII_STATUS & REG_MASK]); - + ocelot->map[GCB][GCB_MIIM_MII_STATUS & REG_MASK], + true); if (rc) { dev_err(dev, "failed to setup MDIO bus\n"); return rc; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 3038386a5afd..1761df8fb7f9 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -890,13 +890,13 @@ static void bgmac_chip_reset_idm_config(struct bgmac *bgmac) if (iost & BGMAC_BCMA_IOST_ATTACHED) { flags = BGMAC_BCMA_IOCTL_SW_CLKEN; - if (!bgmac->has_robosw) + if (bgmac->in_init || !bgmac->has_robosw) flags |= BGMAC_BCMA_IOCTL_SW_RESET; } bgmac_clk_enable(bgmac, flags); } - if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw) + if (iost & BGMAC_BCMA_IOST_ATTACHED && (bgmac->in_init || !bgmac->has_robosw)) bgmac_idm_write(bgmac, BCMA_IOCTL, bgmac_idm_read(bgmac, BCMA_IOCTL) & ~BGMAC_BCMA_IOCTL_SW_RESET); @@ -1490,6 +1490,8 @@ int bgmac_enet_probe(struct bgmac *bgmac) struct net_device *net_dev = bgmac->net_dev; int err; + bgmac->in_init = true; + bgmac_chip_intrs_off(bgmac); net_dev->irq = bgmac->irq; @@ -1542,6 +1544,8 @@ int bgmac_enet_probe(struct bgmac *bgmac) /* Omit FCS from max MTU size */ net_dev->max_mtu = BGMAC_RX_MAX_FRAME_SIZE - ETH_FCS_LEN; + bgmac->in_init = false; + err = register_netdev(bgmac->net_dev); if (err) { dev_err(bgmac->dev, "Cannot register net device\n"); diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index e05ac92c0650..d73ef262991d 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -472,6 +472,8 @@ struct bgmac { int irq; u32 int_mask; + bool in_init; + /* Current MAC state */ int mac_speed; int mac_duplex; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index cecda545372f..251b102d2792 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3143,7 +3143,7 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) static void bnxt_free_tpa_info(struct bnxt *bp) { - int i; + int i, j; for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; @@ -3151,8 +3151,10 @@ static void bnxt_free_tpa_info(struct bnxt *bp) kfree(rxr->rx_tpa_idx_map); rxr->rx_tpa_idx_map = NULL; if (rxr->rx_tpa) { - kfree(rxr->rx_tpa[0].agg_arr); - rxr->rx_tpa[0].agg_arr = NULL; + for (j = 0; j < bp->max_tpa; j++) { + kfree(rxr->rx_tpa[j].agg_arr); + rxr->rx_tpa[j].agg_arr = NULL; + } } kfree(rxr->rx_tpa); rxr->rx_tpa = NULL; @@ -3161,14 +3163,13 @@ static void bnxt_free_tpa_info(struct bnxt *bp) static int bnxt_alloc_tpa_info(struct bnxt *bp) { - int i, j, total_aggs = 0; + int i, j; bp->max_tpa = MAX_TPA; if (bp->flags & BNXT_FLAG_CHIP_P5) { if (!bp->max_tpa_v2) return 0; 
bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5); - total_aggs = bp->max_tpa * MAX_SKB_FRAGS; } for (i = 0; i < bp->rx_nr_rings; i++) { @@ -3182,12 +3183,12 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp) if (!(bp->flags & BNXT_FLAG_CHIP_P5)) continue; - agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL); - rxr->rx_tpa[0].agg_arr = agg; - if (!agg) - return -ENOMEM; - for (j = 1; j < bp->max_tpa; j++) - rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS; + for (j = 0; j < bp->max_tpa; j++) { + agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL); + if (!agg) + return -ENOMEM; + rxr->rx_tpa[j].agg_arr = agg; + } rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map), GFP_KERNEL); if (!rxr->rx_tpa_idx_map) diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 0b146a0d4205..6375372f8729 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -1372,7 +1372,7 @@ ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) tlv->ouisubtype = htonl(ouisubtype); buf[0] = dcbcfg->pfc.pfccap & 0xF; - buf[1] = dcbcfg->pfc.pfcena & 0xF; + buf[1] = dcbcfg->pfc.pfcena; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index e1f6373a3a2c..02eb78df2378 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -4145,6 +4145,8 @@ ice_get_module_eeprom(struct net_device *netdev, * SFP modules only ever use page 0. */ if (page == 0 || !(data[0x2] & 0x4)) { + u32 copy_len; + /* If i2c bus is busy due to slow page change or * link management access, call can fail. This is normal. * So we retry this a few times. @@ -4168,8 +4170,8 @@ ice_get_module_eeprom(struct net_device *netdev, } /* Make sure we have enough room for the new block */ - if ((i + SFF_READ_BLOCK_SIZE) < ee->len) - memcpy(data + i, value, SFF_READ_BLOCK_SIZE); + copy_len = min_t(u32, SFF_READ_BLOCK_SIZE, ee->len - i); + memcpy(data + i, value, copy_len); } } return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index f68c555be4e9..71cb15fcf63b 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -1322,8 +1322,8 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, if (match.mask->vlan_priority) { fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_PRIO; headers->vlan_hdr.vlan_prio = - cpu_to_be16((match.key->vlan_priority << - VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK); + be16_encode_bits(match.key->vlan_priority, + VLAN_PRIO_MASK); } if (match.mask->vlan_tpid) @@ -1356,8 +1356,8 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, if (match.mask->vlan_priority) { fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN_PRIO; headers->cvlan_hdr.vlan_prio = - cpu_to_be16((match.key->vlan_priority << - VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK); + be16_encode_bits(match.key->vlan_priority, + VLAN_PRIO_MASK); } } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 76474385a602..b07c6f51b461 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -859,6 +859,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot); int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc); +#define NDC_AF_BANK_MASK GENMASK_ULL(7, 0) +#define NDC_AF_BANK_LINE_MASK 
GENMASK_ULL(31, 16) + /* CN10K RVU */ int rvu_set_channels_base(struct rvu *rvu); void rvu_program_channels(struct rvu *rvu); @@ -874,6 +877,8 @@ static inline void rvu_dbg_init(struct rvu *rvu) {} static inline void rvu_dbg_exit(struct rvu *rvu) {} #endif +int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr); + /* RVU Switch */ void rvu_switch_enable(struct rvu *rvu); void rvu_switch_disable(struct rvu *rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index f66dde2b0f92..abef0fd4259a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -198,9 +198,6 @@ enum cpt_eng_type { CPT_IE_TYPE = 3, }; -#define NDC_MAX_BANK(rvu, blk_addr) (rvu_read64(rvu, \ - blk_addr, NDC_AF_CONST) & 0xFF) - #define rvu_dbg_NULL NULL #define rvu_dbg_open_NULL NULL @@ -1448,6 +1445,7 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr) struct nix_hw *nix_hw; struct rvu *rvu; int bank, max_bank; + u64 ndc_af_const; if (blk_addr == BLKADDR_NDC_NPA0) { rvu = s->private; @@ -1456,7 +1454,8 @@ static int ndc_blk_hits_miss_stats(struct seq_file *s, int idx, int blk_addr) rvu = nix_hw->rvu; } - max_bank = NDC_MAX_BANK(rvu, blk_addr); + ndc_af_const = rvu_read64(rvu, blk_addr, NDC_AF_CONST); + max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const); for (bank = 0; bank < max_bank; bank++) { seq_printf(s, "BANK:%d\n", bank); seq_printf(s, "\tHits:\t%lld\n", diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index a62c1b322012..84f2ba53b8b6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -790,6 +790,7 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, struct nix_aq_res_s *result; int timeout = 1000; u64 reg, head; + int ret; result = (struct nix_aq_res_s *)aq->res->base; @@ -813,9 +814,22 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, return -EBUSY; } - if (result->compcode != NIX_AQ_COMP_GOOD) + if (result->compcode != NIX_AQ_COMP_GOOD) { /* TODO: Replace this with some error code */ + if (result->compcode == NIX_AQ_COMP_CTX_FAULT || + result->compcode == NIX_AQ_COMP_LOCKERR || + result->compcode == NIX_AQ_COMP_CTX_POISON) { + ret = rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_RX); + ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX0_TX); + ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_RX); + ret |= rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NIX1_TX); + if (ret) + dev_err(rvu->dev, + "%s: Not able to unlock cachelines\n", __func__); + } + return -EBUSY; + } return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c index 70bd036ed76e..4f5ca5ab13a4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c @@ -4,7 +4,7 @@ * Copyright (C) 2018 Marvell. 
* */ - +#include #include #include @@ -42,9 +42,18 @@ static int npa_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block, return -EBUSY; } - if (result->compcode != NPA_AQ_COMP_GOOD) + if (result->compcode != NPA_AQ_COMP_GOOD) { /* TODO: Replace this with some error code */ + if (result->compcode == NPA_AQ_COMP_CTX_FAULT || + result->compcode == NPA_AQ_COMP_LOCKERR || + result->compcode == NPA_AQ_COMP_CTX_POISON) { + if (rvu_ndc_fix_locked_cacheline(rvu, BLKADDR_NDC_NPA0)) + dev_err(rvu->dev, + "%s: Not able to unlock cachelines\n", __func__); + } + return -EBUSY; + } return 0; } @@ -545,3 +554,48 @@ void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf) npa_ctx_free(rvu, pfvf); } + +/* Due to an Hardware errata, in some corner cases, AQ context lock + * operations can result in a NDC way getting into an illegal state + * of not valid but locked. + * + * This API solves the problem by clearing the lock bit of the NDC block. + * The operation needs to be done for each line of all the NDC banks. + */ +int rvu_ndc_fix_locked_cacheline(struct rvu *rvu, int blkaddr) +{ + int bank, max_bank, line, max_line, err; + u64 reg, ndc_af_const; + + /* Set the ENABLE bit(63) to '0' */ + reg = rvu_read64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL); + rvu_write64(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, reg & GENMASK_ULL(62, 0)); + + /* Poll until the BUSY bits(47:32) are set to '0' */ + err = rvu_poll_reg(rvu, blkaddr, NDC_AF_CAMS_RD_INTERVAL, GENMASK_ULL(47, 32), true); + if (err) { + dev_err(rvu->dev, "Timed out while polling for NDC CAM busy bits.\n"); + return err; + } + + ndc_af_const = rvu_read64(rvu, blkaddr, NDC_AF_CONST); + max_bank = FIELD_GET(NDC_AF_BANK_MASK, ndc_af_const); + max_line = FIELD_GET(NDC_AF_BANK_LINE_MASK, ndc_af_const); + for (bank = 0; bank < max_bank; bank++) { + for (line = 0; line < max_line; line++) { + /* Check if 'cache line valid bit(63)' is not set + * but 'cache line lock bit(60)' is set and on + * success, reset the lock bit(60). 
+ */ + reg = rvu_read64(rvu, blkaddr, + NDC_AF_BANKX_LINEX_METADATA(bank, line)); + if (!(reg & BIT_ULL(63)) && (reg & BIT_ULL(60))) { + rvu_write64(rvu, blkaddr, + NDC_AF_BANKX_LINEX_METADATA(bank, line), + reg & ~BIT_ULL(60)); + } + } + } + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 0e0d536645ac..39f7a7cb2755 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -690,6 +690,7 @@ #define NDC_AF_INTR_ENA_W1S (0x00068) #define NDC_AF_INTR_ENA_W1C (0x00070) #define NDC_AF_ACTIVE_PC (0x00078) +#define NDC_AF_CAMS_RD_INTERVAL (0x00080) #define NDC_AF_BP_TEST_ENABLE (0x001F8) #define NDC_AF_BP_TEST(a) (0x00200 | (a) << 3) #define NDC_AF_BLK_RST (0x002F0) @@ -705,6 +706,8 @@ (0x00F00 | (a) << 5 | (b) << 4) #define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3) #define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3) +#define NDC_AF_BANKX_LINEX_METADATA(a, b) \ + (0x10000 | (a) << 12 | (b) << 3) /* LBK */ #define LBK_CONST (0x10ull) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 709fc0114fbd..0f7345a96965 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -765,7 +765,7 @@ static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp, /* NPC profile doesn't extract AH/ESP header fields */ if ((ah_esp_mask->spi & ah_esp_hdr->spi) || - (ah_esp_mask->tclass & ah_esp_mask->tclass)) + (ah_esp_mask->tclass & ah_esp_hdr->tclass)) return -EOPNOTSUPP; if (flow_type == AH_V6_FLOW) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index ef10aef3cda0..7045fedfd73a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "otx2_reg.h" #include "otx2_common.h" @@ -699,7 +700,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset, int alg, u64 iova, int ptp_offset, - u64 base_ns, int udp_csum) + u64 base_ns, bool udp_csum_crt) { struct nix_sqe_mem_s *mem; @@ -711,7 +712,7 @@ static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset, if (ptp_offset) { mem->start_offset = ptp_offset; - mem->udp_csum_crt = udp_csum; + mem->udp_csum_crt = !!udp_csum_crt; mem->base_ns = base_ns; mem->step_type = 1; } @@ -986,10 +987,11 @@ static bool otx2_validate_network_transport(struct sk_buff *skb) return false; } -static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, int *udp_csum) +static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, bool *udp_csum_crt) { struct ethhdr *eth = (struct ethhdr *)(skb->data); u16 nix_offload_hlen = 0, inner_vhlen = 0; + bool udp_hdr_present = false, is_sync; u8 *data = skb->data, *msgtype; __be16 proto = eth->h_proto; int network_depth = 0; @@ -1029,45 +1031,81 @@ static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, int *udp_csum) if (!otx2_validate_network_transport(skb)) return false; - *udp_csum = 1; *offset = nix_offload_hlen + skb_transport_offset(skb) + sizeof(struct udphdr); + udp_hdr_present = true; + } msgtype = data + *offset; - /* Check PTP messageId is SYNC or not */ - return (*msgtype & 0xf) == 0; + is_sync = 
!(*msgtype & 0xf); + if (is_sync) + *udp_csum_crt = udp_hdr_present; + else + *offset = 0; + + return is_sync; } static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb, struct otx2_snd_queue *sq, int *offset) { + struct ethhdr *eth = (struct ethhdr *)(skb->data); struct ptpv2_tstamp *origin_tstamp; - int ptp_offset = 0, udp_csum = 0; + bool udp_csum_crt = false; + unsigned int udphoff; struct timespec64 ts; + int ptp_offset = 0; + __wsum skb_csum; u64 iova; if (unlikely(!skb_shinfo(skb)->gso_size && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) { - if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)) { - if (otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum)) { - origin_tstamp = (struct ptpv2_tstamp *) - ((u8 *)skb->data + ptp_offset + - PTP_SYNC_SEC_OFFSET); - ts = ns_to_timespec64(pfvf->ptp->tstamp); - origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff); - origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff); - origin_tstamp->nanoseconds = htonl(ts.tv_nsec); - /* Point to correction field in PTP packet */ - ptp_offset += 8; + if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC && + otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum_crt))) { + origin_tstamp = (struct ptpv2_tstamp *) + ((u8 *)skb->data + ptp_offset + + PTP_SYNC_SEC_OFFSET); + ts = ns_to_timespec64(pfvf->ptp->tstamp); + origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff); + origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff); + origin_tstamp->nanoseconds = htonl(ts.tv_nsec); + /* Point to correction field in PTP packet */ + ptp_offset += 8; + + /* When user disables hw checksum, stack calculates the csum, + * but it does not cover ptp timestamp which is added later. + * Recalculate the checksum manually considering the timestamp. + */ + if (udp_csum_crt) { + struct udphdr *uh = udp_hdr(skb); + + if (skb->ip_summed != CHECKSUM_PARTIAL && uh->check != 0) { + udphoff = skb_transport_offset(skb); + uh->check = 0; + skb_csum = skb_checksum(skb, udphoff, skb->len - udphoff, + 0); + if (ntohs(eth->h_proto) == ETH_P_IPV6) + uh->check = csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + skb->len - udphoff, + ipv6_hdr(skb)->nexthdr, + skb_csum); + else + uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + skb->len - udphoff, + IPPROTO_UDP, + skb_csum); + } } } else { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; } iova = sq->timestamps->iova + (sq->head * sizeof(u64)); otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova, - ptp_offset, pfvf->ptp->base_ns, udp_csum); + ptp_offset, pfvf->ptp->base_ns, udp_csum_crt); } else { skb_tx_timestamp(skb); } diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 53ee9dea6638..49975924e242 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -561,7 +561,8 @@ static int mtk_mac_finish(struct phylink_config *config, unsigned int mode, mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); mcr_new = mcr_cur; mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE | - MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK; + MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK | + MAC_MCR_RX_FIFO_CLR_DIS; /* Only update control register when needed! 
*/ if (mcr_new != mcr_cur) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 306fdc2c608a..dafa9a0baa58 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -357,6 +357,7 @@ #define MAC_MCR_FORCE_MODE BIT(15) #define MAC_MCR_TX_EN BIT(14) #define MAC_MCR_RX_EN BIT(13) +#define MAC_MCR_RX_FIFO_CLR_DIS BIT(12) #define MAC_MCR_BACKOFF_EN BIT(9) #define MAC_MCR_BACKPR_EN BIT(8) #define MAC_MCR_FORCE_RX_FC BIT(5) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c index cdc87ecae5d3..d000236ddbac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c @@ -90,4 +90,8 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev) err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_HOST_PF]); if (err) mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err); + + err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF]); + if (err) + mlx5_core_warn(dev, "Timeout reclaiming external host VFs pages err(%d)\n", err); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index 8469e9c38670..ae75e230170b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -86,7 +86,19 @@ static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb return (ptpsq->ts_cqe_ctr_mask && (skb_cc != skb_id)); } -static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id) +static bool mlx5e_ptp_ts_cqe_ooo(struct mlx5e_ptpsq *ptpsq, u16 skb_id) +{ + u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc); + u16 skb_pc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_pc); + + if (PTP_WQE_CTR2IDX(skb_id - skb_cc) >= PTP_WQE_CTR2IDX(skb_pc - skb_cc)) + return true; + + return false; +} + +static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, + u16 skb_id, int budget) { struct skb_shared_hwtstamps hwts = {}; struct sk_buff *skb; @@ -98,6 +110,7 @@ static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_ hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp; skb_tstamp_tx(skb, &hwts); ptpsq->cq_stats->resync_cqe++; + napi_consume_skb(skb, budget); skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc); } } @@ -118,8 +131,14 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq, goto out; } - if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id)) - mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id); + if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id)) { + if (mlx5e_ptp_ts_cqe_ooo(ptpsq, skb_id)) { + /* already handled by a previous resync */ + ptpsq->cq_stats->ooo_cqe_drop++; + return; + } + mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id, budget); + } skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo); hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 853f312cd757..1b3a65325ece 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -81,7 +81,7 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq); static inline bool mlx5e_skb_fifo_has_room(struct mlx5e_skb_fifo *fifo) { - return (*fifo->pc - *fifo->cc) < fifo->mask; + return (u16)(*fifo->pc - *fifo->cc) < 
fifo->mask; } static inline bool @@ -297,6 +297,8 @@ void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb) static inline struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo) { + WARN_ON_ONCE(*fifo->pc == *fifo->cc); + return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 03c1841970f1..f7f54550a8bb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -2121,6 +2121,7 @@ static const struct counter_desc ptp_cq_stats_desc[] = { { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) }, { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) }, { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) }, + { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, ooo_cqe_drop) }, }; static const struct counter_desc ptp_rq_stats_desc[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 9f781085be47..52a67efafcd3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -459,6 +459,7 @@ struct mlx5e_ptp_cq_stats { u64 abort_abs_diff_ns; u64 resync_cqe; u64 resync_event; + u64 ooo_cqe_drop; }; struct mlx5e_stats { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index b4e263e8cfb8..235f6f0a7052 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1043,7 +1043,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - if (rep->vport == MLX5_VPORT_UPLINK) + if (MLX5_CAP_ESW_FLOWTABLE(on_esw->dev, flow_source) && + rep->vport == MLX5_VPORT_UPLINK) spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT; flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c index 23361a9ae4fa..6dc83e871cd7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/geneve.c @@ -105,6 +105,7 @@ int mlx5_geneve_tlv_option_add(struct mlx5_geneve *geneve, struct geneve_opt *op geneve->opt_type = opt->type; geneve->obj_id = res; geneve->refcount++; + res = 0; } unlock: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 3008e9ce2bbf..20d7662c10fb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -147,6 +147,10 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf) mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf); + /* For ECPFs, skip waiting for host VF pages until ECPF is destroyed */ + if (mlx5_core_is_ecpf(dev)) + return; + if (mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF])) mlx5_core_warn(dev, "timeout reclaiming VFs pages\n"); } diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c index a9aec900d608..7d66fe75cd3b 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_police.c 
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_police.c @@ -194,7 +194,7 @@ int lan966x_police_port_del(struct lan966x_port *port, return -EINVAL; } - err = lan966x_police_del(port, port->tc.police_id); + err = lan966x_police_del(port, POL_IDX_PORT + port->chip_port); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed to add policer to port"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 84e1740b12f1..3c1d4b27668f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1168,6 +1168,7 @@ static int stmmac_init_phy(struct net_device *dev) phylink_ethtool_get_wol(priv->phylink, &wol); device_set_wakeup_capable(priv->device, !!wol.supported); + device_set_wakeup_enable(priv->device, !!wol.wolopts); } return ret; diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 1c16548415cd..b0c7ab74a82e 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -2894,8 +2894,10 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, goto err_out_clear_quattro; } - hpreg_res = devm_request_region(&pdev->dev, pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0), DRV_NAME); + hpreg_res = devm_request_mem_region(&pdev->dev, + pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0), + DRV_NAME); if (!hpreg_res) { err = -EBUSY; dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n"); diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c index 51f68daac152..34b87389788b 100644 --- a/drivers/net/mdio/mdio-mscc-miim.c +++ b/drivers/net/mdio/mdio-mscc-miim.c @@ -52,6 +52,7 @@ struct mscc_miim_info { struct mscc_miim_dev { struct regmap *regs; int mii_status_offset; + bool ignore_read_errors; struct regmap *phy_regs; const struct mscc_miim_info *info; struct clk *clk; @@ -138,7 +139,7 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum) goto out; } - if (val & MSCC_MIIM_DATA_ERROR) { + if (!miim->ignore_read_errors && !!(val & MSCC_MIIM_DATA_ERROR)) { ret = -EIO; goto out; } @@ -218,7 +219,8 @@ static const struct regmap_config mscc_miim_phy_regmap_config = { }; int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name, - struct regmap *mii_regmap, int status_offset) + struct regmap *mii_regmap, int status_offset, + bool ignore_read_errors) { struct mscc_miim_dev *miim; struct mii_bus *bus; @@ -240,6 +242,7 @@ int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name, miim->regs = mii_regmap; miim->mii_status_offset = status_offset; + miim->ignore_read_errors = ignore_read_errors; *pbus = bus; @@ -291,7 +294,7 @@ static int mscc_miim_probe(struct platform_device *pdev) return dev_err_probe(dev, PTR_ERR(phy_regmap), "Unable to create phy register regmap\n"); - ret = mscc_miim_setup(dev, &bus, "mscc_miim", mii_regmap, 0); + ret = mscc_miim_setup(dev, &bus, "mscc_miim", mii_regmap, 0, false); if (ret < 0) { dev_err(dev, "Unable to setup the MDIO bus\n"); return ret; diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index ccecee2524ce..0b88635f4fbc 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -342,6 +342,37 @@ static int lan88xx_config_aneg(struct phy_device *phydev) return genphy_config_aneg(phydev); } +static void lan88xx_link_change_notify(struct phy_device *phydev) +{ + int temp; + + /* At forced 100 F/H mode, chip may fail to set mode correctly + * 
when cable is switched between long(~50+m) and short one. + * As workaround, set to 10 before setting to 100 + * at forced 100 F/H mode. + */ + if (!phydev->autoneg && phydev->speed == 100) { + /* disable phy interrupt */ + temp = phy_read(phydev, LAN88XX_INT_MASK); + temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_; + phy_write(phydev, LAN88XX_INT_MASK, temp); + + temp = phy_read(phydev, MII_BMCR); + temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000); + phy_write(phydev, MII_BMCR, temp); /* set to 10 first */ + temp |= BMCR_SPEED100; + phy_write(phydev, MII_BMCR, temp); /* set to 100 later */ + + /* clear pending interrupt generated while workaround */ + temp = phy_read(phydev, LAN88XX_INT_STS); + + /* enable phy interrupt back */ + temp = phy_read(phydev, LAN88XX_INT_MASK); + temp |= LAN88XX_INT_MASK_MDINTPIN_EN_; + phy_write(phydev, LAN88XX_INT_MASK, temp); + } +} + static struct phy_driver microchip_phy_driver[] = { { .phy_id = 0x0007c132, @@ -359,6 +390,7 @@ static struct phy_driver microchip_phy_driver[] = { .config_init = lan88xx_config_init, .config_aneg = lan88xx_config_aneg, + .link_change_notify = lan88xx_link_change_notify, .config_intr = lan88xx_phy_config_intr, .handle_interrupt = lan88xx_handle_interrupt, diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 8cff61dbc4b5..7fbb0904b3c0 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -3041,8 +3041,6 @@ static int phy_probe(struct device *dev) if (phydrv->flags & PHY_IS_INTERNAL) phydev->is_internal = true; - mutex_lock(&phydev->lock); - /* Deassert the reset signal */ phy_device_reset(phydev, 0); @@ -3110,12 +3108,10 @@ static int phy_probe(struct device *dev) phydev->state = PHY_READY; out: - /* Assert the reset signal */ + /* Re-assert the reset signal on error */ if (err) phy_device_reset(phydev, 1); - mutex_unlock(&phydev->lock); - return err; } @@ -3125,9 +3121,7 @@ static int phy_remove(struct device *dev) cancel_delayed_work_sync(&phydev->state_queue); - mutex_lock(&phydev->lock); phydev->state = PHY_DOWN; - mutex_unlock(&phydev->lock); sfp_bus_del_upstream(phydev->sfp_bus); phydev->sfp_bus = NULL; diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index ac7481ce2fc1..00d9eff91dcf 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -44,7 +44,6 @@ static struct smsc_hw_stat smsc_hw_stats[] = { }; struct smsc_phy_priv { - u16 intmask; bool energy_enable; }; @@ -57,7 +56,6 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev) static int smsc_phy_config_intr(struct phy_device *phydev) { - struct smsc_phy_priv *priv = phydev->priv; int rc; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { @@ -65,14 +63,9 @@ static int smsc_phy_config_intr(struct phy_device *phydev) if (rc) return rc; - priv->intmask = MII_LAN83C185_ISF_INT4 | MII_LAN83C185_ISF_INT6; - if (priv->energy_enable) - priv->intmask |= MII_LAN83C185_ISF_INT7; - - rc = phy_write(phydev, MII_LAN83C185_IM, priv->intmask); + rc = phy_write(phydev, MII_LAN83C185_IM, + MII_LAN83C185_ISF_INT_PHYLIB_EVENTS); } else { - priv->intmask = 0; - rc = phy_write(phydev, MII_LAN83C185_IM, 0); if (rc) return rc; @@ -85,7 +78,6 @@ static int smsc_phy_config_intr(struct phy_device *phydev) static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev) { - struct smsc_phy_priv *priv = phydev->priv; int irq_status; irq_status = phy_read(phydev, MII_LAN83C185_ISF); @@ -96,7 +88,7 @@ static irqreturn_t smsc_phy_handle_interrupt(struct phy_device *phydev) return IRQ_NONE; } - if (!(irq_status & 
priv->intmask)) + if (!(irq_status & MII_LAN83C185_ISF_INT_PHYLIB_EVENTS)) return IRQ_NONE; phy_trigger_machine(phydev); diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index f18ab8e220db..068488890d57 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -2115,33 +2115,8 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev) static void lan78xx_link_status_change(struct net_device *net) { struct phy_device *phydev = net->phydev; - int temp; - /* At forced 100 F/H mode, chip may fail to set mode correctly - * when cable is switched between long(~50+m) and short one. - * As workaround, set to 10 before setting to 100 - * at forced 100 F/H mode. - */ - if (!phydev->autoneg && (phydev->speed == 100)) { - /* disable phy interrupt */ - temp = phy_read(phydev, LAN88XX_INT_MASK); - temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_; - phy_write(phydev, LAN88XX_INT_MASK, temp); - - temp = phy_read(phydev, MII_BMCR); - temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000); - phy_write(phydev, MII_BMCR, temp); /* set to 10 first */ - temp |= BMCR_SPEED100; - phy_write(phydev, MII_BMCR, temp); /* set to 100 later */ - - /* clear pending interrupt generated while workaround */ - temp = phy_read(phydev, LAN88XX_INT_STS); - - /* enable phy interrupt back */ - temp = phy_read(phydev, LAN88XX_INT_MASK); - temp |= LAN88XX_INT_MASK_MDINTPIN_EN_; - phy_write(phydev, LAN88XX_INT_MASK, temp); - } + phy_print_status(phydev); } static int irq_map(struct irq_domain *d, unsigned int irq, diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c index 2d53e0f88d2f..1e0f2297f9c6 100644 --- a/drivers/nfc/fdp/i2c.c +++ b/drivers/nfc/fdp/i2c.c @@ -247,6 +247,9 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev, len, sizeof(**fw_vsc_cfg), GFP_KERNEL); + if (!*fw_vsc_cfg) + goto alloc_err; + r = device_property_read_u8_array(dev, FDP_DP_FW_VSC_CFG_NAME, *fw_vsc_cfg, len); @@ -260,6 +263,7 @@ vsc_read_err: *fw_vsc_cfg = NULL; } +alloc_err: dev_dbg(dev, "Clock type: %d, clock frequency: %d, VSC: %s", *clock_type, *clock_freq, *fw_vsc_cfg != NULL ? "yes" : "no"); } diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c index ec87dd21e054..b2f1ced8e6dd 100644 --- a/drivers/nfc/st-nci/se.c +++ b/drivers/nfc/st-nci/se.c @@ -672,6 +672,12 @@ int st_nci_se_io(struct nci_dev *ndev, u32 se_idx, ST_NCI_EVT_TRANSMIT_DATA, apdu, apdu_length); default: + /* Need to free cb_context here as at the moment we can't + * clearly indicate to the caller if the callback function + * would be called (and free it) or not. In both cases a + * negative value may be returned to the caller. + */ + kfree(cb_context); return -ENODEV; } } diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index df8d27cf2956..dae288bebcb5 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c @@ -236,6 +236,12 @@ int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx, ST21NFCA_EVT_TRANSMIT_DATA, apdu, apdu_length); default: + /* Need to free cb_context here as at the moment we can't + * clearly indicate to the caller if the callback function + * would be called (and free it) or not. In both cases a + * negative value may be returned to the caller. 
+ */ + kfree(cb_context); return -ENODEV; } } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index dd35c2b7b55f..54481bd9fb76 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -38,6 +38,7 @@ struct nvme_ns_info { bool is_shared; bool is_readonly; bool is_ready; + bool is_removed; }; unsigned int admin_timeout = 60; @@ -1439,16 +1440,8 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, error = nvme_submit_sync_cmd(ctrl->admin_q, &c, *id, sizeof(**id)); if (error) { dev_warn(ctrl->device, "Identify namespace failed (%d)\n", error); - goto out_free_id; + kfree(*id); } - - error = NVME_SC_INVALID_NS | NVME_SC_DNR; - if ((*id)->ncap == 0) /* namespace not allocated or attached */ - goto out_free_id; - return 0; - -out_free_id: - kfree(*id); return error; } @@ -1462,6 +1455,13 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl, ret = nvme_identify_ns(ctrl, info->nsid, &id); if (ret) return ret; + + if (id->ncap == 0) { + /* namespace not allocated or attached */ + info->is_removed = true; + return -ENODEV; + } + info->anagrpid = id->anagrpid; info->is_shared = id->nmic & NVME_NS_NMIC_SHARED; info->is_readonly = id->nsattr & NVME_NS_ATTR_RO; @@ -4388,6 +4388,7 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid) { struct nvme_ns_info info = { .nsid = nsid }; struct nvme_ns *ns; + int ret; if (nvme_identify_ns_descs(ctrl, &info)) return; @@ -4404,19 +4405,19 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid) * set up a namespace. If not fall back to the legacy version. */ if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) || - (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) { - if (nvme_ns_info_from_id_cs_indep(ctrl, &info)) - return; - } else { - if (nvme_ns_info_from_identify(ctrl, &info)) - return; - } + (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) + ret = nvme_ns_info_from_id_cs_indep(ctrl, &info); + else + ret = nvme_ns_info_from_identify(ctrl, &info); + + if (info.is_removed) + nvme_ns_remove_by_nsid(ctrl, nsid); /* * Ignore the namespace if it is not ready. We will get an AEN once it * becomes ready and restart the scan. */ - if (!info.is_ready) + if (ret || !info.is_ready) return; ns = nvme_find_get_ns(ctrl, nsid); diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index a6e22116e139..dcac3df8a5f7 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -189,7 +189,8 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl, static inline char *nvmf_ctrl_subsysnqn(struct nvme_ctrl *ctrl) { - if (!ctrl->subsys) + if (!ctrl->subsys || + !strcmp(ctrl->opts->subsysnqn, NVME_DISC_SUBSYS_NAME)) return ctrl->opts->subsysnqn; return ctrl->subsys->subnqn; } diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 1dc7c733c7e3..bb80192c16b6 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -2488,6 +2488,10 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) len = nvmf_get_address(ctrl, buf, size); + mutex_lock(&queue->queue_lock); + + if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) + goto done; ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr); if (ret > 0) { if (len > 0) @@ -2495,6 +2499,8 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n", (len) ? 
"," : "", &src_addr); } +done: + mutex_unlock(&queue->queue_lock); return len; } diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index f90975e00446..fc636f9ee421 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -26,7 +26,7 @@ #include "of_private.h" -#define MAX_RESERVED_REGIONS 64 +#define MAX_RESERVED_REGIONS 128 static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS]; static int reserved_mem_count; diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 040ae076ec0e..112c8f401ac4 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -1086,6 +1086,8 @@ static void quirk_cmd_compl(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x010e, + PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0110, PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400, diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index a46fec776ad7..1698205dd73c 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -976,24 +976,41 @@ bool acpi_pci_power_manageable(struct pci_dev *dev) bool acpi_pci_bridge_d3(struct pci_dev *dev) { struct pci_dev *rpdev; - struct acpi_device *adev; - acpi_status status; - unsigned long long state; + struct acpi_device *adev, *rpadev; const union acpi_object *obj; if (acpi_pci_disabled || !dev->is_hotplug_bridge) return false; - /* Assume D3 support if the bridge is power-manageable by ACPI. */ - if (acpi_pci_power_manageable(dev)) - return true; + adev = ACPI_COMPANION(&dev->dev); + if (adev) { + /* + * If the bridge has _S0W, whether or not it can go into D3 + * depends on what is returned by that object. In particular, + * if the power state returned by _S0W is D2 or shallower, + * entering D3 should not be allowed. + */ + if (acpi_dev_power_state_for_wake(adev) <= ACPI_STATE_D2) + return false; + + /* + * Otherwise, assume that the bridge can enter D3 so long as it + * is power-manageable via ACPI. + */ + if (acpi_device_power_manageable(adev)) + return true; + } rpdev = pcie_find_root_port(dev); if (!rpdev) return false; - adev = ACPI_COMPANION(&rpdev->dev); - if (!adev) + if (rpdev == dev) + rpadev = adev; + else + rpadev = ACPI_COMPANION(&rpdev->dev); + + if (!rpadev) return false; /* @@ -1001,15 +1018,15 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev) * doesn't supply a wakeup GPE via _PRW, it cannot signal hotplug * events from low-power states including D3hot and D3cold. */ - if (!adev->wakeup.flags.valid) + if (!rpadev->wakeup.flags.valid) return false; /* - * If the Root Port cannot wake itself from D3hot or D3cold, we - * can't use D3. + * In the bridge-below-a-Root-Port case, evaluate _S0W for the Root Port + * to verify whether or not it can signal wakeup from D3. */ - status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state); - if (ACPI_SUCCESS(status) && state < ACPI_STATE_D3_HOT) + if (rpadev != adev && + acpi_dev_power_state_for_wake(rpadev) <= ACPI_STATE_D2) return false; /* @@ -1018,7 +1035,7 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev) * bridges *below* that Root Port can also signal hotplug events * while in D3. 
*/ - if (!acpi_dev_get_property(adev, "HotPlugSupportInD3", + if (!acpi_dev_get_property(rpadev, "HotPlugSupportInD3", ACPI_TYPE_INTEGER, &obj) && obj->integer.value == 1) return true; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 20ac67d59034..494fa46f5767 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4835,6 +4835,26 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags) PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } +/* + * Wangxun 10G/1G NICs have no ACS capability, and on multi-function + * devices, peer-to-peer transactions are not be used between the functions. + * So add an ACS quirk for below devices to isolate functions. + * SFxxx 1G NICs(em). + * RP1000/RP2000 10G NICs(sp). + */ +static int pci_quirk_wangxun_nic_acs(struct pci_dev *dev, u16 acs_flags) +{ + switch (dev->device) { + case 0x0100 ... 0x010F: + case 0x1001: + case 0x2001: + return pci_acs_ctrl_enabled(acs_flags, + PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); + } + + return false; +} + static const struct pci_dev_acs_enabled { u16 vendor; u16 device; @@ -4980,6 +5000,8 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs }, /* Zhaoxin Root/Downstream Ports */ { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs }, + /* Wangxun nics */ + { PCI_VENDOR_ID_WANGXUN, PCI_ANY_ID, pci_quirk_wangxun_nic_acs }, { 0 } }; diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index b4096598dbcb..c690572b10ce 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -1765,12 +1765,70 @@ static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res, add_size = size - new_size; pci_dbg(bridge, "bridge window %pR shrunken by %pa\n", res, &add_size); + } else { + return; } res->end = res->start + new_size - 1; - remove_from_list(add_list, res); + + /* If the resource is part of the add_list, remove it now */ + if (add_list) + remove_from_list(add_list, res); } +static void remove_dev_resource(struct resource *avail, struct pci_dev *dev, + struct resource *res) +{ + resource_size_t size, align, tmp; + + size = resource_size(res); + if (!size) + return; + + align = pci_resource_alignment(dev, res); + align = align ? ALIGN(avail->start, align) - avail->start : 0; + tmp = align + size; + avail->start = min(avail->start + tmp, avail->end + 1); +} + +static void remove_dev_resources(struct pci_dev *dev, struct resource *io, + struct resource *mmio, + struct resource *mmio_pref) +{ + int i; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + struct resource *res = &dev->resource[i]; + + if (resource_type(res) == IORESOURCE_IO) { + remove_dev_resource(io, dev, res); + } else if (resource_type(res) == IORESOURCE_MEM) { + + /* + * Make sure prefetchable memory is reduced from + * the correct resource. Specifically we put 32-bit + * prefetchable memory in non-prefetchable window + * if there is an 64-bit pretchable window. + * + * See comments in __pci_bus_size_bridges() for + * more information. + */ + if ((res->flags & IORESOURCE_PREFETCH) && + ((res->flags & IORESOURCE_MEM_64) == + (mmio_pref->flags & IORESOURCE_MEM_64))) + remove_dev_resource(mmio_pref, dev, res); + else + remove_dev_resource(mmio, dev, res); + } + } +} + +/* + * io, mmio and mmio_pref contain the total amount of bridge window space + * available. This includes the minimal space needed to cover all the + * existing devices on the bus and the possible extra space that can be + * shared with the bridges. 
+ */ static void pci_bus_distribute_available_resources(struct pci_bus *bus, struct list_head *add_list, struct resource io, @@ -1780,7 +1838,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus, unsigned int normal_bridges = 0, hotplug_bridges = 0; struct resource *io_res, *mmio_res, *mmio_pref_res; struct pci_dev *dev, *bridge = bus->self; - resource_size_t io_per_hp, mmio_per_hp, mmio_pref_per_hp, align; + resource_size_t io_per_b, mmio_per_b, mmio_pref_per_b, align; io_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW]; mmio_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW]; @@ -1824,94 +1882,88 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus, normal_bridges++; } - /* - * There is only one bridge on the bus so it gets all available - * resources which it can then distribute to the possible hotplug - * bridges below. - */ - if (hotplug_bridges + normal_bridges == 1) { - dev = list_first_entry(&bus->devices, struct pci_dev, bus_list); - if (dev->subordinate) - pci_bus_distribute_available_resources(dev->subordinate, - add_list, io, mmio, mmio_pref); + if (!(hotplug_bridges + normal_bridges)) return; + + /* + * Calculate the amount of space we can forward from "bus" to any + * downstream buses, i.e., the space left over after assigning the + * BARs and windows on "bus". + */ + list_for_each_entry(dev, &bus->devices, bus_list) { + if (!dev->is_virtfn) + remove_dev_resources(dev, &io, &mmio, &mmio_pref); } - if (hotplug_bridges == 0) - return; - /* - * Calculate the total amount of extra resource space we can - * pass to bridges below this one. This is basically the - * extra space reduced by the minimal required space for the - * non-hotplug bridges. + * If there is at least one hotplug bridge on this bus it gets all + * the extra resource space that was left after the reductions + * above. + * + * If there are no hotplug bridges the extra resource space is + * split between non-hotplug bridges. This is to allow possible + * hotplug bridges below them to get the extra space as well. */ + if (hotplug_bridges) { + io_per_b = div64_ul(resource_size(&io), hotplug_bridges); + mmio_per_b = div64_ul(resource_size(&mmio), hotplug_bridges); + mmio_pref_per_b = div64_ul(resource_size(&mmio_pref), + hotplug_bridges); + } else { + io_per_b = div64_ul(resource_size(&io), normal_bridges); + mmio_per_b = div64_ul(resource_size(&mmio), normal_bridges); + mmio_pref_per_b = div64_ul(resource_size(&mmio_pref), + normal_bridges); + } + for_each_pci_bridge(dev, bus) { - resource_size_t used_size; struct resource *res; - - if (dev->is_hotplug_bridge) - continue; - - /* - * Reduce the available resource space by what the - * bridge and devices below it occupy. - */ - res = &dev->resource[PCI_BRIDGE_IO_WINDOW]; - align = pci_resource_alignment(dev, res); - align = align ? ALIGN(io.start, align) - io.start : 0; - used_size = align + resource_size(res); - if (!res->parent) - io.start = min(io.start + used_size, io.end + 1); - - res = &dev->resource[PCI_BRIDGE_MEM_WINDOW]; - align = pci_resource_alignment(dev, res); - align = align ? ALIGN(mmio.start, align) - mmio.start : 0; - used_size = align + resource_size(res); - if (!res->parent) - mmio.start = min(mmio.start + used_size, mmio.end + 1); - - res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW]; - align = pci_resource_alignment(dev, res); - align = align ? 
ALIGN(mmio_pref.start, align) - - mmio_pref.start : 0; - used_size = align + resource_size(res); - if (!res->parent) - mmio_pref.start = min(mmio_pref.start + used_size, - mmio_pref.end + 1); - } - - io_per_hp = div64_ul(resource_size(&io), hotplug_bridges); - mmio_per_hp = div64_ul(resource_size(&mmio), hotplug_bridges); - mmio_pref_per_hp = div64_ul(resource_size(&mmio_pref), - hotplug_bridges); - - /* - * Go over devices on this bus and distribute the remaining - * resource space between hotplug bridges. - */ - for_each_pci_bridge(dev, bus) { struct pci_bus *b; b = dev->subordinate; - if (!b || !dev->is_hotplug_bridge) + if (!b) + continue; + if (hotplug_bridges && !dev->is_hotplug_bridge) continue; + res = &dev->resource[PCI_BRIDGE_IO_WINDOW]; + /* - * Distribute available extra resources equally between - * hotplug-capable downstream ports taking alignment into - * account. + * Make sure the split resource space is properly aligned + * for bridge windows (align it down to avoid going above + * what is available). */ - io.end = io.start + io_per_hp - 1; - mmio.end = mmio.start + mmio_per_hp - 1; - mmio_pref.end = mmio_pref.start + mmio_pref_per_hp - 1; + align = pci_resource_alignment(dev, res); + io.end = align ? io.start + ALIGN_DOWN(io_per_b, align) - 1 + : io.start + io_per_b - 1; + + /* + * The x_per_b holds the extra resource space that can be + * added for each bridge but there is the minimal already + * reserved as well so adjust x.start down accordingly to + * cover the whole space. + */ + io.start -= resource_size(res); + + res = &dev->resource[PCI_BRIDGE_MEM_WINDOW]; + align = pci_resource_alignment(dev, res); + mmio.end = align ? mmio.start + ALIGN_DOWN(mmio_per_b, align) - 1 + : mmio.start + mmio_per_b - 1; + mmio.start -= resource_size(res); + + res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW]; + align = pci_resource_alignment(dev, res); + mmio_pref.end = align ? mmio_pref.start + + ALIGN_DOWN(mmio_pref_per_b, align) - 1 + : mmio_pref.start + mmio_pref_per_b - 1; + mmio_pref.start -= resource_size(res); pci_bus_distribute_available_resources(b, add_list, io, mmio, mmio_pref); - io.start += io_per_hp; - mmio.start += mmio_per_hp; - mmio_pref.start += mmio_pref_per_hp; + io.start += io.end + 1; + mmio.start += mmio.end + 1; + mmio_pref.start += mmio_pref.end + 1; } } @@ -1923,6 +1975,8 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge, if (!bridge->is_hotplug_bridge) return; + pci_dbg(bridge, "distributing available resources\n"); + /* Take the initial extra resources from the hotplug port */ available_io = bridge->resource[PCI_BRIDGE_IO_WINDOW]; available_mmio = bridge->resource[PCI_BRIDGE_MEM_WINDOW]; @@ -1934,6 +1988,54 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge, available_mmio_pref); } +static bool pci_bridge_resources_not_assigned(struct pci_dev *dev) +{ + const struct resource *r; + + /* + * If the child device's resources are not yet assigned it means we + * are configuring them (not the boot firmware), so we should be + * able to extend the upstream bridge resources in the same way we + * do with the normal hotplug case. 
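The rewritten distribution loop above first divides the leftover window space by the number of hotplug bridges (or, when there are none, by the number of ordinary bridges), then carves each bridge's slice by aligning that share down to the bridge window alignment and backing the start up over the minimum already reserved for the window. A standalone sketch of both steps, with simplified types rather than the kernel's struct resource, div64_ul() and ALIGN_DOWN():

```c
#include <stdio.h>
#include <stdint.h>

struct resource { uint64_t start, end; };

#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

/* Leftover space goes to hotplug bridges if any exist, otherwise it is
 * shared between ordinary bridges (mirrors the div64_ul() calls above). */
static uint64_t per_bridge_share(uint64_t leftover,
				 unsigned int hotplug, unsigned int normal)
{
	unsigned int n = hotplug ? hotplug : normal;

	return n ? leftover / n : 0;
}

/* Carve one bridge's slice: align the extra share down to the window
 * alignment, then include the minimum already reserved for this bridge
 * window so the whole span is handed downstream. */
static void carve_slice(struct resource *win, uint64_t per_bridge,
			uint64_t reserved, uint64_t align)
{
	win->end = align ? win->start + ALIGN_DOWN(per_bridge, align) - 1
			 : win->start + per_bridge - 1;
	win->start -= reserved;
}

int main(void)
{
	struct resource io = { .start = 0x2000, .end = 0xffff };
	uint64_t share = per_bridge_share(0x3000, 2, 1);

	carve_slice(&io, share, 0x1000, 0x1000);
	printf("slice handed to the bridge: %#llx-%#llx\n",
	       (unsigned long long)io.start, (unsigned long long)io.end);
	return 0;
}
```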
+ */ + r = &dev->resource[PCI_BRIDGE_IO_WINDOW]; + if (r->flags && !(r->flags & IORESOURCE_STARTALIGN)) + return false; + r = &dev->resource[PCI_BRIDGE_MEM_WINDOW]; + if (r->flags && !(r->flags & IORESOURCE_STARTALIGN)) + return false; + r = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW]; + if (r->flags && !(r->flags & IORESOURCE_STARTALIGN)) + return false; + + return true; +} + +static void +pci_root_bus_distribute_available_resources(struct pci_bus *bus, + struct list_head *add_list) +{ + struct pci_dev *dev, *bridge = bus->self; + + for_each_pci_bridge(dev, bus) { + struct pci_bus *b; + + b = dev->subordinate; + if (!b) + continue; + + /* + * Need to check "bridge" here too because it is NULL + * in case of root bus. + */ + if (bridge && pci_bridge_resources_not_assigned(dev)) + pci_bridge_distribute_available_resources(bridge, + add_list); + else + pci_root_bus_distribute_available_resources(b, add_list); + } +} + /* * First try will not touch PCI bridge res. * Second and later try will clear small leaf bridge res. @@ -1973,6 +2075,8 @@ again: */ __pci_bus_size_bridges(bus, add_list); + pci_root_bus_distribute_available_resources(bus, add_list); + /* Depth last, allocate resources and update the hardware. */ __pci_bus_assign_resources(bus, add_list, &fail_head); if (add_list) diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c index 6aea512e5d4e..39db8acde61a 100644 --- a/drivers/phy/rockchip/phy-rockchip-typec.c +++ b/drivers/phy/rockchip/phy-rockchip-typec.c @@ -808,9 +808,8 @@ static int tcphy_get_mode(struct rockchip_typec_phy *tcphy) struct extcon_dev *edev = tcphy->extcon; union extcon_property_value property; unsigned int id; - bool ufp, dp; u8 mode; - int ret; + int ret, ufp, dp; if (!edev) return MODE_DFP_USB; diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig index 09c7829e95c4..382793e73a60 100644 --- a/drivers/platform/mellanox/Kconfig +++ b/drivers/platform/mellanox/Kconfig @@ -16,17 +16,17 @@ if MELLANOX_PLATFORM config MLXREG_HOTPLUG tristate "Mellanox platform hotplug driver support" - depends on REGMAP depends on HWMON depends on I2C + select REGMAP help This driver handles hot-plug events for the power suppliers, power cables and fans on the wide range Mellanox IB and Ethernet systems. config MLXREG_IO tristate "Mellanox platform register access driver support" - depends on REGMAP depends on HWMON + select REGMAP help This driver allows access to Mellanox programmable device register space through sysfs interface. The sets of registers for sysfs access @@ -36,9 +36,9 @@ config MLXREG_IO config MLXREG_LC tristate "Mellanox line card platform driver support" - depends on REGMAP depends on HWMON depends on I2C + select REGMAP help This driver provides support for the Mellanox MSN4800-XX line cards, which are the part of MSN4800 Ethernet modular switch systems @@ -80,10 +80,9 @@ config MLXBF_PMC config NVSW_SN2201 tristate "Nvidia SN2201 platform driver support" - depends on REGMAP depends on HWMON depends on I2C - depends on REGMAP_I2C + select REGMAP_I2C help This driver provides support for the Nvidia SN2201 platform. 
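pci_bridge_resources_not_assigned(), added above, decides whether the boot firmware already programmed the bridge windows: a window whose flags are set but which lacks IORESOURCE_STARTALIGN was assigned by firmware, while one the PCI core is still sizing has its start field holding an alignment rather than a final address. A kernel-style condensation of that test (not a standalone program; the helper name is just for illustration):

```c
/*
 * True if the window is still unassigned: either empty, or still carrying
 * IORESOURCE_STARTALIGN ("start field is alignment"), meaning the PCI
 * core rather than the firmware is in the middle of configuring it.
 */
static bool bridge_window_unassigned(const struct resource *r)
{
	return !r->flags || (r->flags & IORESOURCE_STARTALIGN);
}
```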
The SN2201 is a highly integrated for one rack unit system with diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index f5312f51de19..b02a8125bc7d 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -997,7 +997,8 @@ config SERIAL_MULTI_INSTANTIATE config MLX_PLATFORM tristate "Mellanox Technologies platform support" - depends on I2C && REGMAP + depends on I2C + select REGMAP help This option enables system support for the Mellanox Technologies platform. The Mellanox systems provide data center networking diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index 77918a2c6701..75f58fc468a7 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -66,7 +66,7 @@ struct ptp_vclock { struct hlist_node vclock_hash_node; struct cyclecounter cc; struct timecounter tc; - spinlock_t lock; /* protects tc/cc */ + struct mutex lock; /* protects tc/cc */ }; /* diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c index 1c0ed4805c0a..dcf752c9e045 100644 --- a/drivers/ptp/ptp_vclock.c +++ b/drivers/ptp/ptp_vclock.c @@ -43,16 +43,16 @@ static void ptp_vclock_hash_del(struct ptp_vclock *vclock) static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct ptp_vclock *vclock = info_to_vclock(ptp); - unsigned long flags; s64 adj; adj = (s64)scaled_ppm << PTP_VCLOCK_FADJ_SHIFT; adj = div_s64(adj, PTP_VCLOCK_FADJ_DENOMINATOR); - spin_lock_irqsave(&vclock->lock, flags); + if (mutex_lock_interruptible(&vclock->lock)) + return -EINTR; timecounter_read(&vclock->tc); vclock->cc.mult = PTP_VCLOCK_CC_MULT + adj; - spin_unlock_irqrestore(&vclock->lock, flags); + mutex_unlock(&vclock->lock); return 0; } @@ -60,11 +60,11 @@ static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) static int ptp_vclock_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct ptp_vclock *vclock = info_to_vclock(ptp); - unsigned long flags; - spin_lock_irqsave(&vclock->lock, flags); + if (mutex_lock_interruptible(&vclock->lock)) + return -EINTR; timecounter_adjtime(&vclock->tc, delta); - spin_unlock_irqrestore(&vclock->lock, flags); + mutex_unlock(&vclock->lock); return 0; } @@ -73,12 +73,12 @@ static int ptp_vclock_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct ptp_vclock *vclock = info_to_vclock(ptp); - unsigned long flags; u64 ns; - spin_lock_irqsave(&vclock->lock, flags); + if (mutex_lock_interruptible(&vclock->lock)) + return -EINTR; ns = timecounter_read(&vclock->tc); - spin_unlock_irqrestore(&vclock->lock, flags); + mutex_unlock(&vclock->lock); *ts = ns_to_timespec64(ns); return 0; @@ -91,7 +91,6 @@ static int ptp_vclock_gettimex(struct ptp_clock_info *ptp, struct ptp_vclock *vclock = info_to_vclock(ptp); struct ptp_clock *pptp = vclock->pclock; struct timespec64 pts; - unsigned long flags; int err; u64 ns; @@ -99,9 +98,10 @@ static int ptp_vclock_gettimex(struct ptp_clock_info *ptp, if (err) return err; - spin_lock_irqsave(&vclock->lock, flags); + if (mutex_lock_interruptible(&vclock->lock)) + return -EINTR; ns = timecounter_cyc2time(&vclock->tc, timespec64_to_ns(&pts)); - spin_unlock_irqrestore(&vclock->lock, flags); + mutex_unlock(&vclock->lock); *ts = ns_to_timespec64(ns); @@ -113,11 +113,11 @@ static int ptp_vclock_settime(struct ptp_clock_info *ptp, { struct ptp_vclock *vclock = info_to_vclock(ptp); u64 ns = timespec64_to_ns(ts); - unsigned long flags; - spin_lock_irqsave(&vclock->lock, flags); + if (mutex_lock_interruptible(&vclock->lock)) + return -EINTR; 
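The ptp_vclock conversion in the surrounding hunks replaces the spinlock protecting the timecounter/cyclecounter with a mutex, plausibly because reading the cyclecounter forwards to the parent physical clock and may sleep; every path now uses the interruptible form and bails out with -EINTR if the caller is signalled. The shape of that pattern, as a kernel-style sketch (not standalone; the function name is reduced for illustration):

```c
/* Kernel-style sketch of the locking pattern used in the conversion:
 * a mutex taken with the interruptible variant so a signalled caller
 * returns -EINTR instead of blocking. */
static int example_adjtime(struct ptp_vclock *vclock, s64 delta)
{
	if (mutex_lock_interruptible(&vclock->lock))
		return -EINTR;

	timecounter_adjtime(&vclock->tc, delta);

	mutex_unlock(&vclock->lock);
	return 0;
}
```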
timecounter_init(&vclock->tc, &vclock->cc, ns); - spin_unlock_irqrestore(&vclock->lock, flags); + mutex_unlock(&vclock->lock); return 0; } @@ -127,7 +127,6 @@ static int ptp_vclock_getcrosststamp(struct ptp_clock_info *ptp, { struct ptp_vclock *vclock = info_to_vclock(ptp); struct ptp_clock *pptp = vclock->pclock; - unsigned long flags; int err; u64 ns; @@ -135,9 +134,10 @@ static int ptp_vclock_getcrosststamp(struct ptp_clock_info *ptp, if (err) return err; - spin_lock_irqsave(&vclock->lock, flags); + if (mutex_lock_interruptible(&vclock->lock)) + return -EINTR; ns = timecounter_cyc2time(&vclock->tc, ktime_to_ns(xtstamp->device)); - spin_unlock_irqrestore(&vclock->lock, flags); + mutex_unlock(&vclock->lock); xtstamp->device = ns_to_ktime(ns); @@ -205,7 +205,7 @@ struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock) INIT_HLIST_NODE(&vclock->vclock_hash_node); - spin_lock_init(&vclock->lock); + mutex_init(&vclock->lock); vclock->clock = ptp_clock_register(&vclock->info, &pclock->dev); if (IS_ERR_OR_NULL(vclock->clock)) { @@ -269,7 +269,6 @@ ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index) { unsigned int hash = vclock_index % HASH_SIZE(vclock_hash); struct ptp_vclock *vclock; - unsigned long flags; u64 ns; u64 vclock_ns = 0; @@ -281,9 +280,10 @@ ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index) if (vclock->clock->index != vclock_index) continue; - spin_lock_irqsave(&vclock->lock, flags); + if (mutex_lock_interruptible(&vclock->lock)) + break; vclock_ns = timecounter_cyc2time(&vclock->tc, ns); - spin_unlock_irqrestore(&vclock->lock, flags); + mutex_unlock(&vclock->lock); break; } diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c index bb7239313401..89d53a0f91e6 100644 --- a/drivers/pwm/pwm-sifive.c +++ b/drivers/pwm/pwm-sifive.c @@ -159,7 +159,13 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm, mutex_lock(&ddata->lock); if (state->period != ddata->approx_period) { - if (ddata->user_count != 1) { + /* + * Don't let a 2nd user change the period underneath the 1st user. + * However if ddate->approx_period == 0 this is the first time we set + * any period, so let whoever gets here first set the period so other + * users who agree on the period won't fail. 
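The pwm-sifive hunk here relaxes the shared-period check so the very first period ever programmed (approx_period still 0) is accepted even when several users already hold channels; only later attempts to change an established period are rejected with -EBUSY. A condensed, standalone sketch of that decision (user_count and approx_period stand in for the driver state):

```c
#include <stdio.h>
#include <errno.h>

static int sifive_period_check(unsigned int user_count,
			       unsigned long approx_period,
			       unsigned long requested_period)
{
	if (requested_period == approx_period)
		return 0;			/* nothing to change */

	/*
	 * The period is shared by every channel: once it has been set
	 * (approx_period != 0), only a sole user may change it.
	 */
	if (user_count != 1 && approx_period)
		return -EBUSY;

	return 0;
}

int main(void)
{
	/* Two users, no period programmed yet: the first request succeeds. */
	printf("first set : %d\n", sifive_period_check(2, 0, 1000000));
	/* Two users, period already established: a change is refused. */
	printf("later move: %d\n", sifive_period_check(2, 1000000, 2000000));
	return 0;
}
```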
+ */ + if (ddata->user_count != 1 && ddata->approx_period) { mutex_unlock(&ddata->lock); return -EBUSY; } diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c index 3115abb3f52a..61a1c87cd501 100644 --- a/drivers/pwm/pwm-stm32-lp.c +++ b/drivers/pwm/pwm-stm32-lp.c @@ -127,7 +127,7 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm, /* ensure CMP & ARR registers are properly written */ ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val, - (val & STM32_LPTIM_CMPOK_ARROK), + (val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK, 100, 1000); if (ret) { dev_err(priv->chip.dev, "ARR/CMP registers write issue\n"); diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 9edd662c69ac..3d0fbc644f57 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -392,7 +392,7 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) return err; if (!rtc->ops) { err = -ENODEV; - } else if (!test_bit(RTC_FEATURE_ALARM, rtc->features) || !rtc->ops->read_alarm) { + } else if (!test_bit(RTC_FEATURE_ALARM, rtc->features)) { err = -EINVAL; } else { memset(alarm, 0, sizeof(struct rtc_wkalrm)); diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index ed5516089e9a..7038f47d77ff 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -136,7 +136,6 @@ struct sun6i_rtc_clk_data { unsigned int fixed_prescaler : 16; unsigned int has_prescaler : 1; unsigned int has_out_clk : 1; - unsigned int export_iosc : 1; unsigned int has_losc_en : 1; unsigned int has_auto_swt : 1; }; @@ -271,10 +270,8 @@ static void __init sun6i_rtc_clk_init(struct device_node *node, /* Yes, I know, this is ugly. */ sun6i_rtc = rtc; - /* Only read IOSC name from device tree if it is exported */ - if (rtc->data->export_iosc) - of_property_read_string_index(node, "clock-output-names", 2, - &iosc_name); + of_property_read_string_index(node, "clock-output-names", 2, + &iosc_name); rtc->int_osc = clk_hw_register_fixed_rate_with_accuracy(NULL, iosc_name, @@ -315,13 +312,10 @@ static void __init sun6i_rtc_clk_init(struct device_node *node, goto err_register; } - clk_data->num = 2; + clk_data->num = 3; clk_data->hws[0] = &rtc->hw; clk_data->hws[1] = __clk_get_hw(rtc->ext_losc); - if (rtc->data->export_iosc) { - clk_data->hws[2] = rtc->int_osc; - clk_data->num = 3; - } + clk_data->hws[2] = rtc->int_osc; of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data); return; @@ -361,7 +355,6 @@ static const struct sun6i_rtc_clk_data sun8i_h3_rtc_data = { .fixed_prescaler = 32, .has_prescaler = 1, .has_out_clk = 1, - .export_iosc = 1, }; static void __init sun8i_h3_rtc_clk_init(struct device_node *node) @@ -379,7 +372,6 @@ static const struct sun6i_rtc_clk_data sun50i_h6_rtc_data = { .fixed_prescaler = 32, .has_prescaler = 1, .has_out_clk = 1, - .export_iosc = 1, .has_losc_en = 1, .has_auto_swt = 1, }; diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 9857dba09c95..85e66574ec41 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -181,6 +181,7 @@ void scsi_remove_host(struct Scsi_Host *shost) scsi_forget_host(shost); mutex_unlock(&shost->scan_mutex); scsi_proc_host_rm(shost); + scsi_proc_hostdir_rm(shost->hostt); /* * New SCSI devices cannot be attached anymore because of the SCSI host @@ -340,6 +341,7 @@ static void scsi_host_dev_release(struct device *dev) struct Scsi_Host *shost = dev_to_shost(dev); struct device *parent = dev->parent; + /* In case scsi_remove_host() has not been called. 
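The stm32-lp fix above tightens the poll condition: STM32_LPTIM_CMPOK_ARROK is a two-bit mask, and (val & mask) becomes true as soon as either CMPOK or ARROK is set, whereas the driver must wait for both. The difference between the two predicates, as a small standalone demonstration (bit values chosen for illustration, not taken from the datasheet):

```c
#include <stdio.h>
#include <stdint.h>

#define CMPOK		0x08u	/* illustrative bit positions */
#define ARROK		0x10u
#define CMPOK_ARROK	(CMPOK | ARROK)

int main(void)
{
	uint32_t val = ARROK;	/* only one of the two flags set */

	/* Old condition: already "done" after a single flag. */
	printf("any-bit check : %s\n",
	       (val & CMPOK_ARROK) ? "done" : "keep polling");

	/* Fixed condition: wait until both flags are set. */
	printf("all-bits check: %s\n",
	       (val & CMPOK_ARROK) == CMPOK_ARROK ? "done" : "keep polling");
	return 0;
}
```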
*/ scsi_proc_hostdir_rm(shost->hostt); /* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */ diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 2022ffb45041..8c062afb2918 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -1516,23 +1516,22 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd) } /** - * strip_and_pad_whitespace - Strip and pad trailing whitespace. - * @i: index into buffer - * @buf: string to modify + * strip_whitespace - Strip and pad trailing whitespace. + * @i: size of buffer + * @buf: string to modify * - * This function will strip all trailing whitespace, pad the end - * of the string with a single space, and NULL terminate the string. + * This function will strip all trailing whitespace and + * NUL terminate the string. * - * Return value: - * new length of string **/ -static int strip_and_pad_whitespace(int i, char *buf) +static void strip_whitespace(int i, char *buf) { + if (i < 1) + return; + i--; while (i && buf[i] == ' ') i--; - buf[i+1] = ' '; - buf[i+2] = '\0'; - return i + 2; + buf[i+1] = '\0'; } /** @@ -1547,19 +1546,21 @@ static int strip_and_pad_whitespace(int i, char *buf) static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb, struct ipr_vpd *vpd) { - char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3]; - int i = 0; + char vendor_id[IPR_VENDOR_ID_LEN + 1]; + char product_id[IPR_PROD_ID_LEN + 1]; + char sn[IPR_SERIAL_NUM_LEN + 1]; - memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); - i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer); + memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN); + strip_whitespace(IPR_VENDOR_ID_LEN, vendor_id); - memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN); - i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer); + memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN); + strip_whitespace(IPR_PROD_ID_LEN, product_id); - memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN); - buffer[IPR_SERIAL_NUM_LEN + i] = '\0'; + memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN); + strip_whitespace(IPR_SERIAL_NUM_LEN, sn); - ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer); + ipr_hcam_err(hostrcb, "%s VPID/SN: %s %s %s\n", prefix, + vendor_id, product_id, sn); } /** diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 4919ea54b827..2ef9d41fc6f4 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -1519,6 +1519,8 @@ struct megasas_ctrl_info { #define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \ MEGASAS_MAX_DEV_PER_CHANNEL) +#define MEGASAS_MAX_SUPPORTED_LD_IDS 240 + #define MEGASAS_MAX_SECTORS (2*1024) #define MEGASAS_MAX_SECTORS_IEEE (2*128) #define MEGASAS_DBG_LVL 1 diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index da1cad1ee123..4463a538102a 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -358,7 +358,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance, u64 map_id) ld = MR_TargetIdToLdGet(i, drv_map); /* For non existing VDs, iterate to next VD*/ - if (ld >= (MAX_LOGICAL_DRIVES_EXT - 1)) + if (ld >= MEGASAS_MAX_SUPPORTED_LD_IDS) continue; raid = MR_LdRaidGet(ld, drv_map); diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h index def4c5e15cd8..8a438f248a82 100644 --- a/drivers/scsi/mpi3mr/mpi3mr.h +++ b/drivers/scsi/mpi3mr/mpi3mr.h @@ -955,19 +955,16 @@ struct scmd_priv { * @chain_buf_count: Chain buffer count 
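The ipr rework above drops the old strip-and-pad helper in favour of strip_whitespace(), which takes the buffer size, trims trailing blanks and NUL-terminates in place; the three VPD fields are then printed separately instead of being packed into one buffer. A standalone rendering of the helper with a tiny usage example:

```c
#include <stdio.h>

/* Trim trailing spaces from the first i bytes of buf and NUL-terminate;
 * mirrors the strip_whitespace() helper introduced above. */
static void strip_whitespace(int i, char *buf)
{
	if (i < 1)
		return;
	i--;
	while (i && buf[i] == ' ')
		i--;
	buf[i + 1] = '\0';
}

int main(void)
{
	char vendor[9] = "IBM     ";	/* 8-byte, space-padded VPD field */

	strip_whitespace(8, vendor);
	printf("'%s'\n", vendor);	/* prints 'IBM' */
	return 0;
}
```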
* @chain_buf_pool: Chain buffer pool * @chain_sgl_list: Chain SGL list - * @chain_bitmap_sz: Chain buffer allocator bitmap size * @chain_bitmap: Chain buffer allocator bitmap * @chain_buf_lock: Chain buffer list lock * @bsg_cmds: Command tracker for BSG command * @host_tm_cmds: Command tracker for task management commands * @dev_rmhs_cmds: Command tracker for device removal commands * @evtack_cmds: Command tracker for event ack commands - * @devrem_bitmap_sz: Device removal bitmap size * @devrem_bitmap: Device removal bitmap - * @dev_handle_bitmap_sz: Device handle bitmap size + * @dev_handle_bitmap_bits: Number of bits in device handle bitmap * @removepend_bitmap: Remove pending bitmap * @delayed_rmhs_list: Delayed device removal list - * @evtack_cmds_bitmap_sz: Event Ack bitmap size * @evtack_cmds_bitmap: Event Ack bitmap * @delayed_evtack_cmds_list: Delayed event acknowledgment list * @ts_update_counter: Timestamp update counter @@ -1128,7 +1125,6 @@ struct mpi3mr_ioc { u32 chain_buf_count; struct dma_pool *chain_buf_pool; struct chain_element *chain_sgl_list; - u16 chain_bitmap_sz; void *chain_bitmap; spinlock_t chain_buf_lock; @@ -1136,12 +1132,10 @@ struct mpi3mr_ioc { struct mpi3mr_drv_cmd host_tm_cmds; struct mpi3mr_drv_cmd dev_rmhs_cmds[MPI3MR_NUM_DEVRMCMD]; struct mpi3mr_drv_cmd evtack_cmds[MPI3MR_NUM_EVTACKCMD]; - u16 devrem_bitmap_sz; void *devrem_bitmap; - u16 dev_handle_bitmap_sz; + u16 dev_handle_bitmap_bits; void *removepend_bitmap; struct list_head delayed_rmhs_list; - u16 evtack_cmds_bitmap_sz; void *evtack_cmds_bitmap; struct list_head delayed_evtack_cmds_list; diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c index 0c4aabaefdcc..1e4467ea8472 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_fw.c +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -1128,7 +1128,6 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc, static int mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc) { - u16 dev_handle_bitmap_sz; void *removepend_bitmap; if (mrioc->facts.reply_sz > mrioc->reply_sz) { @@ -1160,25 +1159,23 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc) "\tcontroller while sas transport support is enabled at the\n" "\tdriver, please reboot the system or reload the driver\n"); - dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8; - if (mrioc->facts.max_devhandle % 8) - dev_handle_bitmap_sz++; - if (dev_handle_bitmap_sz > mrioc->dev_handle_bitmap_sz) { - removepend_bitmap = krealloc(mrioc->removepend_bitmap, - dev_handle_bitmap_sz, GFP_KERNEL); + if (mrioc->facts.max_devhandle > mrioc->dev_handle_bitmap_bits) { + removepend_bitmap = bitmap_zalloc(mrioc->facts.max_devhandle, + GFP_KERNEL); if (!removepend_bitmap) { ioc_err(mrioc, - "failed to increase removepend_bitmap sz from: %d to %d\n", - mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz); + "failed to increase removepend_bitmap bits from %d to %d\n", + mrioc->dev_handle_bitmap_bits, + mrioc->facts.max_devhandle); return -EPERM; } - memset(removepend_bitmap + mrioc->dev_handle_bitmap_sz, 0, - dev_handle_bitmap_sz - mrioc->dev_handle_bitmap_sz); + bitmap_free(mrioc->removepend_bitmap); mrioc->removepend_bitmap = removepend_bitmap; ioc_info(mrioc, - "increased dev_handle_bitmap_sz from %d to %d\n", - mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz); - mrioc->dev_handle_bitmap_sz = dev_handle_bitmap_sz; + "increased bits of dev_handle_bitmap from %d to %d\n", + mrioc->dev_handle_bitmap_bits, + mrioc->facts.max_devhandle); + mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle; } return 
0; @@ -2957,27 +2954,18 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc) if (!mrioc->pel_abort_cmd.reply) goto out_failed; - mrioc->dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8; - if (mrioc->facts.max_devhandle % 8) - mrioc->dev_handle_bitmap_sz++; - mrioc->removepend_bitmap = kzalloc(mrioc->dev_handle_bitmap_sz, - GFP_KERNEL); + mrioc->dev_handle_bitmap_bits = mrioc->facts.max_devhandle; + mrioc->removepend_bitmap = bitmap_zalloc(mrioc->dev_handle_bitmap_bits, + GFP_KERNEL); if (!mrioc->removepend_bitmap) goto out_failed; - mrioc->devrem_bitmap_sz = MPI3MR_NUM_DEVRMCMD / 8; - if (MPI3MR_NUM_DEVRMCMD % 8) - mrioc->devrem_bitmap_sz++; - mrioc->devrem_bitmap = kzalloc(mrioc->devrem_bitmap_sz, - GFP_KERNEL); + mrioc->devrem_bitmap = bitmap_zalloc(MPI3MR_NUM_DEVRMCMD, GFP_KERNEL); if (!mrioc->devrem_bitmap) goto out_failed; - mrioc->evtack_cmds_bitmap_sz = MPI3MR_NUM_EVTACKCMD / 8; - if (MPI3MR_NUM_EVTACKCMD % 8) - mrioc->evtack_cmds_bitmap_sz++; - mrioc->evtack_cmds_bitmap = kzalloc(mrioc->evtack_cmds_bitmap_sz, - GFP_KERNEL); + mrioc->evtack_cmds_bitmap = bitmap_zalloc(MPI3MR_NUM_EVTACKCMD, + GFP_KERNEL); if (!mrioc->evtack_cmds_bitmap) goto out_failed; @@ -3415,10 +3403,7 @@ static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc) if (!mrioc->chain_sgl_list[i].addr) goto out_failed; } - mrioc->chain_bitmap_sz = num_chains / 8; - if (num_chains % 8) - mrioc->chain_bitmap_sz++; - mrioc->chain_bitmap = kzalloc(mrioc->chain_bitmap_sz, GFP_KERNEL); + mrioc->chain_bitmap = bitmap_zalloc(num_chains, GFP_KERNEL); if (!mrioc->chain_bitmap) goto out_failed; return retval; @@ -4190,10 +4175,11 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc) for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) memset(mrioc->evtack_cmds[i].reply, 0, sizeof(*mrioc->evtack_cmds[i].reply)); - memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz); - memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz); - memset(mrioc->evtack_cmds_bitmap, 0, - mrioc->evtack_cmds_bitmap_sz); + bitmap_clear(mrioc->removepend_bitmap, 0, + mrioc->dev_handle_bitmap_bits); + bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD); + bitmap_clear(mrioc->evtack_cmds_bitmap, 0, + MPI3MR_NUM_EVTACKCMD); } for (i = 0; i < mrioc->num_queues; i++) { @@ -4319,16 +4305,16 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc) mrioc->evtack_cmds[i].reply = NULL; } - kfree(mrioc->removepend_bitmap); + bitmap_free(mrioc->removepend_bitmap); mrioc->removepend_bitmap = NULL; - kfree(mrioc->devrem_bitmap); + bitmap_free(mrioc->devrem_bitmap); mrioc->devrem_bitmap = NULL; - kfree(mrioc->evtack_cmds_bitmap); + bitmap_free(mrioc->evtack_cmds_bitmap); mrioc->evtack_cmds_bitmap = NULL; - kfree(mrioc->chain_bitmap); + bitmap_free(mrioc->chain_bitmap); mrioc->chain_bitmap = NULL; kfree(mrioc->transport_cmds.reply); @@ -4887,9 +4873,10 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc, mpi3mr_flush_delayed_cmd_lists(mrioc); mpi3mr_flush_drv_cmds(mrioc); - memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz); - memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz); - memset(mrioc->evtack_cmds_bitmap, 0, mrioc->evtack_cmds_bitmap_sz); + bitmap_clear(mrioc->devrem_bitmap, 0, MPI3MR_NUM_DEVRMCMD); + bitmap_clear(mrioc->removepend_bitmap, 0, + mrioc->dev_handle_bitmap_bits); + bitmap_clear(mrioc->evtack_cmds_bitmap, 0, MPI3MR_NUM_EVTACKCMD); mpi3mr_flush_host_io(mrioc); mpi3mr_cleanup_fwevt_list(mrioc); mpi3mr_invalidate_devhandles(mrioc); diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c 
b/drivers/scsi/mpi3mr/mpi3mr_transport.c index 3fc897336b5e..3b61815979da 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_transport.c +++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c @@ -1280,7 +1280,7 @@ void mpi3mr_sas_host_add(struct mpi3mr_ioc *mrioc) if (mrioc->sas_hba.enclosure_handle) { if (!(mpi3mr_cfg_get_enclosure_pg0(mrioc, &ioc_status, - &encl_pg0, sizeof(dev_pg0), + &encl_pg0, sizeof(encl_pg0), MPI3_ENCLOS_PGAD_FORM_HANDLE, mrioc->sas_hba.enclosure_handle)) && (ioc_status == MPI3_IOCSTATUS_SUCCESS)) diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 7c626a6554b9..0efc659b8336 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2953,8 +2953,13 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) } if (sdkp->device->type == TYPE_ZBC) { - /* Host-managed */ + /* + * Host-managed: Per ZBC and ZAC specifications, writes in + * sequential write required zones of host-managed devices must + * be aligned to the device physical block size. + */ disk_set_zoned(sdkp->disk, BLK_ZONED_HM); + blk_queue_zone_write_granularity(q, sdkp->physical_block_size); } else { sdkp->zoned = zoned; if (sdkp->zoned == 1) { diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 13c529d47654..4131e57d819d 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -958,14 +958,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, u8 buf[SD_BUF_SIZE]) disk_set_max_active_zones(disk, 0); nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks); - /* - * Per ZBC and ZAC specifications, writes in sequential write required - * zones of host-managed devices must be aligned to the device physical - * block size. - */ - if (blk_queue_zoned_model(q) == BLK_ZONED_HM) - blk_queue_zone_write_granularity(q, sdkp->physical_block_size); - sdkp->early_zone_info.nr_zones = nr_zones; sdkp->early_zone_info.zone_blocks = zone_blocks; diff --git a/drivers/soc/mediatek/mt8186-pm-domains.h b/drivers/soc/mediatek/mt8186-pm-domains.h index 108af61854a3..fce86f79c505 100644 --- a/drivers/soc/mediatek/mt8186-pm-domains.h +++ b/drivers/soc/mediatek/mt8186-pm-domains.h @@ -304,7 +304,6 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = { .ctl_offs = 0x9FC, .pwr_sta_offs = 0x16C, .pwr_sta2nd_offs = 0x170, - .caps = MTK_SCPD_KEEP_DEFAULT_OFF, }, [MT8186_POWER_DOMAIN_ADSP_INFRA] = { .name = "adsp_infra", @@ -312,7 +311,6 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = { .ctl_offs = 0x9F8, .pwr_sta_offs = 0x16C, .pwr_sta2nd_offs = 0x170, - .caps = MTK_SCPD_KEEP_DEFAULT_OFF, }, [MT8186_POWER_DOMAIN_ADSP_TOP] = { .name = "adsp_top", @@ -332,7 +330,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = { MT8186_TOP_AXI_PROT_EN_3_CLR, MT8186_TOP_AXI_PROT_EN_3_STA), }, - .caps = MTK_SCPD_SRAM_ISO | MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_ACTIVE_WAKEUP, + .caps = MTK_SCPD_SRAM_ISO | MTK_SCPD_ACTIVE_WAKEUP, }, }; diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c index 0469c9dfeb04..00526fd37d7b 100644 --- a/drivers/soc/mediatek/mtk-svs.c +++ b/drivers/soc/mediatek/mtk-svs.c @@ -1324,7 +1324,7 @@ static int svs_init01(struct svs_platform *svsp) svsb->pm_runtime_enabled_count++; } - ret = pm_runtime_get_sync(svsb->opp_dev); + ret = pm_runtime_resume_and_get(svsb->opp_dev); if (ret < 0) { dev_err(svsb->dev, "mtcmos on fail: %d\n", ret); goto svs_init01_resume_cpuidle; @@ -1461,6 +1461,7 @@ static int svs_init02(struct svs_platform *svsp) { struct svs_bank *svsb; unsigned long flags, time_left; + int ret; u32 idx; for 
(idx = 0; idx < svsp->bank_max; idx++) { @@ -1479,7 +1480,8 @@ static int svs_init02(struct svs_platform *svsp) msecs_to_jiffies(5000)); if (!time_left) { dev_err(svsb->dev, "init02 completion timeout\n"); - return -EBUSY; + ret = -EBUSY; + goto out_of_init02; } } @@ -1497,12 +1499,30 @@ static int svs_init02(struct svs_platform *svsp) if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) { if (svs_sync_bank_volts_from_opp(svsb)) { dev_err(svsb->dev, "sync volt fail\n"); - return -EPERM; + ret = -EPERM; + goto out_of_init02; } } } return 0; + +out_of_init02: + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svs_switch_bank(svsp); + svs_writel_relaxed(svsp, SVSB_PTPEN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_VAL_CLEAN, INTSTS); + spin_unlock_irqrestore(&svs_lock, flags); + + svsb->phase = SVSB_PHASE_ERROR; + svs_adjust_pm_opp_volts(svsb); + } + + return ret; } static void svs_mon_mode(struct svs_platform *svsp) @@ -1594,12 +1614,16 @@ static int svs_resume(struct device *dev) ret = svs_init02(svsp); if (ret) - goto out_of_resume; + goto svs_resume_reset_assert; svs_mon_mode(svsp); return 0; +svs_resume_reset_assert: + dev_err(svsp->dev, "assert reset: %d\n", + reset_control_assert(svsp->rst)); + out_of_resume: clk_disable_unprepare(svsp->main_clk); return ret; @@ -2385,14 +2409,6 @@ static int svs_probe(struct platform_device *pdev) goto svs_probe_free_resource; } - ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr, - IRQF_ONESHOT, svsp->name, svsp); - if (ret) { - dev_err(svsp->dev, "register irq(%d) failed: %d\n", - svsp_irq, ret); - goto svs_probe_free_resource; - } - svsp->main_clk = devm_clk_get(svsp->dev, "main"); if (IS_ERR(svsp->main_clk)) { dev_err(svsp->dev, "failed to get clock: %ld\n", @@ -2414,6 +2430,14 @@ static int svs_probe(struct platform_device *pdev) goto svs_probe_clk_disable; } + ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr, + IRQF_ONESHOT, svsp->name, svsp); + if (ret) { + dev_err(svsp->dev, "register irq(%d) failed: %d\n", + svsp_irq, ret); + goto svs_probe_iounmap; + } + ret = svs_start(svsp); if (ret) { dev_err(svsp->dev, "svs start fail: %d\n", ret); diff --git a/drivers/soc/qcom/qcom_stats.c b/drivers/soc/qcom/qcom_stats.c index 121ea409fafc..b252bedf0cf1 100644 --- a/drivers/soc/qcom/qcom_stats.c +++ b/drivers/soc/qcom/qcom_stats.c @@ -92,7 +92,7 @@ static int qcom_subsystem_sleep_stats_show(struct seq_file *s, void *unused) /* Items are allocated lazily, so lookup pointer each time */ stat = qcom_smem_get(subsystem->pid, subsystem->smem_item, NULL); if (IS_ERR(stat)) - return -EIO; + return 0; qcom_print_stats(s, stat); @@ -170,20 +170,14 @@ static void qcom_create_soc_sleep_stat_files(struct dentry *root, void __iomem * static void qcom_create_subsystem_stat_files(struct dentry *root, const struct stats_config *config) { - const struct sleep_stats *stat; int i; if (!config->subsystem_stats_in_smem) return; - for (i = 0; i < ARRAY_SIZE(subsystems); i++) { - stat = qcom_smem_get(subsystems[i].pid, subsystems[i].smem_item, NULL); - if (IS_ERR(stat)) - continue; - + for (i = 0; i < ARRAY_SIZE(subsystems); i++) debugfs_create_file(subsystems[i].name, 0400, root, (void *)&subsystems[i], &qcom_subsystem_sleep_stats_fops); - } } static int qcom_stats_probe(struct platform_device *pdev) diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c index 2de082765bef..c76381899ef4 100644 --- 
a/drivers/soc/xilinx/xlnx_event_manager.c +++ b/drivers/soc/xilinx/xlnx_event_manager.c @@ -116,8 +116,10 @@ static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, cons INIT_LIST_HEAD(&eve_data->cb_list_head); cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL); - if (!cb_data) + if (!cb_data) { + kfree(eve_data); return -ENOMEM; + } cb_data->eve_cb = cb_fun; cb_data->agent_data = data; diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c index 04b3529f8929..963498db0fd2 100644 --- a/drivers/soundwire/bus_type.c +++ b/drivers/soundwire/bus_type.c @@ -105,20 +105,19 @@ static int sdw_drv_probe(struct device *dev) if (ret) return ret; - mutex_lock(&slave->sdw_dev_lock); - ret = drv->probe(slave, id); if (ret) { name = drv->name; if (!name) name = drv->driver.name; - mutex_unlock(&slave->sdw_dev_lock); dev_err(dev, "Probe of %s failed: %d\n", name, ret); dev_pm_domain_detach(dev, false); return ret; } + mutex_lock(&slave->sdw_dev_lock); + /* device is probed so let's read the properties now */ if (drv->ops && drv->ops->read_prop) drv->ops->read_prop(slave); @@ -167,14 +166,12 @@ static int sdw_drv_remove(struct device *dev) int ret = 0; mutex_lock(&slave->sdw_dev_lock); - slave->probed = false; + mutex_unlock(&slave->sdw_dev_lock); if (drv->remove) ret = drv->remove(slave); - mutex_unlock(&slave->sdw_dev_lock); - dev_pm_domain_detach(dev, false); return ret; diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c index b65cdf2a7593..e7da7d7b213f 100644 --- a/drivers/soundwire/cadence_master.c +++ b/drivers/soundwire/cadence_master.c @@ -555,6 +555,29 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns, return SDW_CMD_OK; } +static void cdns_read_response(struct sdw_cdns *cdns) +{ + u32 num_resp, cmd_base; + int i; + + /* RX_FIFO_AVAIL can be 2 entries more than the FIFO size */ + BUILD_BUG_ON(ARRAY_SIZE(cdns->response_buf) < CDNS_MCP_CMD_LEN + 2); + + num_resp = cdns_readl(cdns, CDNS_MCP_FIFOSTAT); + num_resp &= CDNS_MCP_RX_FIFO_AVAIL; + if (num_resp > ARRAY_SIZE(cdns->response_buf)) { + dev_warn(cdns->dev, "RX AVAIL %d too long\n", num_resp); + num_resp = ARRAY_SIZE(cdns->response_buf); + } + + cmd_base = CDNS_MCP_CMD_BASE; + + for (i = 0; i < num_resp; i++) { + cdns->response_buf[i] = cdns_readl(cdns, cmd_base); + cmd_base += CDNS_MCP_CMD_WORD_LEN; + } +} + static enum sdw_command_response _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd, int offset, int count, bool defer) @@ -596,6 +619,10 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd, dev_err(cdns->dev, "IO transfer timed out, cmd %d device %d addr %x len %d\n", cmd, msg->dev_num, msg->addr, msg->len); msg->len = 0; + + /* Drain anything in the RX_FIFO */ + cdns_read_response(cdns); + return SDW_CMD_TIMEOUT; } @@ -769,22 +796,6 @@ EXPORT_SYMBOL(cdns_read_ping_status); * IRQ handling */ -static void cdns_read_response(struct sdw_cdns *cdns) -{ - u32 num_resp, cmd_base; - int i; - - num_resp = cdns_readl(cdns, CDNS_MCP_FIFOSTAT); - num_resp &= CDNS_MCP_RX_FIFO_AVAIL; - - cmd_base = CDNS_MCP_CMD_BASE; - - for (i = 0; i < num_resp; i++) { - cdns->response_buf[i] = cdns_readl(cdns, cmd_base); - cmd_base += CDNS_MCP_CMD_WORD_LEN; - } -} - static int cdns_update_slave_status(struct sdw_cdns *cdns, u64 slave_intstat) { diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h index ca9e805bab88..51e6ecc027cb 100644 --- a/drivers/soundwire/cadence_master.h +++ b/drivers/soundwire/cadence_master.h @@ -8,6 +8,12 @@ #define 
SDW_CADENCE_GSYNC_KHZ 4 /* 4 kHz */ #define SDW_CADENCE_GSYNC_HZ (SDW_CADENCE_GSYNC_KHZ * 1000) +/* + * The Cadence IP supports up to 32 entries in the FIFO, though implementations + * can configure the IP to have a smaller FIFO. + */ +#define CDNS_MCP_IP_MAX_CMD_LEN 32 + /** * struct sdw_cdns_pdi: PDI (Physical Data Interface) instance * @@ -114,7 +120,12 @@ struct sdw_cdns { struct sdw_bus bus; unsigned int instance; - u32 response_buf[0x80]; + /* + * The datasheet says the RX FIFO AVAIL can be 2 entries more + * than the FIFO capacity, so allow for this. + */ + u32 response_buf[CDNS_MCP_IP_MAX_CMD_LEN + 2]; + struct completion tx_complete; struct sdw_defer *defer; diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c index 3ac73691fbb5..54fc226e1cdf 100644 --- a/drivers/spi/spi-intel.c +++ b/drivers/spi/spi-intel.c @@ -1366,14 +1366,14 @@ static int intel_spi_populate_chip(struct intel_spi *ispi) if (!spi_new_device(ispi->master, &chip)) return -ENODEV; - /* Add the second chip if present */ - if (ispi->master->num_chipselect < 2) - return 0; - ret = intel_spi_read_desc(ispi); if (ret) return ret; + /* Add the second chip if present */ + if (ispi->master->num_chipselect < 2) + return 0; + chip.platform_data = NULL; chip.chip_select = 1; diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c index 9f356612ba7e..06c54d49076a 100644 --- a/drivers/spi/spi-tegra210-quad.c +++ b/drivers/spi/spi-tegra210-quad.c @@ -1156,6 +1156,10 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, ret = -EIO; goto exit; } + if (!xfer->cs_change) { + tegra_qspi_transfer_end(spi); + spi_transfer_delay_exec(xfer); + } break; default: ret = -EINVAL; @@ -1164,14 +1168,14 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, msg->actual_length += xfer->len; transfer_phase++; } - if (!xfer->cs_change) { - tegra_qspi_transfer_end(spi); - spi_transfer_delay_exec(xfer); - } ret = 0; exit: msg->status = ret; + if (ret < 0) { + tegra_qspi_transfer_end(spi); + spi_transfer_delay_exec(xfer); + } return ret; } @@ -1297,7 +1301,7 @@ static bool tegra_qspi_validate_cmb_seq(struct tegra_qspi *tqspi, if (xfer->len > 4 || xfer->len < 3) return false; xfer = list_next_entry(xfer, transfer_list); - if (!tqspi->soc_data->has_dma || xfer->len > (QSPI_FIFO_DEPTH << 2)) + if (!tqspi->soc_data->has_dma && xfer->len > (QSPI_FIFO_DEPTH << 2)) return false; return true; diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c index b6abd3770e81..edd20a03f7a2 100644 --- a/drivers/staging/emxx_udc/emxx_udc.c +++ b/drivers/staging/emxx_udc/emxx_udc.c @@ -2590,10 +2590,15 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep, req->unaligned = false; if (req->unaligned) { - if (!ep->virt_buf) + if (!ep->virt_buf) { ep->virt_buf = dma_alloc_coherent(udc->dev, PAGE_SIZE, &ep->phys_buf, GFP_ATOMIC | GFP_DMA); + if (!ep->virt_buf) { + spin_unlock_irqrestore(&udc->lock, flags); + return -ENOMEM; + } + } if (ep->epnum > 0) { if (ep->direct == USB_DIR_IN) memcpy(ep->virt_buf, req->req.buf, diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c index d4e06a3929f3..b59f6a4cb611 100644 --- a/drivers/staging/pi433/pi433_if.c +++ b/drivers/staging/pi433/pi433_if.c @@ -55,6 +55,7 @@ static dev_t pi433_dev; static DEFINE_IDR(pi433_idr); static DEFINE_MUTEX(minor_lock); /* Protect idr accesses */ +static struct dentry *root_dir; /* debugfs root directory for the driver */ static struct class *pi433_class; /* mainly for udev to create /dev/pi433 
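The cadence_master change a little earlier sizes response_buf for the worst case (the 32-entry command FIFO plus the two extra entries RX_FIFO_AVAIL may report) and clamps the count read from hardware before using it as a loop bound, so a misbehaving register value can no longer overrun the buffer. The clamp, reduced to a standalone sketch with invented values:

```c
#include <stdio.h>
#include <stdint.h>

#define FIFO_ENTRIES	(32 + 2)	/* capacity plus the 2 extra AVAIL may report */

int main(void)
{
	uint32_t response_buf[FIFO_ENTRIES];
	uint32_t num_resp = 57;		/* pretend the AVAIL field returned garbage */

	if (num_resp > sizeof(response_buf) / sizeof(response_buf[0])) {
		printf("RX AVAIL %u too long, clamping\n", num_resp);
		num_resp = sizeof(response_buf) / sizeof(response_buf[0]);
	}

	for (uint32_t i = 0; i < num_resp; i++)
		response_buf[i] = 0;	/* stand-in for the register reads */

	printf("read %u responses\n", num_resp);
	return 0;
}
```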
*/ @@ -1306,8 +1307,7 @@ static int pi433_probe(struct spi_device *spi) /* spi setup */ spi_set_drvdata(spi, device); - entry = debugfs_create_dir(dev_name(device->dev), - debugfs_lookup(KBUILD_MODNAME, NULL)); + entry = debugfs_create_dir(dev_name(device->dev), root_dir); debugfs_create_file("regs", 0400, entry, device, &pi433_debugfs_regs_fops); return 0; @@ -1333,9 +1333,8 @@ RX_failed: static void pi433_remove(struct spi_device *spi) { struct pi433_device *device = spi_get_drvdata(spi); - struct dentry *mod_entry = debugfs_lookup(KBUILD_MODNAME, NULL); - debugfs_remove(debugfs_lookup(dev_name(device->dev), mod_entry)); + debugfs_lookup_and_remove(dev_name(device->dev), root_dir); /* free GPIOs */ free_gpio(device); @@ -1408,7 +1407,7 @@ static int __init pi433_init(void) return PTR_ERR(pi433_class); } - debugfs_create_dir(KBUILD_MODNAME, NULL); + root_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); status = spi_register_driver(&pi433_spi_driver); if (status < 0) { @@ -1427,7 +1426,7 @@ static void __exit pi433_exit(void) spi_unregister_driver(&pi433_spi_driver); class_destroy(pi433_class); unregister_chrdev(MAJOR(pi433_dev), pi433_spi_driver.driver.name); - debugfs_remove_recursive(debugfs_lookup(KBUILD_MODNAME, NULL)); + debugfs_remove(root_dir); } module_exit(pi433_exit); diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c index 702551056227..f660f947ab63 100644 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c @@ -185,7 +185,6 @@ static void _rtl92e_dm_init_fsync(struct net_device *dev); static void _rtl92e_dm_deinit_fsync(struct net_device *dev); static void _rtl92e_dm_check_txrateandretrycount(struct net_device *dev); -static void _rtl92e_dm_check_ac_dc_power(struct net_device *dev); static void _rtl92e_dm_check_fsync(struct net_device *dev); static void _rtl92e_dm_check_rf_ctrl_gpio(void *data); static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t); @@ -238,8 +237,6 @@ void rtl92e_dm_watchdog(struct net_device *dev) if (priv->being_init_adapter) return; - _rtl92e_dm_check_ac_dc_power(dev); - _rtl92e_dm_check_txrateandretrycount(dev); _rtl92e_dm_check_edca_turbo(dev); @@ -257,28 +254,6 @@ void rtl92e_dm_watchdog(struct net_device *dev) _rtl92e_dm_cts_to_self(dev); } -static void _rtl92e_dm_check_ac_dc_power(struct net_device *dev) -{ - struct r8192_priv *priv = rtllib_priv(dev); - static const char ac_dc_script[] = "/etc/acpi/wireless-rtl-ac-dc-power.sh"; - char *argv[] = {(char *)ac_dc_script, DRV_NAME, NULL}; - static char *envp[] = {"HOME=/", - "TERM=linux", - "PATH=/usr/bin:/bin", - NULL}; - - if (priv->ResetProgress == RESET_TYPE_SILENT) { - return; - } - - if (priv->rtllib->state != RTLLIB_LINKED) - return; - call_usermodehelper(ac_dc_script, argv, envp, UMH_WAIT_PROC); - - return; -}; - - void rtl92e_init_adaptive_rate(struct net_device *dev) { @@ -1667,10 +1642,6 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data) u8 tmp1byte; enum rt_rf_power_state rf_power_state_to_set; bool bActuallySet = false; - char *argv[3]; - static const char RadioPowerPath[] = "/etc/acpi/events/RadioPower.sh"; - static char *envp[] = {"HOME=/", "TERM=linux", "PATH=/usr/bin:/bin", - NULL}; bActuallySet = false; @@ -1700,14 +1671,6 @@ static void _rtl92e_dm_check_rf_ctrl_gpio(void *data) mdelay(1000); priv->bHwRfOffAction = 1; rtl92e_set_rf_state(dev, rf_power_state_to_set, RF_CHANGE_BY_HW); - if (priv->hw_radio_off) - argv[1] = "RFOFF"; - else - argv[1] = "RFON"; - - argv[0] = (char 
*)RadioPowerPath; - argv[2] = NULL; - call_usermodehelper(RadioPowerPath, argv, envp, UMH_WAIT_PROC); } } diff --git a/drivers/staging/rtl8723bs/include/rtw_security.h b/drivers/staging/rtl8723bs/include/rtw_security.h index a68b73858462..7587fa888527 100644 --- a/drivers/staging/rtl8723bs/include/rtw_security.h +++ b/drivers/staging/rtl8723bs/include/rtw_security.h @@ -107,13 +107,13 @@ struct security_priv { u32 dot118021XGrpPrivacy; /* This specify the privacy algthm. used for Grp key */ u32 dot118021XGrpKeyid; /* key id used for Grp Key (tx key index) */ - union Keytype dot118021XGrpKey[BIP_MAX_KEYID]; /* 802.1x Group Key, for inx0 and inx1 */ - union Keytype dot118021XGrptxmickey[BIP_MAX_KEYID]; - union Keytype dot118021XGrprxmickey[BIP_MAX_KEYID]; + union Keytype dot118021XGrpKey[BIP_MAX_KEYID + 1]; /* 802.1x Group Key, for inx0 and inx1 */ + union Keytype dot118021XGrptxmickey[BIP_MAX_KEYID + 1]; + union Keytype dot118021XGrprxmickey[BIP_MAX_KEYID + 1]; union pn48 dot11Grptxpn; /* PN48 used for Grp Key xmit. */ union pn48 dot11Grprxpn; /* PN48 used for Grp Key recv. */ u32 dot11wBIPKeyid; /* key id used for BIP Key (tx key index) */ - union Keytype dot11wBIPKey[6]; /* BIP Key, for index4 and index5 */ + union Keytype dot11wBIPKey[BIP_MAX_KEYID + 1]; /* BIP Key, for index4 and index5 */ union pn48 dot11wBIPtxpn; /* PN48 used for Grp Key xmit. */ union pn48 dot11wBIPrxpn; /* PN48 used for Grp Key recv. */ diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c index 6aeb169c6ebf..5c738011322f 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c @@ -350,7 +350,7 @@ int rtw_cfg80211_check_bss(struct adapter *padapter) bss = cfg80211_get_bss(padapter->rtw_wdev->wiphy, notify_channel, pnetwork->mac_address, pnetwork->ssid.ssid, pnetwork->ssid.ssid_length, - WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); + IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); cfg80211_put_bss(padapter->rtw_wdev->wiphy, bss); @@ -711,6 +711,7 @@ exit: static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len) { int ret = 0; + u8 max_idx; u32 wep_key_idx, wep_key_len; struct adapter *padapter = rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; @@ -724,26 +725,29 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param goto exit; } - if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff && - param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff && - param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) { - if (param->u.crypt.idx >= WEP_KEYS - || param->u.crypt.idx >= BIP_MAX_KEYID) { - ret = -EINVAL; - goto exit; - } - } else { - { + if (param->sta_addr[0] != 0xff || param->sta_addr[1] != 0xff || + param->sta_addr[2] != 0xff || param->sta_addr[3] != 0xff || + param->sta_addr[4] != 0xff || param->sta_addr[5] != 0xff) { ret = -EINVAL; goto exit; } + + if (strcmp(param->u.crypt.alg, "WEP") == 0) + max_idx = WEP_KEYS - 1; + else + max_idx = BIP_MAX_KEYID; + + if (param->u.crypt.idx > max_idx) { + netdev_err(dev, "Error crypt.idx %d > %d\n", param->u.crypt.idx, max_idx); + ret = -EINVAL; + goto exit; } if (strcmp(param->u.crypt.alg, "WEP") == 0) { wep_key_idx = param->u.crypt.idx; wep_key_len = param->u.crypt.key_len; - if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0)) { + if (wep_key_len <= 0) { ret = -EINVAL; goto exit; } @@ -1135,8 +1139,8 @@ void rtw_cfg80211_unlink_bss(struct adapter 
*padapter, struct wlan_network *pnet bss = cfg80211_get_bss(wiphy, NULL/*notify_channel*/, select_network->mac_address, select_network->ssid.ssid, - select_network->ssid.ssid_length, 0/*WLAN_CAPABILITY_ESS*/, - 0/*WLAN_CAPABILITY_ESS*/); + select_network->ssid.ssid_length, IEEE80211_BSS_TYPE_ANY, + IEEE80211_PRIVACY_ANY); if (bss) { cfg80211_unlink_bss(wiphy, bss); diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c index 30374a820496..40a3157fb735 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c @@ -46,6 +46,7 @@ static int wpa_set_auth_algs(struct net_device *dev, u32 value) static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len) { int ret = 0; + u8 max_idx; u32 wep_key_idx, wep_key_len, wep_total_len; struct ndis_802_11_wep *pwep = NULL; struct adapter *padapter = rtw_netdev_priv(dev); @@ -60,19 +61,22 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, goto exit; } - if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff && - param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff && - param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) { - if (param->u.crypt.idx >= WEP_KEYS || - param->u.crypt.idx >= BIP_MAX_KEYID) { - ret = -EINVAL; - goto exit; - } - } else { - { - ret = -EINVAL; - goto exit; - } + if (param->sta_addr[0] != 0xff || param->sta_addr[1] != 0xff || + param->sta_addr[2] != 0xff || param->sta_addr[3] != 0xff || + param->sta_addr[4] != 0xff || param->sta_addr[5] != 0xff) { + ret = -EINVAL; + goto exit; + } + + if (strcmp(param->u.crypt.alg, "WEP") == 0) + max_idx = WEP_KEYS - 1; + else + max_idx = BIP_MAX_KEYID; + + if (param->u.crypt.idx > max_idx) { + netdev_err(dev, "Error crypt.idx %d > %d\n", param->u.crypt.idx, max_idx); + ret = -EINVAL; + goto exit; } if (strcmp(param->u.crypt.alg, "WEP") == 0) { @@ -84,9 +88,6 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, wep_key_idx = param->u.crypt.idx; wep_key_len = param->u.crypt.key_len; - if (wep_key_idx > WEP_KEYS) - return -EINVAL; - if (wep_key_len > 0) { wep_key_len = wep_key_len <= 5 ? 5 : 13; wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material); diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig index f0c845679250..e3cfad10d5dd 100644 --- a/drivers/thermal/intel/Kconfig +++ b/drivers/thermal/intel/Kconfig @@ -64,7 +64,8 @@ endmenu config INTEL_BXT_PMIC_THERMAL tristate "Intel Broxton PMIC thermal driver" - depends on X86 && INTEL_SOC_PMIC_BXTWC && REGMAP + depends on X86 && INTEL_SOC_PMIC_BXTWC + select REGMAP help Select this driver for Intel Broxton PMIC with ADC channels monitoring system temperature measurements and alerts. 
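Both rtl8723bs paths above now derive the maximum allowed key index from the requested algorithm (WEP_KEYS - 1 for WEP, BIP_MAX_KEYID for everything else) and reject anything larger before the key material is touched; the group-key arrays were likewise grown to BIP_MAX_KEYID + 1 entries so index BIP_MAX_KEYID is actually storable. A condensed, standalone sketch of the bound check (constant values assumed for illustration, not taken from the header):

```c
#include <stdio.h>
#include <string.h>
#include <errno.h>

#define WEP_KEYS	4	/* assumed values for illustration */
#define BIP_MAX_KEYID	5

static int check_key_idx(const char *alg, unsigned int idx)
{
	unsigned int max_idx;

	if (strcmp(alg, "WEP") == 0)
		max_idx = WEP_KEYS - 1;
	else
		max_idx = BIP_MAX_KEYID;

	if (idx > max_idx) {
		fprintf(stderr, "Error crypt.idx %u > %u\n", idx, max_idx);
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	printf("WEP idx 3 -> %d\n", check_key_idx("WEP", 3));	/* accepted */
	printf("BIP idx 5 -> %d\n", check_key_idx("BIP", 5));	/* accepted */
	printf("WEP idx 4 -> %d\n", check_key_idx("WEP", 4));	/* rejected */
	return 0;
}
```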
diff --git a/drivers/thermal/intel/intel_quark_dts_thermal.c b/drivers/thermal/intel/intel_quark_dts_thermal.c index 3eafc6b0e6c3..b43fbd5eaa6b 100644 --- a/drivers/thermal/intel/intel_quark_dts_thermal.c +++ b/drivers/thermal/intel/intel_quark_dts_thermal.c @@ -415,22 +415,14 @@ MODULE_DEVICE_TABLE(x86cpu, qrk_thermal_ids); static int __init intel_quark_thermal_init(void) { - int err = 0; - if (!x86_match_cpu(qrk_thermal_ids) || !iosf_mbi_available()) return -ENODEV; soc_dts = alloc_soc_dts(); - if (IS_ERR(soc_dts)) { - err = PTR_ERR(soc_dts); - goto err_free; - } + if (IS_ERR(soc_dts)) + return PTR_ERR(soc_dts); return 0; - -err_free: - free_soc_dts(soc_dts); - return err; } static void __exit intel_quark_thermal_exit(void) diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 13a6cd0116a1..f9d667ce1619 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -1468,12 +1468,32 @@ static void lpuart_break_ctl(struct uart_port *port, int break_state) static void lpuart32_break_ctl(struct uart_port *port, int break_state) { - unsigned long temp; + unsigned long temp, modem; + struct tty_struct *tty; + unsigned int cflag = 0; + + tty = tty_port_tty_get(&port->state->port); + if (tty) { + cflag = tty->termios.c_cflag; + tty_kref_put(tty); + } temp = lpuart32_read(port, UARTCTRL) & ~UARTCTRL_SBK; + modem = lpuart32_read(port, UARTMODIR); - if (break_state != 0) + if (break_state != 0) { temp |= UARTCTRL_SBK; + /* + * LPUART CTS has higher priority than SBK, need to disable CTS before + * asserting SBK to avoid any interference if flow control is enabled. + */ + if (cflag & CRTSCTS && modem & UARTMODIR_TXCTSE) + lpuart32_write(port, modem & ~UARTMODIR_TXCTSE, UARTMODIR); + } else { + /* Re-enable the CTS when break off. 
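The intel_quark_dts_thermal cleanup above removes an error path that passed an ERR_PTR-encoded failure back into free_soc_dts(); since the allocation never succeeded there is nothing to free, and the error code can be returned directly. The general shape of that pattern, as a kernel-style sketch (the type and allocator below are hypothetical names, not part of the driver):

```c
/* Kernel-style sketch, not a standalone program. */
struct example_dts;
static struct example_dts *example_alloc(void);	/* hypothetical: returns ERR_PTR() on failure */

static int example_init(void)
{
	struct example_dts *dts = example_alloc();

	if (IS_ERR(dts))
		return PTR_ERR(dts);	/* nothing to free: the allocation never happened */

	return 0;
}
```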
*/ + if (cflag & CRTSCTS && !(modem & UARTMODIR_TXCTSE)) + lpuart32_write(port, modem | UARTMODIR_TXCTSE, UARTMODIR); + } lpuart32_write(port, temp, UARTCTRL); } diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index 83f35b7b0897..abff1c6470f6 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -1779,7 +1779,7 @@ static void pch_uart_exit_port(struct eg20t_port *priv) char name[32]; snprintf(name, sizeof(name), "uart%d_regs", priv->port.line); - debugfs_remove(debugfs_lookup(name, NULL)); + debugfs_lookup_and_remove(name, NULL); uart_remove_one_port(&pch_uart_driver, &priv->port); free_page((unsigned long)priv->rxbuf.buf); } diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 524921360ca7..93cf5f788817 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -1426,25 +1426,6 @@ static int sc16is7xx_probe(struct device *dev, } sched_set_fifo(s->kworker_task); -#ifdef CONFIG_GPIOLIB - if (devtype->nr_gpio) { - /* Setup GPIO cotroller */ - s->gpio.owner = THIS_MODULE; - s->gpio.parent = dev; - s->gpio.label = dev_name(dev); - s->gpio.direction_input = sc16is7xx_gpio_direction_input; - s->gpio.get = sc16is7xx_gpio_get; - s->gpio.direction_output = sc16is7xx_gpio_direction_output; - s->gpio.set = sc16is7xx_gpio_set; - s->gpio.base = -1; - s->gpio.ngpio = devtype->nr_gpio; - s->gpio.can_sleep = 1; - ret = gpiochip_add_data(&s->gpio, s); - if (ret) - goto out_thread; - } -#endif - /* reset device, purging any pending irq / data */ regmap_write(s->regmap, SC16IS7XX_IOCONTROL_REG << SC16IS7XX_REG_SHIFT, SC16IS7XX_IOCONTROL_SRESET_BIT); @@ -1521,6 +1502,25 @@ static int sc16is7xx_probe(struct device *dev, s->p[u].irda_mode = true; } +#ifdef CONFIG_GPIOLIB + if (devtype->nr_gpio) { + /* Setup GPIO cotroller */ + s->gpio.owner = THIS_MODULE; + s->gpio.parent = dev; + s->gpio.label = dev_name(dev); + s->gpio.direction_input = sc16is7xx_gpio_direction_input; + s->gpio.get = sc16is7xx_gpio_get; + s->gpio.direction_output = sc16is7xx_gpio_direction_output; + s->gpio.set = sc16is7xx_gpio_set; + s->gpio.base = -1; + s->gpio.ngpio = devtype->nr_gpio; + s->gpio.can_sleep = 1; + ret = gpiochip_add_data(&s->gpio, s); + if (ret) + goto out_thread; + } +#endif + /* * Setup interrupt. We first try to acquire the IRQ line as level IRQ. * If that succeeds, we can allow sharing the interrupt as well. 
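Several conversions in this section (pi433 and pch_uart above; chipidea, ulpi, the USB core, dwc3 and bcm63xx_udc below) replace debugfs_remove(debugfs_lookup(...)) with either a stored dentry or debugfs_lookup_and_remove(): the bare lookup takes a reference on the dentry that the old pairing never dropped. A kernel-style sketch of the two safe variants (module name and layout invented for illustration, not standalone code):

```c
#include <linux/debugfs.h>
#include <linux/module.h>

static struct dentry *example_root;	/* hypothetical module-wide directory */

static int __init example_init(void)
{
	/* Keep the dentry so teardown needs no lookup at all. */
	example_root = debugfs_create_dir("example", NULL);
	return 0;
}

static void __exit example_exit(void)
{
	debugfs_remove(example_root);
	/*
	 * Where only the name is known, debugfs_lookup_and_remove("example",
	 * NULL) is the equivalent that does not leak the lookup reference.
	 */
}
```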
@@ -1540,18 +1540,19 @@ static int sc16is7xx_probe(struct device *dev, if (!ret) return 0; -out_ports: - for (i--; i >= 0; i--) { - uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port); - clear_bit(s->p[i].port.line, &sc16is7xx_lines); - } - #ifdef CONFIG_GPIOLIB if (devtype->nr_gpio) gpiochip_remove(&s->gpio); out_thread: #endif + +out_ports: + for (i--; i >= 0; i--) { + uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port); + clear_bit(s->p[i].port.line, &sc16is7xx_lines); + } + kthread_stop(s->kworker_task); out_clk: diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index de06c3c2ff70..1ac6784ea1f9 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -1224,14 +1224,16 @@ static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver, { struct tty_struct *tty; - if (driver->ops->lookup) + if (driver->ops->lookup) { if (!file) tty = ERR_PTR(-EIO); else tty = driver->ops->lookup(driver, file, idx); - else + } else { + if (idx >= driver->num) + return ERR_PTR(-EINVAL); tty = driver->ttys[idx]; - + } if (!IS_ERR(tty)) tty_kref_get(tty); return tty; diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c index 71e091f879f0..1dc07f9214d5 100644 --- a/drivers/tty/vt/vc_screen.c +++ b/drivers/tty/vt/vc_screen.c @@ -415,10 +415,8 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) */ size = vcs_size(vc, attr, uni_mode); if (size < 0) { - if (read) - break; ret = size; - goto unlock_out; + break; } if (pos >= size) break; diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c index faf6b078b6c4..bbc610e5bd69 100644 --- a/drivers/usb/chipidea/debug.c +++ b/drivers/usb/chipidea/debug.c @@ -364,5 +364,5 @@ void dbg_create_files(struct ci_hdrc *ci) */ void dbg_remove_files(struct ci_hdrc *ci) { - debugfs_remove(debugfs_lookup(dev_name(ci->dev), usb_debug_root)); + debugfs_lookup_and_remove(dev_name(ci->dev), usb_debug_root); } diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c index d7c8461976ce..38703781ee2d 100644 --- a/drivers/usb/common/ulpi.c +++ b/drivers/usb/common/ulpi.c @@ -271,7 +271,7 @@ static int ulpi_regs_show(struct seq_file *seq, void *data) } DEFINE_SHOW_ATTRIBUTE(ulpi_regs); -#define ULPI_ROOT debugfs_lookup(KBUILD_MODNAME, NULL) +static struct dentry *ulpi_root; static int ulpi_register(struct device *dev, struct ulpi *ulpi) { @@ -301,7 +301,7 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi) return ret; } - root = debugfs_create_dir(dev_name(dev), ULPI_ROOT); + root = debugfs_create_dir(dev_name(dev), ulpi_root); debugfs_create_file("regs", 0444, root, ulpi, &ulpi_regs_fops); dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n", @@ -349,8 +349,7 @@ EXPORT_SYMBOL_GPL(ulpi_register_interface); */ void ulpi_unregister_interface(struct ulpi *ulpi) { - debugfs_remove_recursive(debugfs_lookup(dev_name(&ulpi->dev), - ULPI_ROOT)); + debugfs_lookup_and_remove(dev_name(&ulpi->dev), ulpi_root); device_unregister(&ulpi->dev); } EXPORT_SYMBOL_GPL(ulpi_unregister_interface); @@ -360,12 +359,11 @@ EXPORT_SYMBOL_GPL(ulpi_unregister_interface); static int __init ulpi_init(void) { int ret; - struct dentry *root; - root = debugfs_create_dir(KBUILD_MODNAME, NULL); + ulpi_root = debugfs_create_dir(KBUILD_MODNAME, NULL); ret = bus_register(&ulpi_bus); if (ret) - debugfs_remove(root); + debugfs_remove(ulpi_root); return ret; } subsys_initcall(ulpi_init); @@ -373,7 +371,7 @@ subsys_initcall(ulpi_init); static void __exit ulpi_exit(void) { bus_unregister(&ulpi_bus); - 
debugfs_remove_recursive(ULPI_ROOT); + debugfs_remove(ulpi_root); } module_exit(ulpi_exit); diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 11b15d7b357a..a415206cab04 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -998,7 +998,7 @@ static void usb_debugfs_init(void) static void usb_debugfs_cleanup(void) { - debugfs_remove(debugfs_lookup("devices", usb_debug_root)); + debugfs_lookup_and_remove("devices", usb_debug_root); } /* diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 8f9959ba9fd4..582ebd9cf9c2 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -1117,6 +1117,7 @@ struct dwc3_scratchpad_array { * address. * @num_ep_resized: carries the current number endpoints which have had its tx * fifo resized. + * @debug_root: root debugfs directory for this device to put its files in. */ struct dwc3 { struct work_struct drd_work; @@ -1332,6 +1333,7 @@ struct dwc3 { int max_cfg_eps; int last_fifo_depth; int num_ep_resized; + struct dentry *debug_root; }; #define INCRX_BURST_MODE 0 diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h index 48b44b88dc25..8bb2c9e3b9ac 100644 --- a/drivers/usb/dwc3/debug.h +++ b/drivers/usb/dwc3/debug.h @@ -414,11 +414,14 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status) #ifdef CONFIG_DEBUG_FS extern void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep); +extern void dwc3_debugfs_remove_endpoint_dir(struct dwc3_ep *dep); extern void dwc3_debugfs_init(struct dwc3 *d); extern void dwc3_debugfs_exit(struct dwc3 *d); #else static inline void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep) { } +static inline void dwc3_debugfs_remove_endpoint_dir(struct dwc3_ep *dep) +{ } static inline void dwc3_debugfs_init(struct dwc3 *d) { } static inline void dwc3_debugfs_exit(struct dwc3 *d) diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c index f2b7675c7f62..850df0e6bcab 100644 --- a/drivers/usb/dwc3/debugfs.c +++ b/drivers/usb/dwc3/debugfs.c @@ -873,27 +873,23 @@ static const struct dwc3_ep_file_map dwc3_ep_file_map[] = { { "GDBGEPINFO", &dwc3_ep_info_register_fops, }, }; -static void dwc3_debugfs_create_endpoint_files(struct dwc3_ep *dep, - struct dentry *parent) +void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep) { + struct dentry *dir; int i; + dir = debugfs_create_dir(dep->name, dep->dwc->debug_root); for (i = 0; i < ARRAY_SIZE(dwc3_ep_file_map); i++) { const struct file_operations *fops = dwc3_ep_file_map[i].fops; const char *name = dwc3_ep_file_map[i].name; - debugfs_create_file(name, 0444, parent, dep, fops); + debugfs_create_file(name, 0444, dir, dep, fops); } } -void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep) +void dwc3_debugfs_remove_endpoint_dir(struct dwc3_ep *dep) { - struct dentry *dir; - struct dentry *root; - - root = debugfs_lookup(dev_name(dep->dwc->dev), usb_debug_root); - dir = debugfs_create_dir(dep->name, root); - dwc3_debugfs_create_endpoint_files(dep, dir); + debugfs_lookup_and_remove(dep->name, dep->dwc->debug_root); } void dwc3_debugfs_init(struct dwc3 *dwc) @@ -911,6 +907,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc) dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START; root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root); + dwc->debug_root = root; debugfs_create_regset32("regdump", 0444, root, dwc->regset); debugfs_create_file("lsp_dump", 0644, root, dwc, &dwc3_lsp_fops); @@ -929,6 +926,6 @@ void dwc3_debugfs_init(struct dwc3 *dwc) void dwc3_debugfs_exit(struct dwc3 *dwc) { - 
debugfs_remove(debugfs_lookup(dev_name(dwc->dev), usb_debug_root)); + debugfs_lookup_and_remove(dev_name(dwc->dev), usb_debug_root); kfree(dwc->regset); } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 9d0d9a4af9b3..5997d7f943fe 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -3188,9 +3188,7 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) list_del(&dep->endpoint.ep_list); } - debugfs_remove_recursive(debugfs_lookup(dep->name, - debugfs_lookup(dev_name(dep->dwc->dev), - usb_debug_root))); + dwc3_debugfs_remove_endpoint_dir(dep); kfree(dep); } } diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c index 76cb60d13049..1460142fbc42 100644 --- a/drivers/usb/gadget/function/uvc_configfs.c +++ b/drivers/usb/gadget/function/uvc_configfs.c @@ -483,11 +483,68 @@ UVC_ATTR_RO(uvcg_default_output_, cname, aname) UVCG_DEFAULT_OUTPUT_ATTR(b_terminal_id, bTerminalID, 8); UVCG_DEFAULT_OUTPUT_ATTR(w_terminal_type, wTerminalType, 16); UVCG_DEFAULT_OUTPUT_ATTR(b_assoc_terminal, bAssocTerminal, 8); -UVCG_DEFAULT_OUTPUT_ATTR(b_source_id, bSourceID, 8); UVCG_DEFAULT_OUTPUT_ATTR(i_terminal, iTerminal, 8); #undef UVCG_DEFAULT_OUTPUT_ATTR +static ssize_t uvcg_default_output_b_source_id_show(struct config_item *item, + char *page) +{ + struct config_group *group = to_config_group(item); + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &group->cg_subsys->su_mutex; + struct uvc_output_terminal_descriptor *cd; + int result; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = group->cg_item.ci_parent->ci_parent-> + ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + cd = &opts->uvc_output_terminal; + + mutex_lock(&opts->lock); + result = sprintf(page, "%u\n", le8_to_cpu(cd->bSourceID)); + mutex_unlock(&opts->lock); + + mutex_unlock(su_mutex); + + return result; +} + +static ssize_t uvcg_default_output_b_source_id_store(struct config_item *item, + const char *page, size_t len) +{ + struct config_group *group = to_config_group(item); + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &group->cg_subsys->su_mutex; + struct uvc_output_terminal_descriptor *cd; + int result; + u8 num; + + result = kstrtou8(page, 0, &num); + if (result) + return result; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = group->cg_item.ci_parent->ci_parent-> + ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + cd = &opts->uvc_output_terminal; + + mutex_lock(&opts->lock); + cd->bSourceID = num; + mutex_unlock(&opts->lock); + + mutex_unlock(su_mutex); + + return len; +} +UVC_ATTR(uvcg_default_output_, b_source_id, bSourceID); + static struct configfs_attribute *uvcg_default_output_attrs[] = { &uvcg_default_output_attr_b_terminal_id, &uvcg_default_output_attr_w_terminal_type, diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c index d04d72f5816e..8d5892891300 100644 --- a/drivers/usb/gadget/udc/bcm63xx_udc.c +++ b/drivers/usb/gadget/udc/bcm63xx_udc.c @@ -2258,7 +2258,7 @@ static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc) */ static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc) { - debugfs_remove(debugfs_lookup(udc->gadget.name, usb_debug_root)); + debugfs_lookup_and_remove(udc->gadget.name, usb_debug_root); } /*********************************************************************** diff --git a/drivers/usb/gadget/udc/gr_udc.c 
b/drivers/usb/gadget/udc/gr_udc.c index 85cdc0af3bf9..09762559912d 100644 --- a/drivers/usb/gadget/udc/gr_udc.c +++ b/drivers/usb/gadget/udc/gr_udc.c @@ -215,7 +215,7 @@ static void gr_dfs_create(struct gr_udc *dev) static void gr_dfs_delete(struct gr_udc *dev) { - debugfs_remove(debugfs_lookup(dev_name(dev->dev), usb_debug_root)); + debugfs_lookup_and_remove(dev_name(dev->dev), usb_debug_root); } #else /* !CONFIG_USB_GADGET_DEBUG_FS */ diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index cea10cdb83ae..fe62db32dd0e 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -532,7 +532,7 @@ static void create_debug_file(struct lpc32xx_udc *udc) static void remove_debug_file(struct lpc32xx_udc *udc) { - debugfs_remove(debugfs_lookup(debug_filename, NULL)); + debugfs_lookup_and_remove(debug_filename, NULL); } #else diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c index c593fc383481..9e01ddf2b417 100644 --- a/drivers/usb/gadget/udc/pxa25x_udc.c +++ b/drivers/usb/gadget/udc/pxa25x_udc.c @@ -1340,7 +1340,7 @@ DEFINE_SHOW_ATTRIBUTE(udc_debug); debugfs_create_file(dev->gadget.name, \ S_IRUGO, NULL, dev, &udc_debug_fops); \ } while (0) -#define remove_debug_files(dev) debugfs_remove(debugfs_lookup(dev->gadget.name, NULL)) +#define remove_debug_files(dev) debugfs_lookup_and_remove(dev->gadget.name, NULL) #else /* !CONFIG_USB_GADGET_DEBUG_FILES */ diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c index ac980d6a4740..0ecdfd2ba9e9 100644 --- a/drivers/usb/gadget/udc/pxa27x_udc.c +++ b/drivers/usb/gadget/udc/pxa27x_udc.c @@ -215,7 +215,7 @@ static void pxa_init_debugfs(struct pxa_udc *udc) static void pxa_cleanup_debugfs(struct pxa_udc *udc) { - debugfs_remove(debugfs_lookup(udc->gadget.name, usb_debug_root)); + debugfs_lookup_and_remove(udc->gadget.name, usb_debug_root); } #else diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c index 3d1dbcf4c073..c4c1fbc12b4c 100644 --- a/drivers/usb/host/fotg210-hcd.c +++ b/drivers/usb/host/fotg210-hcd.c @@ -862,7 +862,7 @@ static inline void remove_debug_files(struct fotg210_hcd *fotg210) { struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self; - debugfs_remove(debugfs_lookup(bus->bus_name, fotg210_debug_root)); + debugfs_lookup_and_remove(bus->bus_name, fotg210_debug_root); } /* handshake - spin reading hc until handshake completes or fails diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c index 4f564d71bb0b..49ae01487af4 100644 --- a/drivers/usb/host/isp116x-hcd.c +++ b/drivers/usb/host/isp116x-hcd.c @@ -1205,7 +1205,7 @@ static void create_debug_file(struct isp116x *isp116x) static void remove_debug_file(struct isp116x *isp116x) { - debugfs_remove(debugfs_lookup(hcd_name, usb_debug_root)); + debugfs_lookup_and_remove(hcd_name, usb_debug_root); } #else diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c index 0e14d1d07709..b0da143ef4be 100644 --- a/drivers/usb/host/isp1362-hcd.c +++ b/drivers/usb/host/isp1362-hcd.c @@ -2170,7 +2170,7 @@ static void create_debug_file(struct isp1362_hcd *isp1362_hcd) static void remove_debug_file(struct isp1362_hcd *isp1362_hcd) { - debugfs_remove(debugfs_lookup("isp1362", usb_debug_root)); + debugfs_lookup_and_remove("isp1362", usb_debug_root); } /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index 
d206bd95c7bb..b8b90eec9107 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c @@ -1501,7 +1501,7 @@ static void create_debug_file(struct sl811 *sl811) static void remove_debug_file(struct sl811 *sl811) { - debugfs_remove(debugfs_lookup("sl811h", usb_debug_root)); + debugfs_lookup_and_remove("sl811h", usb_debug_root); } /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c index c22b51af83fc..7cdc2fa7c28f 100644 --- a/drivers/usb/host/uhci-hcd.c +++ b/drivers/usb/host/uhci-hcd.c @@ -536,8 +536,8 @@ static void release_uhci(struct uhci_hcd *uhci) uhci->is_initialized = 0; spin_unlock_irq(&uhci->lock); - debugfs_remove(debugfs_lookup(uhci_to_hcd(uhci)->self.bus_name, - uhci_debugfs_root)); + debugfs_lookup_and_remove(uhci_to_hcd(uhci)->self.bus_name, + uhci_debugfs_root); for (i = 0; i < UHCI_NUM_SKELQH; i++) uhci_free_qh(uhci, uhci->skelqh[i]); @@ -700,7 +700,7 @@ err_alloc_frame_cpu: uhci->frame, uhci->frame_dma_handle); err_alloc_frame: - debugfs_remove(debugfs_lookup(hcd->self.bus_name, uhci_debugfs_root)); + debugfs_lookup_and_remove(hcd->self.bus_name, uhci_debugfs_root); return retval; } diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c index 60651a50770f..87f1597a0e5a 100644 --- a/drivers/usb/host/xhci-mvebu.c +++ b/drivers/usb/host/xhci-mvebu.c @@ -32,7 +32,7 @@ static void xhci_mvebu_mbus_config(void __iomem *base, /* Program each DRAM CS in a seperate window */ for (win = 0; win < dram->num_cs; win++) { - const struct mbus_dram_window *cs = dram->cs + win; + const struct mbus_dram_window *cs = &dram->cs[win]; writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) | (dram->mbus_dram_target_id << 4) | 1, diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c index 6012603f3630..97c66c0d91f4 100644 --- a/drivers/usb/storage/ene_ub6250.c +++ b/drivers/usb/storage/ene_ub6250.c @@ -939,7 +939,7 @@ static int ms_lib_process_bootblock(struct us_data *us, u16 PhyBlock, u8 *PageDa struct ms_lib_type_extdat ExtraData; struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; - PageBuffer = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL); + PageBuffer = kzalloc(MS_BYTES_PER_PAGE * 2, GFP_KERNEL); if (PageBuffer == NULL) return (u32)-1; diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c index 3e4486bfa0b7..3ec5ca3aefe1 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.c +++ b/drivers/vdpa/ifcvf/ifcvf_base.c @@ -10,11 +10,6 @@ #include "ifcvf_base.h" -struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw) -{ - return container_of(hw, struct ifcvf_adapter, vf); -} - u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector) { struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg; @@ -37,8 +32,6 @@ u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector) static void __iomem *get_cap_addr(struct ifcvf_hw *hw, struct virtio_pci_cap *cap) { - struct ifcvf_adapter *ifcvf; - struct pci_dev *pdev; u32 length, offset; u8 bar; @@ -46,17 +39,14 @@ static void __iomem *get_cap_addr(struct ifcvf_hw *hw, offset = le32_to_cpu(cap->offset); bar = cap->bar; - ifcvf= vf_to_adapter(hw); - pdev = ifcvf->pdev; - if (bar >= IFCVF_PCI_MAX_RESOURCE) { - IFCVF_DBG(pdev, + IFCVF_DBG(hw->pdev, "Invalid bar number %u to get capabilities\n", bar); return NULL; } - if (offset + length > pci_resource_len(pdev, bar)) { - IFCVF_DBG(pdev, + if (offset + length > pci_resource_len(hw->pdev, bar)) { + 
IFCVF_DBG(hw->pdev, "offset(%u) + len(%u) overflows bar%u's capability\n", offset, length, bar); return NULL; @@ -92,6 +82,7 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev) IFCVF_ERR(pdev, "Failed to read PCI capability list\n"); return -EIO; } + hw->pdev = pdev; while (pos) { ret = ifcvf_read_config_range(pdev, (u32 *)&cap, @@ -220,10 +211,8 @@ u64 ifcvf_get_features(struct ifcvf_hw *hw) int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features) { - struct ifcvf_adapter *ifcvf = vf_to_adapter(hw); - if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) { - IFCVF_ERR(ifcvf->pdev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n"); + IFCVF_ERR(hw->pdev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n"); return -EINVAL; } @@ -232,13 +221,11 @@ int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features) u32 ifcvf_get_config_size(struct ifcvf_hw *hw) { - struct ifcvf_adapter *adapter; u32 net_config_size = sizeof(struct virtio_net_config); u32 blk_config_size = sizeof(struct virtio_blk_config); u32 cap_size = hw->cap_dev_config_size; u32 config_size; - adapter = vf_to_adapter(hw); /* If the onboard device config space size is greater than * the size of struct virtio_net/blk_config, only the spec * implementing contents size is returned, this is very @@ -253,7 +240,7 @@ u32 ifcvf_get_config_size(struct ifcvf_hw *hw) break; default: config_size = 0; - IFCVF_ERR(adapter->pdev, "VIRTIO ID %u not supported\n", hw->dev_type); + IFCVF_ERR(hw->pdev, "VIRTIO ID %u not supported\n", hw->dev_type); } return config_size; @@ -301,14 +288,11 @@ static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features) static int ifcvf_config_features(struct ifcvf_hw *hw) { - struct ifcvf_adapter *ifcvf; - - ifcvf = vf_to_adapter(hw); ifcvf_set_features(hw, hw->req_features); ifcvf_add_status(hw, VIRTIO_CONFIG_S_FEATURES_OK); if (!(ifcvf_get_status(hw) & VIRTIO_CONFIG_S_FEATURES_OK)) { - IFCVF_ERR(ifcvf->pdev, "Failed to set FEATURES_OK status\n"); + IFCVF_ERR(hw->pdev, "Failed to set FEATURES_OK status\n"); return -EIO; } diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h index f5563f665cc6..25bd4e927b27 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.h +++ b/drivers/vdpa/ifcvf/ifcvf_base.h @@ -39,7 +39,7 @@ #define IFCVF_INFO(pdev, fmt, ...) 
dev_info(&pdev->dev, fmt, ##__VA_ARGS__) #define ifcvf_private_to_vf(adapter) \ - (&((struct ifcvf_adapter *)adapter)->vf) + (((struct ifcvf_adapter *)adapter)->vf) /* all vqs and config interrupt has its own vector */ #define MSIX_VECTOR_PER_VQ_AND_CONFIG 1 @@ -89,12 +89,13 @@ struct ifcvf_hw { u16 nr_vring; /* VIRTIO_PCI_CAP_DEVICE_CFG size */ u32 cap_dev_config_size; + struct pci_dev *pdev; }; struct ifcvf_adapter { struct vdpa_device vdpa; struct pci_dev *pdev; - struct ifcvf_hw vf; + struct ifcvf_hw *vf; }; struct ifcvf_vring_lm_cfg { @@ -109,6 +110,7 @@ struct ifcvf_lm_cfg { struct ifcvf_vdpa_mgmt_dev { struct vdpa_mgmt_dev mdev; + struct ifcvf_hw vf; struct ifcvf_adapter *adapter; struct pci_dev *pdev; }; diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index 44b29289aa19..d5036f49f161 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -69,10 +69,9 @@ static void ifcvf_free_irq_vectors(void *data) pci_free_irq_vectors(data); } -static void ifcvf_free_per_vq_irq(struct ifcvf_adapter *adapter) +static void ifcvf_free_per_vq_irq(struct ifcvf_hw *vf) { - struct pci_dev *pdev = adapter->pdev; - struct ifcvf_hw *vf = &adapter->vf; + struct pci_dev *pdev = vf->pdev; int i; for (i = 0; i < vf->nr_vring; i++) { @@ -83,10 +82,9 @@ static void ifcvf_free_per_vq_irq(struct ifcvf_adapter *adapter) } } -static void ifcvf_free_vqs_reused_irq(struct ifcvf_adapter *adapter) +static void ifcvf_free_vqs_reused_irq(struct ifcvf_hw *vf) { - struct pci_dev *pdev = adapter->pdev; - struct ifcvf_hw *vf = &adapter->vf; + struct pci_dev *pdev = vf->pdev; if (vf->vqs_reused_irq != -EINVAL) { devm_free_irq(&pdev->dev, vf->vqs_reused_irq, vf); @@ -95,20 +93,17 @@ static void ifcvf_free_vqs_reused_irq(struct ifcvf_adapter *adapter) } -static void ifcvf_free_vq_irq(struct ifcvf_adapter *adapter) +static void ifcvf_free_vq_irq(struct ifcvf_hw *vf) { - struct ifcvf_hw *vf = &adapter->vf; - if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG) - ifcvf_free_per_vq_irq(adapter); + ifcvf_free_per_vq_irq(vf); else - ifcvf_free_vqs_reused_irq(adapter); + ifcvf_free_vqs_reused_irq(vf); } -static void ifcvf_free_config_irq(struct ifcvf_adapter *adapter) +static void ifcvf_free_config_irq(struct ifcvf_hw *vf) { - struct pci_dev *pdev = adapter->pdev; - struct ifcvf_hw *vf = &adapter->vf; + struct pci_dev *pdev = vf->pdev; if (vf->config_irq == -EINVAL) return; @@ -123,12 +118,12 @@ static void ifcvf_free_config_irq(struct ifcvf_adapter *adapter) } } -static void ifcvf_free_irq(struct ifcvf_adapter *adapter) +static void ifcvf_free_irq(struct ifcvf_hw *vf) { - struct pci_dev *pdev = adapter->pdev; + struct pci_dev *pdev = vf->pdev; - ifcvf_free_vq_irq(adapter); - ifcvf_free_config_irq(adapter); + ifcvf_free_vq_irq(vf); + ifcvf_free_config_irq(vf); ifcvf_free_irq_vectors(pdev); } @@ -137,10 +132,9 @@ static void ifcvf_free_irq(struct ifcvf_adapter *adapter) * It returns the number of allocated vectors, negative * return value when fails. 
*/ -static int ifcvf_alloc_vectors(struct ifcvf_adapter *adapter) +static int ifcvf_alloc_vectors(struct ifcvf_hw *vf) { - struct pci_dev *pdev = adapter->pdev; - struct ifcvf_hw *vf = &adapter->vf; + struct pci_dev *pdev = vf->pdev; int max_intr, ret; /* all queues and config interrupt */ @@ -160,10 +154,9 @@ static int ifcvf_alloc_vectors(struct ifcvf_adapter *adapter) return ret; } -static int ifcvf_request_per_vq_irq(struct ifcvf_adapter *adapter) +static int ifcvf_request_per_vq_irq(struct ifcvf_hw *vf) { - struct pci_dev *pdev = adapter->pdev; - struct ifcvf_hw *vf = &adapter->vf; + struct pci_dev *pdev = vf->pdev; int i, vector, ret, irq; vf->vqs_reused_irq = -EINVAL; @@ -190,15 +183,14 @@ static int ifcvf_request_per_vq_irq(struct ifcvf_adapter *adapter) return 0; err: - ifcvf_free_irq(adapter); + ifcvf_free_irq(vf); return -EFAULT; } -static int ifcvf_request_vqs_reused_irq(struct ifcvf_adapter *adapter) +static int ifcvf_request_vqs_reused_irq(struct ifcvf_hw *vf) { - struct pci_dev *pdev = adapter->pdev; - struct ifcvf_hw *vf = &adapter->vf; + struct pci_dev *pdev = vf->pdev; int i, vector, ret, irq; vector = 0; @@ -224,15 +216,14 @@ static int ifcvf_request_vqs_reused_irq(struct ifcvf_adapter *adapter) return 0; err: - ifcvf_free_irq(adapter); + ifcvf_free_irq(vf); return -EFAULT; } -static int ifcvf_request_dev_irq(struct ifcvf_adapter *adapter) +static int ifcvf_request_dev_irq(struct ifcvf_hw *vf) { - struct pci_dev *pdev = adapter->pdev; - struct ifcvf_hw *vf = &adapter->vf; + struct pci_dev *pdev = vf->pdev; int i, vector, ret, irq; vector = 0; @@ -265,29 +256,27 @@ static int ifcvf_request_dev_irq(struct ifcvf_adapter *adapter) return 0; err: - ifcvf_free_irq(adapter); + ifcvf_free_irq(vf); return -EFAULT; } -static int ifcvf_request_vq_irq(struct ifcvf_adapter *adapter) +static int ifcvf_request_vq_irq(struct ifcvf_hw *vf) { - struct ifcvf_hw *vf = &adapter->vf; int ret; if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG) - ret = ifcvf_request_per_vq_irq(adapter); + ret = ifcvf_request_per_vq_irq(vf); else - ret = ifcvf_request_vqs_reused_irq(adapter); + ret = ifcvf_request_vqs_reused_irq(vf); return ret; } -static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter) +static int ifcvf_request_config_irq(struct ifcvf_hw *vf) { - struct pci_dev *pdev = adapter->pdev; - struct ifcvf_hw *vf = &adapter->vf; + struct pci_dev *pdev = vf->pdev; int config_vector, ret; if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG) @@ -320,17 +309,16 @@ static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter) return 0; err: - ifcvf_free_irq(adapter); + ifcvf_free_irq(vf); return -EFAULT; } -static int ifcvf_request_irq(struct ifcvf_adapter *adapter) +static int ifcvf_request_irq(struct ifcvf_hw *vf) { - struct ifcvf_hw *vf = &adapter->vf; int nvectors, ret, max_intr; - nvectors = ifcvf_alloc_vectors(adapter); + nvectors = ifcvf_alloc_vectors(vf); if (nvectors <= 0) return -EFAULT; @@ -341,16 +329,16 @@ static int ifcvf_request_irq(struct ifcvf_adapter *adapter) if (nvectors == 1) { vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED; - ret = ifcvf_request_dev_irq(adapter); + ret = ifcvf_request_dev_irq(vf); return ret; } - ret = ifcvf_request_vq_irq(adapter); + ret = ifcvf_request_vq_irq(vf); if (ret) return ret; - ret = ifcvf_request_config_irq(adapter); + ret = ifcvf_request_config_irq(vf); if (ret) return ret; @@ -414,7 +402,7 @@ static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev) { struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev); - 
return &adapter->vf; + return adapter->vf; } static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev) @@ -479,7 +467,7 @@ static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status) if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) { - ret = ifcvf_request_irq(adapter); + ret = ifcvf_request_irq(vf); if (ret) { status = ifcvf_get_status(vf); status |= VIRTIO_CONFIG_S_FAILED; @@ -511,7 +499,7 @@ static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev) if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) { ifcvf_stop_datapath(adapter); - ifcvf_free_irq(adapter); + ifcvf_free_irq(vf); } ifcvf_reset_vring(adapter); @@ -758,12 +746,20 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name, int ret; ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev); - if (!ifcvf_mgmt_dev->adapter) - return -EOPNOTSUPP; + vf = &ifcvf_mgmt_dev->vf; + pdev = vf->pdev; + adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa, + &pdev->dev, &ifc_vdpa_ops, 1, 1, NULL, false); + if (IS_ERR(adapter)) { + IFCVF_ERR(pdev, "Failed to allocate vDPA structure"); + return PTR_ERR(adapter); + } - adapter = ifcvf_mgmt_dev->adapter; - vf = &adapter->vf; - pdev = adapter->pdev; + ifcvf_mgmt_dev->adapter = adapter; + adapter->pdev = pdev; + adapter->vdpa.dma_dev = &pdev->dev; + adapter->vdpa.mdev = mdev; + adapter->vf = vf; vdpa_dev = &adapter->vdpa; if (name) @@ -781,7 +777,6 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name, return 0; } - static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev) { struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev; @@ -800,7 +795,6 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev; struct device *dev = &pdev->dev; - struct ifcvf_adapter *adapter; struct ifcvf_hw *vf; u32 dev_type; int ret, i; @@ -831,20 +825,16 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) } pci_set_master(pdev); - - adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa, - dev, &ifc_vdpa_ops, 1, 1, NULL, false); - if (IS_ERR(adapter)) { - IFCVF_ERR(pdev, "Failed to allocate vDPA structure"); - return PTR_ERR(adapter); + ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL); + if (!ifcvf_mgmt_dev) { + IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n"); + return -ENOMEM; } - vf = &adapter->vf; + vf = &ifcvf_mgmt_dev->vf; vf->dev_type = get_dev_type(pdev); vf->base = pcim_iomap_table(pdev); - - adapter->pdev = pdev; - adapter->vdpa.dma_dev = &pdev->dev; + vf->pdev = pdev; ret = ifcvf_init_hw(vf, pdev); if (ret) { @@ -858,16 +848,6 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) vf->hw_features = ifcvf_get_hw_features(vf); vf->config_size = ifcvf_get_config_size(vf); - ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL); - if (!ifcvf_mgmt_dev) { - IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n"); - return -ENOMEM; - } - - ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops; - ifcvf_mgmt_dev->mdev.device = dev; - ifcvf_mgmt_dev->adapter = adapter; - dev_type = get_dev_type(pdev); switch (dev_type) { case VIRTIO_ID_NET: @@ -882,12 +862,11 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err; } + ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops; + ifcvf_mgmt_dev->mdev.device = dev; ifcvf_mgmt_dev->mdev.max_supported_vqs = 
vf->nr_vring; ifcvf_mgmt_dev->mdev.supported_features = vf->hw_features; - adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev; - - ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev); if (ret) { IFCVF_ERR(pdev, diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c index 292b5a1ca831..fed7be246442 100644 --- a/drivers/watchdog/at91sam9_wdt.c +++ b/drivers/watchdog/at91sam9_wdt.c @@ -206,10 +206,9 @@ static int at91_wdt_init(struct platform_device *pdev, struct at91wdt *wdt) "min heartbeat and max heartbeat might be too close for the system to handle it correctly\n"); if ((tmp & AT91_WDT_WDFIEN) && wdt->irq) { - err = request_irq(wdt->irq, wdt_interrupt, - IRQF_SHARED | IRQF_IRQPOLL | - IRQF_NO_SUSPEND, - pdev->name, wdt); + err = devm_request_irq(dev, wdt->irq, wdt_interrupt, + IRQF_SHARED | IRQF_IRQPOLL | IRQF_NO_SUSPEND, + pdev->name, wdt); if (err) return err; } diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c index 1bdaf17c1d38..8202f0a6b093 100644 --- a/drivers/watchdog/pcwd_usb.c +++ b/drivers/watchdog/pcwd_usb.c @@ -325,7 +325,8 @@ static int usb_pcwd_set_heartbeat(struct usb_pcwd_private *usb_pcwd, int t) static int usb_pcwd_get_temperature(struct usb_pcwd_private *usb_pcwd, int *temperature) { - unsigned char msb, lsb; + unsigned char msb = 0x00; + unsigned char lsb = 0x00; usb_pcwd_send_command(usb_pcwd, CMD_READ_TEMP, &msb, &lsb); @@ -341,7 +342,8 @@ static int usb_pcwd_get_temperature(struct usb_pcwd_private *usb_pcwd, static int usb_pcwd_get_timeleft(struct usb_pcwd_private *usb_pcwd, int *time_left) { - unsigned char msb, lsb; + unsigned char msb = 0x00; + unsigned char lsb = 0x00; /* Read the time that's left before rebooting */ /* Note: if the board is not yet armed then we will read 0xFFFF */ diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c index 974a4194a8fd..d404953d0e0f 100644 --- a/drivers/watchdog/rzg2l_wdt.c +++ b/drivers/watchdog/rzg2l_wdt.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -35,6 +36,8 @@ #define F2CYCLE_NSEC(f) (1000000000 / (f)) +#define RZV2M_A_NSEC 730 + static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" @@ -51,11 +54,35 @@ struct rzg2l_wdt_priv { struct reset_control *rstc; unsigned long osc_clk_rate; unsigned long delay; + unsigned long minimum_assertion_period; struct clk *pclk; struct clk *osc_clk; enum rz_wdt_type devtype; }; +static int rzg2l_wdt_reset(struct rzg2l_wdt_priv *priv) +{ + int err, status; + + if (priv->devtype == WDT_RZV2M) { + /* WDT needs TYPE-B reset control */ + err = reset_control_assert(priv->rstc); + if (err) + return err; + ndelay(priv->minimum_assertion_period); + err = reset_control_deassert(priv->rstc); + if (err) + return err; + err = read_poll_timeout(reset_control_status, status, + status != 1, 0, 1000, false, + priv->rstc); + } else { + err = reset_control_reset(priv->rstc); + } + + return err; +} + static void rzg2l_wdt_wait_delay(struct rzg2l_wdt_priv *priv) { /* delay timer when change the setting register */ @@ -115,25 +142,23 @@ static int rzg2l_wdt_stop(struct watchdog_device *wdev) { struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev); + rzg2l_wdt_reset(priv); pm_runtime_put(wdev->parent); - reset_control_reset(priv->rstc); return 0; } static int rzg2l_wdt_set_timeout(struct watchdog_device *wdev, unsigned int timeout) { - struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev); - wdev->timeout = timeout; 
/* * If the watchdog is active, reset the module for updating the WDTSET - * register so that it is updated with new timeout values. + * register by calling rzg2l_wdt_stop() (which internally calls reset_control_reset() + * to reset the module) so that it is updated with new timeout values. */ if (watchdog_active(wdev)) { - pm_runtime_put(wdev->parent); - reset_control_reset(priv->rstc); + rzg2l_wdt_stop(wdev); rzg2l_wdt_start(wdev); } @@ -156,6 +181,7 @@ static int rzg2l_wdt_restart(struct watchdog_device *wdev, rzg2l_wdt_write(priv, PEEN_FORCE, PEEN); } else { /* RZ/V2M doesn't have parity error registers */ + rzg2l_wdt_reset(priv); wdev->timeout = 0; @@ -253,6 +279,13 @@ static int rzg2l_wdt_probe(struct platform_device *pdev) priv->devtype = (uintptr_t)of_device_get_match_data(dev); + if (priv->devtype == WDT_RZV2M) { + priv->minimum_assertion_period = RZV2M_A_NSEC + + 3 * F2CYCLE_NSEC(pclk_rate) + 5 * + max(F2CYCLE_NSEC(priv->osc_clk_rate), + F2CYCLE_NSEC(pclk_rate)); + } + pm_runtime_enable(&pdev->dev); priv->wdev.info = &rzg2l_wdt_ident; diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c index 9791c74aebd4..63862803421f 100644 --- a/drivers/watchdog/sbsa_gwdt.c +++ b/drivers/watchdog/sbsa_gwdt.c @@ -150,6 +150,7 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd, struct sbsa_gwdt *gwdt = watchdog_get_drvdata(wdd); wdd->timeout = timeout; + timeout = clamp_t(unsigned int, timeout, 1, wdd->max_hw_heartbeat_ms / 1000); if (action) sbsa_gwdt_reg_write(gwdt->clk * timeout, gwdt); diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index 55574ed42504..fdffa6859dde 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -1061,8 +1061,8 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) if (wdd->id == 0) { misc_deregister(&watchdog_miscdev); old_wd_data = NULL; - put_device(&wd_data->dev); } + put_device(&wd_data->dev); return err; } diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index deebc8ddbd93..4b69945755e4 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1616,7 +1616,8 @@ void btrfs_reclaim_bgs_work(struct work_struct *work) btrfs_info(fs_info, "reclaiming chunk %llu with %llu%% used %llu%% unusable", - bg->start, div_u64(bg->used * 100, bg->length), + bg->start, + div64_u64(bg->used * 100, bg->length), div64_u64(zone_unusable * 100, bg->length)); trace_btrfs_reclaim_block_group(bg); ret = btrfs_relocate_chunk(fs_info, bg->start); diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 6092a4eedc92..b8ae02aa632e 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -760,7 +760,13 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end, goto next; } + flags = em->flags; clear_bit(EXTENT_FLAG_PINNED, &em->flags); + /* + * In case we split the extent map, we want to preserve the + * EXTENT_FLAG_LOGGING flag on our extent map, but we don't want + * it on the new extent maps. 
+ */ clear_bit(EXTENT_FLAG_LOGGING, &flags); modified = !list_empty(&em->list); @@ -771,7 +777,6 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end, if (em->start >= start && em_end <= end) goto remove_em; - flags = em->flags; gen = em->generation; compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 05f9cbbf6e1e..f02b8cbd6ec4 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6739,7 +6739,7 @@ static void btrfs_log_dev_io_error(struct bio *bio, struct btrfs_device *dev) if (btrfs_op(bio) == BTRFS_MAP_WRITE) btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); - if (!(bio->bi_opf & REQ_RAHEAD)) + else if (!(bio->bi_opf & REQ_RAHEAD)) btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS); if (bio->bi_opf & REQ_PREFLUSH) btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_FLUSH_ERRS); diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index eb1a0de9dd55..bc4475f6c082 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -664,11 +664,21 @@ static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses, int match_target_ip(struct TCP_Server_Info *server, const char *share, size_t share_len, bool *result); - -int cifs_dfs_query_info_nonascii_quirk(const unsigned int xid, - struct cifs_tcon *tcon, - struct cifs_sb_info *cifs_sb, - const char *dfs_link_path); +int cifs_inval_name_dfs_link_error(const unsigned int xid, + struct cifs_tcon *tcon, + struct cifs_sb_info *cifs_sb, + const char *full_path, + bool *islink); +#else +static inline int cifs_inval_name_dfs_link_error(const unsigned int xid, + struct cifs_tcon *tcon, + struct cifs_sb_info *cifs_sb, + const char *full_path, + bool *islink) +{ + *islink = false; + return 0; +} #endif static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options) diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 062175994e87..4e54736a0699 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -21,6 +21,7 @@ #include "cifsfs.h" #ifdef CONFIG_CIFS_DFS_UPCALL #include "dns_resolve.h" +#include "dfs_cache.h" #endif #include "fs_context.h" #include "cached_dir.h" @@ -1314,4 +1315,70 @@ int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; return 0; } + +/* + * Handle weird Windows SMB server behaviour. It responds with + * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request for + * "\\\" DFS reference, where contains + * non-ASCII unicode symbols. + */ +int cifs_inval_name_dfs_link_error(const unsigned int xid, + struct cifs_tcon *tcon, + struct cifs_sb_info *cifs_sb, + const char *full_path, + bool *islink) +{ + struct cifs_ses *ses = tcon->ses; + size_t len; + char *path; + char *ref_path; + + *islink = false; + + /* + * Fast path - skip check when @full_path doesn't have a prefix path to + * look up or tcon is not DFS. + */ + if (strlen(full_path) < 2 || !cifs_sb || + (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) || + !is_tcon_dfs(tcon) || !ses->server->origin_fullpath) + return 0; + + /* + * Slow path - tcon is DFS and @full_path has prefix path, so attempt + * to get a referral to figure out whether it is an DFS link. 
+ */ + len = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1) + strlen(full_path) + 1; + path = kmalloc(len, GFP_KERNEL); + if (!path) + return -ENOMEM; + + scnprintf(path, len, "%s%s", tcon->tree_name, full_path); + ref_path = dfs_cache_canonical_path(path + 1, cifs_sb->local_nls, + cifs_remap(cifs_sb)); + kfree(path); + + if (IS_ERR(ref_path)) { + if (PTR_ERR(ref_path) != -EINVAL) + return PTR_ERR(ref_path); + } else { + struct dfs_info3_param *refs = NULL; + int num_refs = 0; + + /* + * XXX: we are not using dfs_cache_find() here because we might + * end filling all the DFS cache and thus potentially + * removing cached DFS targets that the client would eventually + * need during failover. + */ + if (ses->server->ops->get_dfs_refer && + !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs, + &num_refs, cifs_sb->local_nls, + cifs_remap(cifs_sb))) + *islink = refs[0].server_type == DFS_TYPE_LINK; + free_dfs_info_array(refs, num_refs); + kfree(ref_path); + } + return 0; +} #endif diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index e1491440e8f1..442718cf61b8 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c @@ -511,12 +511,13 @@ int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, struct cifs_open_info_data *data, bool *adjust_tz, bool *reparse) { - int rc; __u32 create_options = 0; struct cifsFileInfo *cfile; struct cached_fid *cfid = NULL; struct kvec err_iov[3] = {}; int err_buftype[3] = {}; + bool islink; + int rc, rc2; *adjust_tz = false; *reparse = false; @@ -563,15 +564,15 @@ int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon, create_options, ACL_NO_MODE, data, SMB2_OP_QUERY_INFO, cfile, NULL, NULL); goto out; - } else if (rc != -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && - hdr->Status == STATUS_OBJECT_NAME_INVALID) { - /* - * Handle weird Windows SMB server behaviour. It responds with - * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request - * for "\\\" DFS reference, - * where contains non-ASCII unicode symbols. - */ - rc = -EREMOTE; + } else if (rc != -EREMOTE && hdr->Status == STATUS_OBJECT_NAME_INVALID) { + rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb, + full_path, &islink); + if (rc2) { + rc = rc2; + goto out; + } + if (islink) + rc = -EREMOTE; } if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)) diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 6da495f593e1..0424876d22e5 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -796,7 +796,6 @@ static int smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path) { - int rc; __le16 *utf16_path; __u8 oplock = SMB2_OPLOCK_LEVEL_NONE; int err_buftype = CIFS_NO_BUFFER; @@ -804,6 +803,8 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon, struct kvec err_iov = {}; struct cifs_fid fid; struct cached_fid *cfid; + bool islink; + int rc, rc2; rc = open_cached_dir(xid, tcon, full_path, cifs_sb, true, &cfid); if (!rc) { @@ -833,15 +834,17 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon, if (unlikely(!hdr || err_buftype == CIFS_NO_BUFFER)) goto out; - /* - * Handle weird Windows SMB server behaviour. It responds with - * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request - * for "\\\" DFS reference, - * where contains non-ASCII unicode symbols. 
- */ - if (rc != -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && - hdr->Status == STATUS_OBJECT_NAME_INVALID) - rc = -EREMOTE; + + if (rc != -EREMOTE && hdr->Status == STATUS_OBJECT_NAME_INVALID) { + rc2 = cifs_inval_name_dfs_link_error(xid, tcon, cifs_sb, + full_path, &islink); + if (rc2) { + rc = rc2; + goto out; + } + if (islink) + rc = -EREMOTE; + } if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)) rc = -EOPNOTSUPP; diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 94a72ede5764..0b1bc24536ce 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -3611,9 +3611,10 @@ static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb, /* further lowcomms enhancements or alternate implementations may make the return value from this function useful at some point */ -static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms) +static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms, + const void *name, int namelen) { - dlm_midcomms_commit_mhandle(mh); + dlm_midcomms_commit_mhandle(mh, name, namelen); return 0; } @@ -3679,7 +3680,7 @@ static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype) send_args(r, lkb, ms); - error = send_message(mh, ms); + error = send_message(mh, ms, r->res_name, r->res_length); if (error) goto fail; return 0; @@ -3742,7 +3743,7 @@ static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb) ms->m_result = 0; - error = send_message(mh, ms); + error = send_message(mh, ms, r->res_name, r->res_length); out: return error; } @@ -3763,7 +3764,7 @@ static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode) ms->m_bastmode = cpu_to_le32(mode); - error = send_message(mh, ms); + error = send_message(mh, ms, r->res_name, r->res_length); out: return error; } @@ -3786,7 +3787,7 @@ static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb) send_args(r, lkb, ms); - error = send_message(mh, ms); + error = send_message(mh, ms, r->res_name, r->res_length); if (error) goto fail; return 0; @@ -3811,7 +3812,7 @@ static int send_remove(struct dlm_rsb *r) memcpy(ms->m_extra, r->res_name, r->res_length); ms->m_hash = cpu_to_le32(r->res_hash); - error = send_message(mh, ms); + error = send_message(mh, ms, r->res_name, r->res_length); out: return error; } @@ -3833,7 +3834,7 @@ static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, ms->m_result = cpu_to_le32(to_dlm_errno(rv)); - error = send_message(mh, ms); + error = send_message(mh, ms, r->res_name, r->res_length); out: return error; } @@ -3874,7 +3875,7 @@ static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in, ms->m_result = cpu_to_le32(to_dlm_errno(rv)); ms->m_nodeid = cpu_to_le32(ret_nodeid); - error = send_message(mh, ms); + error = send_message(mh, ms, ms_in->m_extra, receive_extralen(ms_in)); out: return error; } @@ -4044,66 +4045,6 @@ out: return error; } -static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len) -{ - char name[DLM_RESNAME_MAXLEN + 1]; - struct dlm_message *ms; - struct dlm_mhandle *mh; - struct dlm_rsb *r; - uint32_t hash, b; - int rv, dir_nodeid; - - memset(name, 0, sizeof(name)); - memcpy(name, ms_name, len); - - hash = jhash(name, len, 0); - b = hash & (ls->ls_rsbtbl_size - 1); - - dir_nodeid = dlm_hash2nodeid(ls, hash); - - log_error(ls, "send_repeat_remove dir %d %s", dir_nodeid, name); - - spin_lock(&ls->ls_rsbtbl[b].lock); - rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); - if (!rv) { - spin_unlock(&ls->ls_rsbtbl[b].lock); - 
log_error(ls, "repeat_remove on keep %s", name); - return; - } - - rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); - if (!rv) { - spin_unlock(&ls->ls_rsbtbl[b].lock); - log_error(ls, "repeat_remove on toss %s", name); - return; - } - - /* use ls->remove_name2 to avoid conflict with shrink? */ - - spin_lock(&ls->ls_remove_spin); - ls->ls_remove_len = len; - memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN); - spin_unlock(&ls->ls_remove_spin); - spin_unlock(&ls->ls_rsbtbl[b].lock); - - rv = _create_message(ls, sizeof(struct dlm_message) + len, - dir_nodeid, DLM_MSG_REMOVE, &ms, &mh); - if (rv) - goto out; - - memcpy(ms->m_extra, name, len); - ms->m_hash = cpu_to_le32(hash); - - send_message(mh, ms); - -out: - spin_lock(&ls->ls_remove_spin); - ls->ls_remove_len = 0; - memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN); - spin_unlock(&ls->ls_remove_spin); - wake_up(&ls->ls_remove_wait); -} - static int receive_request(struct dlm_ls *ls, struct dlm_message *ms) { struct dlm_lkb *lkb; @@ -4173,25 +4114,11 @@ static int receive_request(struct dlm_ls *ls, struct dlm_message *ms) ENOTBLK request failures when the lookup reply designating us as master is delayed. */ - /* We could repeatedly return -EBADR here if our send_remove() is - delayed in being sent/arriving/being processed on the dir node. - Another node would repeatedly lookup up the master, and the dir - node would continue returning our nodeid until our send_remove - took effect. - - We send another remove message in case our previous send_remove - was lost/ignored/missed somehow. */ - if (error != -ENOTBLK) { log_limit(ls, "receive_request %x from %d %d", le32_to_cpu(ms->m_lkid), from_nodeid, error); } - if (namelen && error == -EBADR) { - send_repeat_remove(ls, ms->m_extra, namelen); - msleep(1000); - } - setup_stub_lkb(ls, ms); send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); return error; @@ -6374,7 +6301,7 @@ static int send_purge(struct dlm_ls *ls, int nodeid, int pid) ms->m_nodeid = cpu_to_le32(nodeid); ms->m_pid = cpu_to_le32(pid); - return send_message(mh, ms); + return send_message(mh, ms, NULL, 0); } int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc, diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index bae050df7abf..7b29ea7bfb41 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -17,7 +17,6 @@ #include "recoverd.h" #include "dir.h" #include "midcomms.h" -#include "lowcomms.h" #include "config.h" #include "memory.h" #include "lock.h" @@ -382,23 +381,23 @@ static int threads_start(void) { int error; - error = dlm_scand_start(); - if (error) { - log_print("cannot start dlm_scand thread %d", error); - goto fail; - } - /* Thread for sending/receiving messages for all lockspace's */ error = dlm_midcomms_start(); if (error) { - log_print("cannot start dlm lowcomms %d", error); - goto scand_fail; + log_print("cannot start dlm midcomms %d", error); + goto fail; + } + + error = dlm_scand_start(); + if (error) { + log_print("cannot start dlm_scand thread %d", error); + goto midcomms_fail; } return 0; - scand_fail: - dlm_scand_stop(); + midcomms_fail: + dlm_midcomms_stop(); fail: return error; } @@ -726,7 +725,7 @@ static int __dlm_new_lockspace(const char *name, const char *cluster, if (!ls_count) { dlm_scand_stop(); dlm_midcomms_shutdown(); - dlm_lowcomms_stop(); + dlm_midcomms_stop(); } out: mutex_unlock(&ls_lock); @@ -929,7 +928,7 @@ int dlm_release_lockspace(void *lockspace, int force) if (!error) ls_count--; if (!ls_count) - dlm_lowcomms_stop(); + dlm_midcomms_stop(); 
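/*
 * Illustrative summary, not part of the patch: the hunks above thread the
 * resource name through send_message() so the midcomms commit path can feed
 * the new DLM send tracepoints, and lockspace start/stop now goes only
 * through the midcomms layer, which forwards to lowcomms
 * (lockspace -> midcomms -> lowcomms).  A minimal sketch of the wrappers
 * this relies on, which are added to fs/dlm/midcomms.c further down in this
 * series:
 */
int dlm_midcomms_start(void)
{
	return dlm_lowcomms_start();
}

void dlm_midcomms_stop(void)
{
	dlm_lowcomms_stop();
}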
mutex_unlock(&ls_lock); return error; diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 871d4e9f49fb..6ed09edabea0 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -1982,10 +1982,6 @@ static const struct dlm_proto_ops dlm_sctp_ops = { int dlm_lowcomms_start(void) { int error = -EINVAL; - int i; - - for (i = 0; i < CONN_HASH_SIZE; i++) - INIT_HLIST_HEAD(&connection_hash[i]); init_local(); if (!dlm_local_count) { @@ -1994,8 +1990,6 @@ int dlm_lowcomms_start(void) goto fail; } - INIT_WORK(&listen_con.rwork, process_listen_recv_socket); - error = work_start(); if (error) goto fail_local; @@ -2034,6 +2028,16 @@ fail: return error; } +void dlm_lowcomms_init(void) +{ + int i; + + for (i = 0; i < CONN_HASH_SIZE; i++) + INIT_HLIST_HEAD(&connection_hash[i]); + + INIT_WORK(&listen_con.rwork, process_listen_recv_socket); +} + void dlm_lowcomms_exit(void) { struct dlm_node_addr *na, *safe; diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h index 29369feea991..bbce7a18416d 100644 --- a/fs/dlm/lowcomms.h +++ b/fs/dlm/lowcomms.h @@ -35,6 +35,7 @@ extern int dlm_allow_conn; int dlm_lowcomms_start(void); void dlm_lowcomms_shutdown(void); void dlm_lowcomms_stop(void); +void dlm_lowcomms_init(void); void dlm_lowcomms_exit(void); int dlm_lowcomms_close(int nodeid); struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation, diff --git a/fs/dlm/main.c b/fs/dlm/main.c index 1c5be4b70ac1..a77338be3237 100644 --- a/fs/dlm/main.c +++ b/fs/dlm/main.c @@ -17,7 +17,7 @@ #include "user.h" #include "memory.h" #include "config.h" -#include "lowcomms.h" +#include "midcomms.h" #define CREATE_TRACE_POINTS #include @@ -30,6 +30,8 @@ static int __init init_dlm(void) if (error) goto out; + dlm_midcomms_init(); + error = dlm_lockspace_init(); if (error) goto out_mem; @@ -66,6 +68,7 @@ static int __init init_dlm(void) out_lockspace: dlm_lockspace_exit(); out_mem: + dlm_midcomms_exit(); dlm_memory_exit(); out: return error; @@ -79,7 +82,7 @@ static void __exit exit_dlm(void) dlm_config_exit(); dlm_memory_exit(); dlm_lockspace_exit(); - dlm_lowcomms_exit(); + dlm_midcomms_exit(); dlm_unregister_debugfs(); } diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c index 546c52c46b1c..b2a25a33a148 100644 --- a/fs/dlm/midcomms.c +++ b/fs/dlm/midcomms.c @@ -132,6 +132,7 @@ */ #define DLM_DEBUG_FENCE_TERMINATION 0 +#include #include #include "dlm_internal.h" @@ -194,7 +195,7 @@ struct midcomms_node { }; struct dlm_mhandle { - const struct dlm_header *inner_hd; + const union dlm_packet *inner_p; struct midcomms_node *node; struct dlm_opts *opts; struct dlm_msg *msg; @@ -405,6 +406,7 @@ static int dlm_send_fin(struct midcomms_node *node, if (!mh) return -ENOMEM; + set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags); mh->ack_rcv = ack_rcv; m_header = (struct dlm_header *)ppc; @@ -415,8 +417,7 @@ static int dlm_send_fin(struct midcomms_node *node, m_header->h_cmd = DLM_FIN; pr_debug("sending fin msg to node %d\n", node->nodeid); - dlm_midcomms_commit_mhandle(mh); - set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags); + dlm_midcomms_commit_mhandle(mh, NULL, 0); return 0; } @@ -468,12 +469,26 @@ static void dlm_pas_fin_ack_rcv(struct midcomms_node *node) spin_unlock(&node->state_lock); log_print("%s: unexpected state: %d\n", __func__, node->state); - WARN_ON(1); + WARN_ON_ONCE(1); return; } spin_unlock(&node->state_lock); } +static void dlm_receive_buffer_3_2_trace(uint32_t seq, union dlm_packet *p) +{ + switch (p->header.h_cmd) { + case DLM_MSG: + trace_dlm_recv_message(seq, &p->message); + break; + case DLM_RCOM: + 
trace_dlm_recv_rcom(seq, &p->rcom); + break; + default: + break; + } +} + static void dlm_midcomms_receive_buffer(union dlm_packet *p, struct midcomms_node *node, uint32_t seq) @@ -527,13 +542,14 @@ static void dlm_midcomms_receive_buffer(union dlm_packet *p, spin_unlock(&node->state_lock); log_print("%s: unexpected state: %d\n", __func__, node->state); - WARN_ON(1); + WARN_ON_ONCE(1); return; } spin_unlock(&node->state_lock); break; default: - WARN_ON(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags)); + WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags)); + dlm_receive_buffer_3_2_trace(seq, p); dlm_receive_buffer(p, node->nodeid); set_bit(DLM_NODE_ULP_DELIVERED, &node->flags); break; @@ -748,7 +764,7 @@ static void dlm_midcomms_receive_buffer_3_2(union dlm_packet *p, int nodeid) goto out; } - WARN_ON(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags)); + WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags)); dlm_receive_buffer(p, nodeid); break; case DLM_OPTS: @@ -1049,7 +1065,7 @@ static struct dlm_msg *dlm_midcomms_get_msg_3_2(struct dlm_mhandle *mh, int node dlm_fill_opts_header(opts, len, mh->seq); *ppc += sizeof(*opts); - mh->inner_hd = (const struct dlm_header *)*ppc; + mh->inner_p = (const union dlm_packet *)*ppc; return msg; } @@ -1073,7 +1089,7 @@ struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, } /* this is a bug, however we going on and hope it will be resolved */ - WARN_ON(test_bit(DLM_NODE_FLAG_STOP_TX, &node->flags)); + WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_TX, &node->flags)); mh = dlm_allocate_mhandle(); if (!mh) @@ -1105,7 +1121,7 @@ struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, break; default: dlm_free_mhandle(mh); - WARN_ON(1); + WARN_ON_ONCE(1); goto err; } @@ -1124,11 +1140,30 @@ err: } #endif -static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh) +static void dlm_midcomms_commit_msg_3_2_trace(const struct dlm_mhandle *mh, + const void *name, int namelen) +{ + switch (mh->inner_p->header.h_cmd) { + case DLM_MSG: + trace_dlm_send_message(mh->seq, &mh->inner_p->message, + name, namelen); + break; + case DLM_RCOM: + trace_dlm_send_rcom(mh->seq, &mh->inner_p->rcom); + break; + default: + /* nothing to trace */ + break; + } +} + +static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh, + const void *name, int namelen) { /* nexthdr chain for fast lookup */ - mh->opts->o_nextcmd = mh->inner_hd->h_cmd; + mh->opts->o_nextcmd = mh->inner_p->header.h_cmd; mh->committed = true; + dlm_midcomms_commit_msg_3_2_trace(mh, name, namelen); dlm_lowcomms_commit_msg(mh->msg); } @@ -1136,8 +1171,10 @@ static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh) * dlm_midcomms_get_mhandle */ #ifndef __CHECKER__ -void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh) +void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh, + const void *name, int namelen) { + switch (mh->node->version) { case DLM_VERSION_3_1: srcu_read_unlock(&nodes_srcu, mh->idx); @@ -1148,25 +1185,47 @@ void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh) dlm_free_mhandle(mh); break; case DLM_VERSION_3_2: - dlm_midcomms_commit_msg_3_2(mh); + /* held rcu read lock here, because we sending the + * dlm message out, when we do that we could receive + * an ack back which releases the mhandle and we + * get a use after free. 
+ */ + rcu_read_lock(); + dlm_midcomms_commit_msg_3_2(mh, name, namelen); srcu_read_unlock(&nodes_srcu, mh->idx); + rcu_read_unlock(); break; default: srcu_read_unlock(&nodes_srcu, mh->idx); - WARN_ON(1); + WARN_ON_ONCE(1); break; } } #endif int dlm_midcomms_start(void) +{ + return dlm_lowcomms_start(); +} + +void dlm_midcomms_stop(void) +{ + dlm_lowcomms_stop(); +} + +void dlm_midcomms_init(void) { int i; for (i = 0; i < CONN_HASH_SIZE; i++) INIT_HLIST_HEAD(&node_hash[i]); - return dlm_lowcomms_start(); + dlm_lowcomms_init(); +} + +void dlm_midcomms_exit(void) +{ + dlm_lowcomms_exit(); } static void dlm_act_fin_ack_rcv(struct midcomms_node *node) @@ -1195,7 +1254,7 @@ static void dlm_act_fin_ack_rcv(struct midcomms_node *node) spin_unlock(&node->state_lock); log_print("%s: unexpected state: %d\n", __func__, node->state); - WARN_ON(1); + WARN_ON_ONCE(1); return; } spin_unlock(&node->state_lock); @@ -1307,7 +1366,8 @@ static void midcomms_node_release(struct rcu_head *rcu) { struct midcomms_node *node = container_of(rcu, struct midcomms_node, rcu); - WARN_ON(atomic_read(&node->send_queue_cnt)); + WARN_ON_ONCE(atomic_read(&node->send_queue_cnt)); + dlm_send_queue_flush(node); kfree(node); } diff --git a/fs/dlm/midcomms.h b/fs/dlm/midcomms.h index 82bcd9661922..69296552d5ad 100644 --- a/fs/dlm/midcomms.h +++ b/fs/dlm/midcomms.h @@ -17,9 +17,13 @@ struct midcomms_node; int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int buflen); struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, gfp_t allocation, char **ppc); -void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh); +void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh, const void *name, + int namelen); int dlm_midcomms_close(int nodeid); int dlm_midcomms_start(void); +void dlm_midcomms_stop(void); +void dlm_midcomms_init(void); +void dlm_midcomms_exit(void); void dlm_midcomms_shutdown(void); void dlm_midcomms_add_member(int nodeid); void dlm_midcomms_remove_member(int nodeid); diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c index f19860315043..b76d52e2f6bd 100644 --- a/fs/dlm/rcom.c +++ b/fs/dlm/rcom.c @@ -91,7 +91,7 @@ static int create_rcom_stateless(struct dlm_ls *ls, int to_nodeid, int type, static void send_rcom(struct dlm_mhandle *mh, struct dlm_rcom *rc) { - dlm_midcomms_commit_mhandle(mh); + dlm_midcomms_commit_mhandle(mh, NULL, 0); } static void send_rcom_stateless(struct dlm_msg *msg, struct dlm_rcom *rc) @@ -516,7 +516,7 @@ int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in) rf = (struct rcom_config *) rc->rc_buf; rf->rf_lvblen = cpu_to_le32(~0U); - dlm_midcomms_commit_mhandle(mh); + dlm_midcomms_commit_mhandle(mh, NULL, 0); return 0; } diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c index 091fd5adf818..5cd612a8f858 100644 --- a/fs/erofs/decompressor_lzma.c +++ b/fs/erofs/decompressor_lzma.c @@ -278,7 +278,7 @@ again: } } if (no < nrpages_out && strm->buf.out) - kunmap(rq->in[no]); + kunmap(rq->out[no]); if (ni < nrpages_in) kunmap(rq->in[ni]); /* 4. 
push back LZMA stream context to the global list */ diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 3af123b7633d..bc4971ee26d2 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1172,12 +1172,12 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, if (!be->decompressed_pages) be->decompressed_pages = - kcalloc(be->nr_pages, sizeof(struct page *), - GFP_KERNEL | __GFP_NOFAIL); + kvcalloc(be->nr_pages, sizeof(struct page *), + GFP_KERNEL | __GFP_NOFAIL); if (!be->compressed_pages) be->compressed_pages = - kcalloc(pclusterpages, sizeof(struct page *), - GFP_KERNEL | __GFP_NOFAIL); + kvcalloc(pclusterpages, sizeof(struct page *), + GFP_KERNEL | __GFP_NOFAIL); z_erofs_parse_out_bvecs(be); err2 = z_erofs_parse_in_bvecs(be, &overlapped); @@ -1225,7 +1225,7 @@ out: } if (be->compressed_pages < be->onstack_pages || be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES) - kfree(be->compressed_pages); + kvfree(be->compressed_pages); z_erofs_fill_other_copies(be, err); for (i = 0; i < be->nr_pages; ++i) { @@ -1244,7 +1244,7 @@ out: } if (be->decompressed_pages != be->onstack_pages) - kfree(be->decompressed_pages); + kvfree(be->decompressed_pages); pcl->length = 0; pcl->partial = true; diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 4e739902dc03..a2bc440743ae 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1529,6 +1529,7 @@ struct ext4_sb_info { unsigned int s_mount_opt2; unsigned long s_mount_flags; unsigned int s_def_mount_opt; + unsigned int s_def_mount_opt2; ext4_fsblk_t s_sb_block; atomic64_t s_resv_clusters; kuid_t s_resuid; diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c index 7ed71c652f67..1110bfa0a5b7 100644 --- a/fs/ext4/fast_commit.c +++ b/fs/ext4/fast_commit.c @@ -1354,8 +1354,14 @@ struct dentry_info_args { char *dname; }; +/* Same as struct ext4_fc_tl, but uses native endianness fields */ +struct ext4_fc_tl_mem { + u16 fc_tag; + u16 fc_len; +}; + static inline void tl_to_darg(struct dentry_info_args *darg, - struct ext4_fc_tl *tl, u8 *val) + struct ext4_fc_tl_mem *tl, u8 *val) { struct ext4_fc_dentry_info fcd; @@ -1367,16 +1373,18 @@ static inline void tl_to_darg(struct dentry_info_args *darg, darg->dname_len = tl->fc_len - sizeof(struct ext4_fc_dentry_info); } -static inline void ext4_fc_get_tl(struct ext4_fc_tl *tl, u8 *val) +static inline void ext4_fc_get_tl(struct ext4_fc_tl_mem *tl, u8 *val) { - memcpy(tl, val, EXT4_FC_TAG_BASE_LEN); - tl->fc_len = le16_to_cpu(tl->fc_len); - tl->fc_tag = le16_to_cpu(tl->fc_tag); + struct ext4_fc_tl tl_disk; + + memcpy(&tl_disk, val, EXT4_FC_TAG_BASE_LEN); + tl->fc_len = le16_to_cpu(tl_disk.fc_len); + tl->fc_tag = le16_to_cpu(tl_disk.fc_tag); } /* Unlink replay function */ -static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl, - u8 *val) +static int ext4_fc_replay_unlink(struct super_block *sb, + struct ext4_fc_tl_mem *tl, u8 *val) { struct inode *inode, *old_parent; struct qstr entry; @@ -1473,8 +1481,8 @@ out: } /* Link replay function */ -static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl, - u8 *val) +static int ext4_fc_replay_link(struct super_block *sb, + struct ext4_fc_tl_mem *tl, u8 *val) { struct inode *inode; struct dentry_info_args darg; @@ -1528,8 +1536,8 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino) /* * Inode replay function */ -static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl, - u8 *val) +static int ext4_fc_replay_inode(struct super_block *sb, + struct 
ext4_fc_tl_mem *tl, u8 *val) { struct ext4_fc_inode fc_inode; struct ext4_inode *raw_inode; @@ -1631,8 +1639,8 @@ out: * inode for which we are trying to create a dentry here, should already have * been replayed before we start here. */ -static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl, - u8 *val) +static int ext4_fc_replay_create(struct super_block *sb, + struct ext4_fc_tl_mem *tl, u8 *val) { int ret = 0; struct inode *inode = NULL; @@ -1730,7 +1738,7 @@ int ext4_fc_record_regions(struct super_block *sb, int ino, /* Replay add range tag */ static int ext4_fc_replay_add_range(struct super_block *sb, - struct ext4_fc_tl *tl, u8 *val) + struct ext4_fc_tl_mem *tl, u8 *val) { struct ext4_fc_add_range fc_add_ex; struct ext4_extent newex, *ex; @@ -1850,8 +1858,8 @@ out: /* Replay DEL_RANGE tag */ static int -ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl, - u8 *val) +ext4_fc_replay_del_range(struct super_block *sb, + struct ext4_fc_tl_mem *tl, u8 *val) { struct inode *inode; struct ext4_fc_del_range lrange; @@ -2047,7 +2055,7 @@ static int ext4_fc_replay_scan(journal_t *journal, struct ext4_fc_replay_state *state; int ret = JBD2_FC_REPLAY_CONTINUE; struct ext4_fc_add_range ext; - struct ext4_fc_tl tl; + struct ext4_fc_tl_mem tl; struct ext4_fc_tail tail; __u8 *start, *end, *cur, *val; struct ext4_fc_head head; @@ -2166,7 +2174,7 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh, { struct super_block *sb = journal->j_private; struct ext4_sb_info *sbi = EXT4_SB(sb); - struct ext4_fc_tl tl; + struct ext4_fc_tl_mem tl; __u8 *start, *end, *cur, *val; int ret = JBD2_FC_REPLAY_CONTINUE; struct ext4_fc_replay_state *state = &sbi->s_fc_replay_state; diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c index 4493ef0c715e..cdf9bfe10137 100644 --- a/fs/ext4/fsmap.c +++ b/fs/ext4/fsmap.c @@ -486,6 +486,8 @@ static int ext4_getfsmap_datadev(struct super_block *sb, keys[0].fmr_physical = bofs; if (keys[1].fmr_physical >= eofs) keys[1].fmr_physical = eofs - 1; + if (keys[1].fmr_physical < keys[0].fmr_physical) + return 0; start_fsb = keys[0].fmr_physical; end_fsb = keys[1].fmr_physical; diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index a4fbe825694b..c4475a74c762 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -159,7 +159,6 @@ int ext4_find_inline_data_nolock(struct inode *inode) (void *)ext4_raw_inode(&is.iloc)); EXT4_I(inode)->i_inline_size = EXT4_MIN_INLINE_DATA_SIZE + le32_to_cpu(is.s.here->e_value_size); - ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); } out: brelse(is.iloc.bh); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 03c137ec8b82..dc63e4a96f2b 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -4729,8 +4729,13 @@ static inline int ext4_iget_extra_inode(struct inode *inode, if (EXT4_INODE_HAS_XATTR_SPACE(inode) && *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) { + int err; + ext4_set_inode_state(inode, EXT4_STATE_XATTR); - return ext4_find_inline_data_nolock(inode); + err = ext4_find_inline_data_nolock(inode); + if (!err && ext4_has_inline_data(inode)) + ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); + return err; } else EXT4_I(inode)->i_inline_off = 0; return 0; diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 8067ccda34e4..8c2b1ff5e695 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -434,6 +434,7 @@ static long swap_inode_boot_loader(struct super_block *sb, ei_bl->i_flags = 0; inode_set_iversion(inode_bl, 1); i_size_write(inode_bl, 0); + EXT4_I(inode_bl)->i_disksize = 
inode_bl->i_size; inode_bl->i_mode = S_IFREG; if (ext4_has_feature_extents(sb)) { ext4_set_inode_flag(inode_bl, EXT4_INODE_EXTENTS); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 1c5518a4bdf9..800d631c920b 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1595,11 +1595,10 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir, int has_inline_data = 1; ret = ext4_find_inline_entry(dir, fname, res_dir, &has_inline_data); - if (has_inline_data) { - if (inlined) - *inlined = 1; + if (inlined) + *inlined = has_inline_data; + if (has_inline_data) goto cleanup_and_exit; - } } if ((namelen <= 2) && (name[0] == '.') && @@ -3646,7 +3645,8 @@ static void ext4_resetent(handle_t *handle, struct ext4_renament *ent, * so the old->de may no longer valid and need to find it again * before reset old inode info. */ - old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL); + old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, + &old.inlined); if (IS_ERR(old.bh)) retval = PTR_ERR(old.bh); if (!old.bh) @@ -3813,9 +3813,20 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir, return retval; } - old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL); - if (IS_ERR(old.bh)) - return PTR_ERR(old.bh); + /* + * We need to protect against old.inode directory getting converted + * from inline directory format into a normal one. + */ + if (S_ISDIR(old.inode->i_mode)) + inode_lock_nested(old.inode, I_MUTEX_NONDIR2); + + old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, + &old.inlined); + if (IS_ERR(old.bh)) { + retval = PTR_ERR(old.bh); + goto unlock_moved_dir; + } + /* * Check for inode number is _not_ due to possible IO errors. * We might rmdir the source, keep it as pwd of some process @@ -3873,8 +3884,10 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir, goto end_rename; } retval = ext4_rename_dir_prepare(handle, &old); - if (retval) + if (retval) { + inode_unlock(old.inode); goto end_rename; + } } /* * If we're renaming a file within an inline_data dir and adding or @@ -4010,6 +4023,11 @@ release_bh: brelse(old.dir_bh); brelse(old.bh); brelse(new.bh); + +unlock_moved_dir: + if (S_ISDIR(old.inode->i_mode)) + inode_unlock(old.inode); + return retval; } diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 97fa7b4c645f..d0302b66c215 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -409,7 +409,8 @@ static void io_submit_init_bio(struct ext4_io_submit *io, static void io_submit_add_bh(struct ext4_io_submit *io, struct inode *inode, - struct page *page, + struct page *pagecache_page, + struct page *bounce_page, struct buffer_head *bh) { int ret; @@ -421,10 +422,11 @@ submit_and_retry: } if (io->io_bio == NULL) io_submit_init_bio(io, bh); - ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh)); + ret = bio_add_page(io->io_bio, bounce_page ?: pagecache_page, + bh->b_size, bh_offset(bh)); if (ret != bh->b_size) goto submit_and_retry; - wbc_account_cgroup_owner(io->io_wbc, page, bh->b_size); + wbc_account_cgroup_owner(io->io_wbc, pagecache_page, bh->b_size); io->io_next_block++; } @@ -543,8 +545,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, do { if (!buffer_async_write(bh)) continue; - io_submit_add_bh(io, inode, - bounce_page ? 
bounce_page : page, bh); + io_submit_add_bh(io, inode, page, bounce_page, bh); nr_submitted++; clear_buffer_dirty(bh); } while ((bh = bh->b_this_page) != head); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 79e4880cc1d8..05ff8bc9c298 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -2146,7 +2146,7 @@ static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param) return 0; case Opt_commit: if (result.uint_32 == 0) - ctx->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE; + result.uint_32 = JBD2_DEFAULT_MAX_COMMIT_AGE; else if (result.uint_32 > INT_MAX / HZ) { ext4_msg(NULL, KERN_ERR, "Invalid commit interval %d, " @@ -2883,7 +2883,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; - int def_errors, def_mount_opt = sbi->s_def_mount_opt; + int def_errors; const struct mount_opts *m; char sep = nodefs ? '\n' : ','; @@ -2895,15 +2895,28 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, for (m = ext4_mount_opts; m->token != Opt_err; m++) { int want_set = m->flags & MOPT_SET; + int opt_2 = m->flags & MOPT_2; + unsigned int mount_opt, def_mount_opt; + if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) || m->flags & MOPT_SKIP) continue; - if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt))) - continue; /* skip if same as the default */ + + if (opt_2) { + mount_opt = sbi->s_mount_opt2; + def_mount_opt = sbi->s_def_mount_opt2; + } else { + mount_opt = sbi->s_mount_opt; + def_mount_opt = sbi->s_def_mount_opt; + } + /* skip if same as the default */ + if (!nodefs && !(m->mount_opt & (mount_opt ^ def_mount_opt))) + continue; + /* select Opt_noFoo vs Opt_Foo */ if ((want_set && - (sbi->s_mount_opt & m->mount_opt) != m->mount_opt) || - (!want_set && (sbi->s_mount_opt & m->mount_opt))) - continue; /* select Opt_noFoo vs Opt_Foo */ + (mount_opt & m->mount_opt) != m->mount_opt) || + (!want_set && (mount_opt & m->mount_opt))) + continue; SEQ_OPTS_PRINT("%s", token2str(m->token)); } @@ -2931,7 +2944,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, if (nodefs || sbi->s_stripe) SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe); if (nodefs || EXT4_MOUNT_DATA_FLAGS & - (sbi->s_mount_opt ^ def_mount_opt)) { + (sbi->s_mount_opt ^ sbi->s_def_mount_opt)) { if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) SEQ_OPTS_PUTS("data=journal"); else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) @@ -5076,6 +5089,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) goto failed_mount; sbi->s_def_mount_opt = sbi->s_mount_opt; + sbi->s_def_mount_opt2 = sbi->s_mount_opt2; err = ext4_check_opt_consistency(fc, sb); if (err < 0) diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 099a87ec9b2a..e0eb6eb02a83 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -2790,6 +2790,9 @@ shift: (void *)header, total_ino); EXT4_I(inode)->i_extra_isize = new_extra_isize; + if (ext4_has_inline_data(inode)) + error = ext4_find_inline_data_nolock(inode); + cleanup: if (error && (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count))) { ext4_warning(inode->i_sb, "Unable to expand inode %lu. 
Delete some EAs or run e2fsck.", diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c index 28b12553f2b3..9a8153895d20 100644 --- a/fs/f2fs/extent_cache.c +++ b/fs/f2fs/extent_cache.c @@ -161,118 +161,52 @@ static bool __is_front_mergeable(struct extent_info *cur, return __is_extent_mergeable(cur, front, type); } -static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re, - unsigned int ofs) -{ - if (cached_re) { - if (cached_re->ofs <= ofs && - cached_re->ofs + cached_re->len > ofs) { - return cached_re; - } - } - return NULL; -} - -static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root, - unsigned int ofs) +static struct extent_node *__lookup_extent_node(struct rb_root_cached *root, + struct extent_node *cached_en, unsigned int fofs) { struct rb_node *node = root->rb_root.rb_node; - struct rb_entry *re; + struct extent_node *en; + /* check a cached entry */ + if (cached_en && cached_en->ei.fofs <= fofs && + cached_en->ei.fofs + cached_en->ei.len > fofs) + return cached_en; + + /* check rb_tree */ while (node) { - re = rb_entry(node, struct rb_entry, rb_node); + en = rb_entry(node, struct extent_node, rb_node); - if (ofs < re->ofs) + if (fofs < en->ei.fofs) node = node->rb_left; - else if (ofs >= re->ofs + re->len) + else if (fofs >= en->ei.fofs + en->ei.len) node = node->rb_right; else - return re; + return en; } return NULL; } -struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root, - struct rb_entry *cached_re, unsigned int ofs) -{ - struct rb_entry *re; - - re = __lookup_rb_tree_fast(cached_re, ofs); - if (!re) - return __lookup_rb_tree_slow(root, ofs); - - return re; -} - -struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi, - struct rb_root_cached *root, - struct rb_node **parent, - unsigned long long key, bool *leftmost) -{ - struct rb_node **p = &root->rb_root.rb_node; - struct rb_entry *re; - - while (*p) { - *parent = *p; - re = rb_entry(*parent, struct rb_entry, rb_node); - - if (key < re->key) { - p = &(*p)->rb_left; - } else { - p = &(*p)->rb_right; - *leftmost = false; - } - } - - return p; -} - -struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi, - struct rb_root_cached *root, - struct rb_node **parent, - unsigned int ofs, bool *leftmost) -{ - struct rb_node **p = &root->rb_root.rb_node; - struct rb_entry *re; - - while (*p) { - *parent = *p; - re = rb_entry(*parent, struct rb_entry, rb_node); - - if (ofs < re->ofs) { - p = &(*p)->rb_left; - } else if (ofs >= re->ofs + re->len) { - p = &(*p)->rb_right; - *leftmost = false; - } else { - f2fs_bug_on(sbi, 1); - } - } - - return p; -} - /* - * lookup rb entry in position of @ofs in rb-tree, + * lookup rb entry in position of @fofs in rb-tree, * if hit, return the entry, otherwise, return NULL - * @prev_ex: extent before ofs - * @next_ex: extent after ofs - * @insert_p: insert point for new extent at ofs + * @prev_ex: extent before fofs + * @next_ex: extent after fofs + * @insert_p: insert point for new extent at fofs * in order to simplify the insertion after. * tree must stay unchanged between lookup and insertion. 
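The f2fs changes above drop the generic struct rb_entry helpers and open-code lookups typed on struct extent_node directly. The core of that lookup is the usual cached rb_root_cached walk over [start, start + len) ranges; a self-contained sketch of the pattern, using illustrative type and field names rather than f2fs symbols, looks like this:

	#include <linux/rbtree.h>

	struct range_node {
		struct rb_node rb_node;
		unsigned int start;
		unsigned int len;
	};

	static struct range_node *range_lookup(struct rb_root_cached *root,
					       struct range_node *cached,
					       unsigned int pos)
	{
		struct rb_node *node = root->rb_root.rb_node;
		struct range_node *rn;

		/* fast path: the last hit often covers the next query */
		if (cached && cached->start <= pos &&
		    pos < cached->start + cached->len)
			return cached;

		while (node) {
			rn = rb_entry(node, struct range_node, rb_node);

			if (pos < rn->start)
				node = node->rb_left;
			else if (pos >= rn->start + rn->len)
				node = node->rb_right;
			else
				return rn;
		}
		return NULL;
	}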
*/ -struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root, - struct rb_entry *cached_re, - unsigned int ofs, - struct rb_entry **prev_entry, - struct rb_entry **next_entry, +static struct extent_node *__lookup_extent_node_ret(struct rb_root_cached *root, + struct extent_node *cached_en, + unsigned int fofs, + struct extent_node **prev_entry, + struct extent_node **next_entry, struct rb_node ***insert_p, struct rb_node **insert_parent, - bool force, bool *leftmost) + bool *leftmost) { struct rb_node **pnode = &root->rb_root.rb_node; struct rb_node *parent = NULL, *tmp_node; - struct rb_entry *re = cached_re; + struct extent_node *en = cached_en; *insert_p = NULL; *insert_parent = NULL; @@ -282,24 +216,20 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root, if (RB_EMPTY_ROOT(&root->rb_root)) return NULL; - if (re) { - if (re->ofs <= ofs && re->ofs + re->len > ofs) - goto lookup_neighbors; - } + if (en && en->ei.fofs <= fofs && en->ei.fofs + en->ei.len > fofs) + goto lookup_neighbors; - if (leftmost) - *leftmost = true; + *leftmost = true; while (*pnode) { parent = *pnode; - re = rb_entry(*pnode, struct rb_entry, rb_node); + en = rb_entry(*pnode, struct extent_node, rb_node); - if (ofs < re->ofs) { + if (fofs < en->ei.fofs) { pnode = &(*pnode)->rb_left; - } else if (ofs >= re->ofs + re->len) { + } else if (fofs >= en->ei.fofs + en->ei.len) { pnode = &(*pnode)->rb_right; - if (leftmost) - *leftmost = false; + *leftmost = false; } else { goto lookup_neighbors; } @@ -308,71 +238,32 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root, *insert_p = pnode; *insert_parent = parent; - re = rb_entry(parent, struct rb_entry, rb_node); + en = rb_entry(parent, struct extent_node, rb_node); tmp_node = parent; - if (parent && ofs > re->ofs) + if (parent && fofs > en->ei.fofs) tmp_node = rb_next(parent); - *next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node); + *next_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node); tmp_node = parent; - if (parent && ofs < re->ofs) + if (parent && fofs < en->ei.fofs) tmp_node = rb_prev(parent); - *prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node); + *prev_entry = rb_entry_safe(tmp_node, struct extent_node, rb_node); return NULL; lookup_neighbors: - if (ofs == re->ofs || force) { + if (fofs == en->ei.fofs) { /* lookup prev node for merging backward later */ - tmp_node = rb_prev(&re->rb_node); - *prev_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node); + tmp_node = rb_prev(&en->rb_node); + *prev_entry = rb_entry_safe(tmp_node, + struct extent_node, rb_node); } - if (ofs == re->ofs + re->len - 1 || force) { + if (fofs == en->ei.fofs + en->ei.len - 1) { /* lookup next node for merging frontward later */ - tmp_node = rb_next(&re->rb_node); - *next_entry = rb_entry_safe(tmp_node, struct rb_entry, rb_node); + tmp_node = rb_next(&en->rb_node); + *next_entry = rb_entry_safe(tmp_node, + struct extent_node, rb_node); } - return re; -} - -bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi, - struct rb_root_cached *root, bool check_key) -{ -#ifdef CONFIG_F2FS_CHECK_FS - struct rb_node *cur = rb_first_cached(root), *next; - struct rb_entry *cur_re, *next_re; - - if (!cur) - return true; - - while (cur) { - next = rb_next(cur); - if (!next) - return true; - - cur_re = rb_entry(cur, struct rb_entry, rb_node); - next_re = rb_entry(next, struct rb_entry, rb_node); - - if (check_key) { - if (cur_re->key > next_re->key) { - f2fs_info(sbi, "inconsistent rbtree, " - "cur(%llu) next(%llu)", 
- cur_re->key, next_re->key); - return false; - } - goto next; - } - - if (cur_re->ofs + cur_re->len > next_re->ofs) { - f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)", - cur_re->ofs, cur_re->len, - next_re->ofs, next_re->len); - return false; - } -next: - cur = next; - } -#endif - return true; + return en; } static struct kmem_cache *extent_tree_slab; @@ -587,8 +478,7 @@ static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs, goto out; } - en = (struct extent_node *)f2fs_lookup_rb_tree(&et->root, - (struct rb_entry *)et->cached_en, pgofs); + en = __lookup_extent_node(&et->root, et->cached_en, pgofs); if (!en) goto out; @@ -662,7 +552,7 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi, bool leftmost) { struct extent_tree_info *eti = &sbi->extent_tree[et->type]; - struct rb_node **p; + struct rb_node **p = &et->root.rb_root.rb_node; struct rb_node *parent = NULL; struct extent_node *en = NULL; @@ -674,8 +564,21 @@ static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi, leftmost = true; - p = f2fs_lookup_rb_tree_for_insert(sbi, &et->root, &parent, - ei->fofs, &leftmost); + /* look up extent_node in the rb tree */ + while (*p) { + parent = *p; + en = rb_entry(parent, struct extent_node, rb_node); + + if (ei->fofs < en->ei.fofs) { + p = &(*p)->rb_left; + } else if (ei->fofs >= en->ei.fofs + en->ei.len) { + p = &(*p)->rb_right; + leftmost = false; + } else { + f2fs_bug_on(sbi, 1); + } + } + do_insert: en = __attach_extent_node(sbi, et, ei, parent, p, leftmost); if (!en) @@ -734,11 +637,10 @@ static void __update_extent_tree_range(struct inode *inode, } /* 1. lookup first extent node in range [fofs, fofs + len - 1] */ - en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root, - (struct rb_entry *)et->cached_en, fofs, - (struct rb_entry **)&prev_en, - (struct rb_entry **)&next_en, - &insert_p, &insert_parent, false, + en = __lookup_extent_node_ret(&et->root, + et->cached_en, fofs, + &prev_en, &next_en, + &insert_p, &insert_parent, &leftmost); if (!en) en = next_en; @@ -876,12 +778,11 @@ void f2fs_update_read_extent_tree_range_compressed(struct inode *inode, write_lock(&et->lock); - en = (struct extent_node *)f2fs_lookup_rb_tree_ret(&et->root, - (struct rb_entry *)et->cached_en, fofs, - (struct rb_entry **)&prev_en, - (struct rb_entry **)&next_en, - &insert_p, &insert_parent, false, - &leftmost); + en = __lookup_extent_node_ret(&et->root, + et->cached_en, fofs, + &prev_en, &next_en, + &insert_p, &insert_parent, + &leftmost); if (en) goto unlock_out; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 21596e0266ba..f40fa6cbab9d 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -353,15 +353,7 @@ struct discard_info { struct discard_cmd { struct rb_node rb_node; /* rb node located in rb-tree */ - union { - struct { - block_t lstart; /* logical start address */ - block_t len; /* length */ - block_t start; /* actual start address in dev */ - }; - struct discard_info di; /* discard info */ - - }; + struct discard_info di; /* discard info */ struct list_head list; /* command list */ struct completion wait; /* compleation */ struct block_device *bdev; /* bdev */ @@ -628,17 +620,6 @@ enum extent_type { NR_EXTENT_CACHES, }; -struct rb_entry { - struct rb_node rb_node; /* rb node located in rb-tree */ - union { - struct { - unsigned int ofs; /* start offset of the entry */ - unsigned int len; /* length of the entry */ - }; - unsigned long long key; /* 64-bits key */ - } __packed; -}; - struct extent_info { unsigned int fofs; /* 
start offset in a file */ unsigned int len; /* length of the extent */ @@ -4138,23 +4119,6 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *sbi); * extent_cache.c */ bool sanity_check_extent_cache(struct inode *inode); -struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root, - struct rb_entry *cached_re, unsigned int ofs); -struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi, - struct rb_root_cached *root, - struct rb_node **parent, - unsigned long long key, bool *left_most); -struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi, - struct rb_root_cached *root, - struct rb_node **parent, - unsigned int ofs, bool *leftmost); -struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root, - struct rb_entry *cached_re, unsigned int ofs, - struct rb_entry **prev_entry, struct rb_entry **next_entry, - struct rb_node ***insert_p, struct rb_node **insert_parent, - bool force, bool *leftmost); -bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi, - struct rb_root_cached *root, bool check_key); void f2fs_init_extent_tree(struct inode *inode); void f2fs_drop_extent_tree(struct inode *inode); void f2fs_destroy_extent_node(struct inode *inode); diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 63882546e710..adef513aac33 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -390,40 +390,95 @@ static unsigned int count_bits(const unsigned long *addr, return sum; } -static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi, - unsigned long long mtime, unsigned int segno, - struct rb_node *parent, struct rb_node **p, - bool left_most) +static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi, + struct rb_root_cached *root) +{ +#ifdef CONFIG_F2FS_CHECK_FS + struct rb_node *cur = rb_first_cached(root), *next; + struct victim_entry *cur_ve, *next_ve; + + while (cur) { + next = rb_next(cur); + if (!next) + return true; + + cur_ve = rb_entry(cur, struct victim_entry, rb_node); + next_ve = rb_entry(next, struct victim_entry, rb_node); + + if (cur_ve->mtime > next_ve->mtime) { + f2fs_info(sbi, "broken victim_rbtree, " + "cur_mtime(%llu) next_mtime(%llu)", + cur_ve->mtime, next_ve->mtime); + return false; + } + cur = next; + } +#endif + return true; +} + +static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi, + unsigned long long mtime) +{ + struct atgc_management *am = &sbi->am; + struct rb_node *node = am->root.rb_root.rb_node; + struct victim_entry *ve = NULL; + + while (node) { + ve = rb_entry(node, struct victim_entry, rb_node); + + if (mtime < ve->mtime) + node = node->rb_left; + else + node = node->rb_right; + } + return ve; +} + +static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi, + unsigned long long mtime, unsigned int segno) { struct atgc_management *am = &sbi->am; struct victim_entry *ve; - ve = f2fs_kmem_cache_alloc(victim_entry_slab, - GFP_NOFS, true, NULL); + ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL); ve->mtime = mtime; ve->segno = segno; - rb_link_node(&ve->rb_node, parent, p); - rb_insert_color_cached(&ve->rb_node, &am->root, left_most); - list_add_tail(&ve->list, &am->victim_list); - am->victim_count++; return ve; } -static void insert_victim_entry(struct f2fs_sb_info *sbi, +static void __insert_victim_entry(struct f2fs_sb_info *sbi, unsigned long long mtime, unsigned int segno) { struct atgc_management *am = &sbi->am; - struct rb_node **p; + struct rb_root_cached *root = &am->root; + struct rb_node **p = &root->rb_root.rb_node; struct rb_node *parent = NULL; 
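The insertion side, as used by __insert_victim_entry() and __insert_discard_cmd() in this patch, follows the matching idiom: descend to the empty link, track whether the walk ever went right so the leftmost cache stays valid, then rb_link_node() and rb_insert_color_cached(). A minimal sketch with an illustrative node type (not an f2fs structure):

	#include <linux/types.h>
	#include <linux/rbtree.h>

	struct keyed_node {
		struct rb_node rb_node;
		unsigned long long key;
	};

	static void keyed_insert(struct rb_root_cached *root, struct keyed_node *new)
	{
		struct rb_node **p = &root->rb_root.rb_node;
		struct rb_node *parent = NULL;
		bool leftmost = true;

		while (*p) {
			struct keyed_node *kn;

			parent = *p;
			kn = rb_entry(parent, struct keyed_node, rb_node);

			if (new->key < kn->key) {
				p = &(*p)->rb_left;
			} else {
				p = &(*p)->rb_right;
				leftmost = false;	/* no longer the smallest key */
			}
		}

		rb_link_node(&new->rb_node, parent, p);
		rb_insert_color_cached(&new->rb_node, root, leftmost);
	}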
+ struct victim_entry *ve; bool left_most = true; - p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most); - attach_victim_entry(sbi, mtime, segno, parent, p, left_most); + /* look up rb tree to find parent node */ + while (*p) { + parent = *p; + ve = rb_entry(parent, struct victim_entry, rb_node); + + if (mtime < ve->mtime) { + p = &(*p)->rb_left; + } else { + p = &(*p)->rb_right; + left_most = false; + } + } + + ve = __create_victim_entry(sbi, mtime, segno); + + rb_link_node(&ve->rb_node, parent, p); + rb_insert_color_cached(&ve->rb_node, root, left_most); } static void add_victim_entry(struct f2fs_sb_info *sbi, @@ -459,19 +514,7 @@ static void add_victim_entry(struct f2fs_sb_info *sbi, if (sit_i->dirty_max_mtime - mtime < p->age_threshold) return; - insert_victim_entry(sbi, mtime, segno); -} - -static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi, - struct victim_sel_policy *p) -{ - struct atgc_management *am = &sbi->am; - struct rb_node *parent = NULL; - bool left_most; - - f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most); - - return parent; + __insert_victim_entry(sbi, mtime, segno); } static void atgc_lookup_victim(struct f2fs_sb_info *sbi, @@ -481,7 +524,6 @@ static void atgc_lookup_victim(struct f2fs_sb_info *sbi, struct atgc_management *am = &sbi->am; struct rb_root_cached *root = &am->root; struct rb_node *node; - struct rb_entry *re; struct victim_entry *ve; unsigned long long total_time; unsigned long long age, u, accu; @@ -508,12 +550,10 @@ static void atgc_lookup_victim(struct f2fs_sb_info *sbi, node = rb_first_cached(root); next: - re = rb_entry_safe(node, struct rb_entry, rb_node); - if (!re) + ve = rb_entry_safe(node, struct victim_entry, rb_node); + if (!ve) return; - ve = (struct victim_entry *)re; - if (ve->mtime >= max_mtime || ve->mtime < min_mtime) goto skip; @@ -555,8 +595,6 @@ static void atssr_lookup_victim(struct f2fs_sb_info *sbi, { struct sit_info *sit_i = SIT_I(sbi); struct atgc_management *am = &sbi->am; - struct rb_node *node; - struct rb_entry *re; struct victim_entry *ve; unsigned long long age; unsigned long long max_mtime = sit_i->dirty_max_mtime; @@ -566,25 +604,22 @@ static void atssr_lookup_victim(struct f2fs_sb_info *sbi, unsigned int dirty_threshold = max(am->max_candidate_count, am->candidate_ratio * am->victim_count / 100); - unsigned int cost; - unsigned int iter = 0; + unsigned int cost, iter; int stage = 0; if (max_mtime < min_mtime) return; max_mtime += 1; next_stage: - node = lookup_central_victim(sbi, p); + iter = 0; + ve = __lookup_victim_entry(sbi, p->age); next_node: - re = rb_entry_safe(node, struct rb_entry, rb_node); - if (!re) { - if (stage == 0) - goto skip_stage; + if (!ve) { + if (stage++ == 0) + goto next_stage; return; } - ve = (struct victim_entry *)re; - if (ve->mtime >= max_mtime || ve->mtime < min_mtime) goto skip_node; @@ -610,24 +645,20 @@ next_node: } skip_node: if (iter < dirty_threshold) { - if (stage == 0) - node = rb_prev(node); - else if (stage == 1) - node = rb_next(node); + ve = rb_entry(stage == 0 ? 
rb_prev(&ve->rb_node) : + rb_next(&ve->rb_node), + struct victim_entry, rb_node); goto next_node; } -skip_stage: - if (stage < 1) { - stage++; - iter = 0; + + if (stage++ == 0) goto next_stage; - } } + static void lookup_victim_by_age(struct f2fs_sb_info *sbi, struct victim_sel_policy *p) { - f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi, - &sbi->am.root, true)); + f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root)); if (p->gc_mode == GC_AT) atgc_lookup_victim(sbi, p); diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h index 15bd1d680f67..5ad6ac63e13f 100644 --- a/fs/f2fs/gc.h +++ b/fs/f2fs/gc.h @@ -55,20 +55,10 @@ struct gc_inode_list { struct radix_tree_root iroot; }; -struct victim_info { - unsigned long long mtime; /* mtime of section */ - unsigned int segno; /* section No. */ -}; - struct victim_entry { struct rb_node rb_node; /* rb node located in rb-tree */ - union { - struct { - unsigned long long mtime; /* mtime of section */ - unsigned int segno; /* segment No. */ - }; - struct victim_info vi; /* victim info */ - }; + unsigned long long mtime; /* mtime of section */ + unsigned int segno; /* segment No. */ struct list_head list; }; diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 60e7617c8460..d45243bb13c8 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -933,9 +933,9 @@ static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi, dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS, true, NULL); INIT_LIST_HEAD(&dc->list); dc->bdev = bdev; - dc->lstart = lstart; - dc->start = start; - dc->len = len; + dc->di.lstart = lstart; + dc->di.start = start; + dc->di.len = len; dc->ref = 0; dc->state = D_PREP; dc->queued = 0; @@ -950,20 +950,108 @@ static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi, return dc; } -static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi, - struct block_device *bdev, block_t lstart, - block_t start, block_t len, - struct rb_node *parent, struct rb_node **p, - bool leftmost) +static bool f2fs_check_discard_tree(struct f2fs_sb_info *sbi) +{ +#ifdef CONFIG_F2FS_CHECK_FS + struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; + struct rb_node *cur = rb_first_cached(&dcc->root), *next; + struct discard_cmd *cur_dc, *next_dc; + + while (cur) { + next = rb_next(cur); + if (!next) + return true; + + cur_dc = rb_entry(cur, struct discard_cmd, rb_node); + next_dc = rb_entry(next, struct discard_cmd, rb_node); + + if (cur_dc->di.lstart + cur_dc->di.len > next_dc->di.lstart) { + f2fs_info(sbi, "broken discard_rbtree, " + "cur(%u, %u) next(%u, %u)", + cur_dc->di.lstart, cur_dc->di.len, + next_dc->di.lstart, next_dc->di.len); + return false; + } + cur = next; + } +#endif + return true; +} + +static struct discard_cmd *__lookup_discard_cmd(struct f2fs_sb_info *sbi, + block_t blkaddr) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; + struct rb_node *node = dcc->root.rb_root.rb_node; struct discard_cmd *dc; - dc = __create_discard_cmd(sbi, bdev, lstart, start, len); + while (node) { + dc = rb_entry(node, struct discard_cmd, rb_node); - rb_link_node(&dc->rb_node, parent, p); - rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost); + if (blkaddr < dc->di.lstart) + node = node->rb_left; + else if (blkaddr >= dc->di.lstart + dc->di.len) + node = node->rb_right; + else + return dc; + } + return NULL; +} +static struct discard_cmd *__lookup_discard_cmd_ret(struct rb_root_cached *root, + block_t blkaddr, + struct discard_cmd **prev_entry, + struct discard_cmd **next_entry, + struct rb_node 
***insert_p, + struct rb_node **insert_parent) +{ + struct rb_node **pnode = &root->rb_root.rb_node; + struct rb_node *parent = NULL, *tmp_node; + struct discard_cmd *dc; + + *insert_p = NULL; + *insert_parent = NULL; + *prev_entry = NULL; + *next_entry = NULL; + + if (RB_EMPTY_ROOT(&root->rb_root)) + return NULL; + + while (*pnode) { + parent = *pnode; + dc = rb_entry(*pnode, struct discard_cmd, rb_node); + + if (blkaddr < dc->di.lstart) + pnode = &(*pnode)->rb_left; + else if (blkaddr >= dc->di.lstart + dc->di.len) + pnode = &(*pnode)->rb_right; + else + goto lookup_neighbors; + } + + *insert_p = pnode; + *insert_parent = parent; + + dc = rb_entry(parent, struct discard_cmd, rb_node); + tmp_node = parent; + if (parent && blkaddr > dc->di.lstart) + tmp_node = rb_next(parent); + *next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node); + + tmp_node = parent; + if (parent && blkaddr < dc->di.lstart) + tmp_node = rb_prev(parent); + *prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node); + return NULL; + +lookup_neighbors: + /* lookup prev node for merging backward later */ + tmp_node = rb_prev(&dc->rb_node); + *prev_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node); + + /* lookup next node for merging frontward later */ + tmp_node = rb_next(&dc->rb_node); + *next_entry = rb_entry_safe(tmp_node, struct discard_cmd, rb_node); return dc; } @@ -975,7 +1063,7 @@ static void __detach_discard_cmd(struct discard_cmd_control *dcc, list_del(&dc->list); rb_erase_cached(&dc->rb_node, &dcc->root); - dcc->undiscard_blks -= dc->len; + dcc->undiscard_blks -= dc->di.len; kmem_cache_free(discard_cmd_slab, dc); @@ -988,7 +1076,7 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi, struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; unsigned long flags; - trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len); + trace_f2fs_remove_discard(dc->bdev, dc->di.start, dc->di.len); spin_lock_irqsave(&dc->lock, flags); if (dc->bio_ref) { @@ -1006,7 +1094,7 @@ static void __remove_discard_cmd(struct f2fs_sb_info *sbi, printk_ratelimited( "%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d", KERN_INFO, sbi->sb->s_id, - dc->lstart, dc->start, dc->len, dc->error); + dc->di.lstart, dc->di.start, dc->di.len, dc->error); __detach_discard_cmd(dcc, dc); } @@ -1122,14 +1210,14 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi, if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) return 0; - trace_f2fs_issue_discard(bdev, dc->start, dc->len); + trace_f2fs_issue_discard(bdev, dc->di.start, dc->di.len); - lstart = dc->lstart; - start = dc->start; - len = dc->len; + lstart = dc->di.lstart; + start = dc->di.start; + len = dc->di.len; total_len = len; - dc->len = 0; + dc->di.len = 0; while (total_len && *issued < dpolicy->max_requests && !err) { struct bio *bio = NULL; @@ -1145,7 +1233,7 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi, if (*issued == dpolicy->max_requests) last = true; - dc->len += len; + dc->di.len += len; if (time_to_inject(sbi, FAULT_DISCARD)) { err = -EIO; @@ -1207,34 +1295,41 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi, return err; } -static void __insert_discard_tree(struct f2fs_sb_info *sbi, +static void __insert_discard_cmd(struct f2fs_sb_info *sbi, struct block_device *bdev, block_t lstart, - block_t start, block_t len, - struct rb_node **insert_p, - struct rb_node *insert_parent) + block_t start, block_t len) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; - struct rb_node **p; + struct rb_node **p = 
&dcc->root.rb_root.rb_node; struct rb_node *parent = NULL; + struct discard_cmd *dc; bool leftmost = true; - if (insert_p && insert_parent) { - parent = insert_parent; - p = insert_p; - goto do_insert; + /* look up rb tree to find parent node */ + while (*p) { + parent = *p; + dc = rb_entry(parent, struct discard_cmd, rb_node); + + if (lstart < dc->di.lstart) { + p = &(*p)->rb_left; + } else if (lstart >= dc->di.lstart + dc->di.len) { + p = &(*p)->rb_right; + leftmost = false; + } else { + f2fs_bug_on(sbi, 1); + } } - p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, - lstart, &leftmost); -do_insert: - __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, - p, leftmost); + dc = __create_discard_cmd(sbi, bdev, lstart, start, len); + + rb_link_node(&dc->rb_node, parent, p); + rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost); } static void __relocate_discard_cmd(struct discard_cmd_control *dcc, struct discard_cmd *dc) { - list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]); + list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->di.len)]); } static void __punch_discard_cmd(struct f2fs_sb_info *sbi, @@ -1244,7 +1339,7 @@ static void __punch_discard_cmd(struct f2fs_sb_info *sbi, struct discard_info di = dc->di; bool modified = false; - if (dc->state == D_DONE || dc->len == 1) { + if (dc->state == D_DONE || dc->di.len == 1) { __remove_discard_cmd(sbi, dc); return; } @@ -1252,23 +1347,22 @@ static void __punch_discard_cmd(struct f2fs_sb_info *sbi, dcc->undiscard_blks -= di.len; if (blkaddr > di.lstart) { - dc->len = blkaddr - dc->lstart; - dcc->undiscard_blks += dc->len; + dc->di.len = blkaddr - dc->di.lstart; + dcc->undiscard_blks += dc->di.len; __relocate_discard_cmd(dcc, dc); modified = true; } if (blkaddr < di.lstart + di.len - 1) { if (modified) { - __insert_discard_tree(sbi, dc->bdev, blkaddr + 1, + __insert_discard_cmd(sbi, dc->bdev, blkaddr + 1, di.start + blkaddr + 1 - di.lstart, - di.lstart + di.len - 1 - blkaddr, - NULL, NULL); + di.lstart + di.len - 1 - blkaddr); } else { - dc->lstart++; - dc->len--; - dc->start++; - dcc->undiscard_blks += dc->len; + dc->di.lstart++; + dc->di.len--; + dc->di.start++; + dcc->undiscard_blks += dc->di.len; __relocate_discard_cmd(dcc, dc); } } @@ -1287,37 +1381,33 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi, SECTOR_TO_BLOCK(bdev_max_discard_sectors(bdev)); block_t end = lstart + len; - dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root, - NULL, lstart, - (struct rb_entry **)&prev_dc, - (struct rb_entry **)&next_dc, - &insert_p, &insert_parent, true, NULL); + dc = __lookup_discard_cmd_ret(&dcc->root, lstart, + &prev_dc, &next_dc, &insert_p, &insert_parent); if (dc) prev_dc = dc; if (!prev_dc) { di.lstart = lstart; - di.len = next_dc ? next_dc->lstart - lstart : len; + di.len = next_dc ? 
next_dc->di.lstart - lstart : len; di.len = min(di.len, len); di.start = start; } while (1) { struct rb_node *node; - bool merged = false; struct discard_cmd *tdc = NULL; if (prev_dc) { - di.lstart = prev_dc->lstart + prev_dc->len; + di.lstart = prev_dc->di.lstart + prev_dc->di.len; if (di.lstart < lstart) di.lstart = lstart; if (di.lstart >= end) break; - if (!next_dc || next_dc->lstart > end) + if (!next_dc || next_dc->di.lstart > end) di.len = end - di.lstart; else - di.len = next_dc->lstart - di.lstart; + di.len = next_dc->di.lstart - di.lstart; di.start = start + di.lstart - lstart; } @@ -1333,7 +1423,7 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi, __relocate_discard_cmd(dcc, prev_dc); di = prev_dc->di; tdc = prev_dc; - merged = true; + goto next; } if (next_dc && next_dc->state == D_PREP && @@ -1347,13 +1437,10 @@ static void __update_discard_tree_range(struct f2fs_sb_info *sbi, __relocate_discard_cmd(dcc, next_dc); if (tdc) __remove_discard_cmd(sbi, tdc); - merged = true; + goto next; } - if (!merged) { - __insert_discard_tree(sbi, bdev, di.lstart, di.start, - di.len, NULL, NULL); - } + __insert_discard_cmd(sbi, bdev, di.lstart, di.start, di.len); next: prev_dc = next_dc; if (!prev_dc) @@ -1392,15 +1479,11 @@ static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi, struct rb_node **insert_p = NULL, *insert_parent = NULL; struct discard_cmd *dc; struct blk_plug plug; - unsigned int pos = dcc->next_pos; bool io_interrupted = false; mutex_lock(&dcc->cmd_lock); - dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root, - NULL, pos, - (struct rb_entry **)&prev_dc, - (struct rb_entry **)&next_dc, - &insert_p, &insert_parent, true, NULL); + dc = __lookup_discard_cmd_ret(&dcc->root, dcc->next_pos, + &prev_dc, &next_dc, &insert_p, &insert_parent); if (!dc) dc = next_dc; @@ -1418,7 +1501,7 @@ static void __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi, break; } - dcc->next_pos = dc->lstart + dc->len; + dcc->next_pos = dc->di.lstart + dc->di.len; err = __submit_discard_cmd(sbi, dpolicy, dc, issued); if (*issued >= dpolicy->max_requests) @@ -1477,8 +1560,7 @@ retry: if (list_empty(pend_list)) goto next; if (unlikely(dcc->rbtree_check)) - f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi, - &dcc->root, false)); + f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi)); blk_start_plug(&plug); list_for_each_entry_safe(dc, tmp, pend_list, list) { f2fs_bug_on(sbi, dc->state != D_PREP); @@ -1556,7 +1638,7 @@ static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi, dc->ref--; if (!dc->ref) { if (!dc->error) - len = dc->len; + len = dc->di.len; __remove_discard_cmd(sbi, dc); } mutex_unlock(&dcc->cmd_lock); @@ -1579,14 +1661,15 @@ next: mutex_lock(&dcc->cmd_lock); list_for_each_entry_safe(iter, tmp, wait_list, list) { - if (iter->lstart + iter->len <= start || end <= iter->lstart) + if (iter->di.lstart + iter->di.len <= start || + end <= iter->di.lstart) continue; - if (iter->len < dpolicy->granularity) + if (iter->di.len < dpolicy->granularity) continue; if (iter->state == D_DONE && !iter->ref) { wait_for_completion_io(&iter->wait); if (!iter->error) - trimmed += iter->len; + trimmed += iter->di.len; __remove_discard_cmd(sbi, iter); } else { iter->ref++; @@ -1630,8 +1713,7 @@ static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr) bool need_wait = false; mutex_lock(&dcc->cmd_lock); - dc = (struct discard_cmd *)f2fs_lookup_rb_tree(&dcc->root, - NULL, blkaddr); + dc = __lookup_discard_cmd(sbi, blkaddr); if (dc) { if (dc->state == 
D_PREP) { __punch_discard_cmd(sbi, dc, blkaddr); @@ -2964,24 +3046,20 @@ next: mutex_lock(&dcc->cmd_lock); if (unlikely(dcc->rbtree_check)) - f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi, - &dcc->root, false)); + f2fs_bug_on(sbi, !f2fs_check_discard_tree(sbi)); - dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root, - NULL, start, - (struct rb_entry **)&prev_dc, - (struct rb_entry **)&next_dc, - &insert_p, &insert_parent, true, NULL); + dc = __lookup_discard_cmd_ret(&dcc->root, start, + &prev_dc, &next_dc, &insert_p, &insert_parent); if (!dc) dc = next_dc; blk_start_plug(&plug); - while (dc && dc->lstart <= end) { + while (dc && dc->di.lstart <= end) { struct rb_node *node; int err = 0; - if (dc->len < dpolicy->granularity) + if (dc->di.len < dpolicy->granularity) goto skip; if (dc->state != D_PREP) { @@ -2992,7 +3070,7 @@ next: err = __submit_discard_cmd(sbi, dpolicy, dc, &issued); if (issued >= dpolicy->max_requests) { - start = dc->lstart + dc->len; + start = dc->di.lstart + dc->di.len; if (err) __remove_discard_cmd(sbi, dc); diff --git a/fs/file.c b/fs/file.c index c942c89ca4cd..7893ea161d77 100644 --- a/fs/file.c +++ b/fs/file.c @@ -642,6 +642,7 @@ static struct file *pick_file(struct files_struct *files, unsigned fd) if (fd >= fdt->max_fds) return NULL; + fd = array_index_nospec(fd, fdt->max_fds); file = fdt->fd[fd]; if (file) { rcu_assign_pointer(fdt->fd[fd], NULL); diff --git a/fs/fuse/backing.c b/fs/fuse/backing.c index e784c17306db..e5289c0c00fd 100644 --- a/fs/fuse/backing.c +++ b/fs/fuse/backing.c @@ -1173,8 +1173,6 @@ int fuse_handle_backing(struct fuse_entry_bpf *feb, struct inode **backing_inode path_put(backing_path); *backing_path = backing_file->f_path; path_get(backing_path); - - fput(backing_file); break; } @@ -1245,36 +1243,55 @@ struct dentry *fuse_lookup_finalize(struct fuse_bpf_args *fa, struct inode *dir, struct fuse_entry_bpf *feb = container_of(febo, struct fuse_entry_bpf, out); int error = -1; u64 target_nodeid = 0; + struct dentry *ret; fd = get_fuse_dentry(entry); - if (!fd) - return ERR_PTR(-EIO); + if (!fd) { + ret = ERR_PTR(-EIO); + goto out; + } + bd = fd->backing_path.dentry; - if (!bd) - return ERR_PTR(-ENOENT); + if (!bd) { + ret = ERR_PTR(-ENOENT); + goto out; + } + backing_inode = bd->d_inode; - if (!backing_inode) - return 0; + if (!backing_inode) { + ret = 0; + goto out; + } if (d_inode) target_nodeid = get_fuse_inode(d_inode)->nodeid; inode = fuse_iget_backing(dir->i_sb, target_nodeid, backing_inode); - if (IS_ERR(inode)) - return ERR_PTR(PTR_ERR(inode)); + if (IS_ERR(inode)) { + ret = ERR_PTR(PTR_ERR(inode)); + goto out; + } error = fuse_handle_bpf_prog(feb, dir, &get_fuse_inode(inode)->bpf); - if (error) - return ERR_PTR(error); + if (error) { + ret = ERR_PTR(error); + goto out; + } error = fuse_handle_backing(feb, &get_fuse_inode(inode)->backing_inode, &fd->backing_path); - if (error) - return ERR_PTR(error); + if (error) { + ret = ERR_PTR(error); + goto out; + } get_fuse_inode(inode)->nodeid = feo->nodeid; - return d_splice_alias(inode, entry); + ret = d_splice_alias(inode, entry); +out: + if (feb->backing_file) + fput(feb->backing_file); + return ret; } int fuse_revalidate_backing(struct dentry *entry, unsigned int flags) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index d7c90d733bb7..8c05913ff384 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -192,8 +192,10 @@ static bool backing_data_changed(struct fuse_inode *fi, struct dentry *entry, int err; bool ret = true; - if (!entry) - return false; + if (!entry) { + ret = false; + 
goto put_backing_file; + } get_fuse_backing_path(entry, &new_backing_path); new_backing_inode = fi->backing_inode; @@ -216,6 +218,9 @@ put_bpf: put_inode: iput(new_backing_inode); path_put(&new_backing_path); +put_backing_file: + if (bpf_arg->backing_file) + fput(bpf_arg->backing_file); return ret; } #endif @@ -530,7 +535,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name backing_inode = backing_file->f_inode; *inode = fuse_iget_backing(sb, outarg->nodeid, backing_inode); if (!*inode) - goto bpf_arg_out; + goto out; err = fuse_handle_backing(&bpf_arg, &get_fuse_inode(*inode)->backing_inode, @@ -541,8 +546,6 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name err = fuse_handle_bpf_prog(&bpf_arg, NULL, &get_fuse_inode(*inode)->bpf); if (err) goto out; -bpf_arg_out: - fput(backing_file); } else #endif { @@ -574,6 +577,8 @@ out_queue_forget: out_put_forget: kfree(forget); out: + if (bpf_arg.backing_file) + fput(bpf_arg.backing_file); return err; } diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index 765838578a72..a3eb1e826947 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -193,7 +193,8 @@ int dbMount(struct inode *ipbmap) bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth); bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart); bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size); - if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG) { + if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG || + bmp->db_agl2size < 0) { err = -EINVAL; goto err_release_metapage; } diff --git a/fs/locks.c b/fs/locks.c index 7dc129cc1a26..240b9309ed6d 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1862,9 +1862,10 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp, void **priv) { struct inode *inode = locks_inode(filp); + vfsuid_t vfsuid = i_uid_into_vfsuid(file_mnt_user_ns(filp), inode); int error; - if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE)) + if ((!vfsuid_eq_kuid(vfsuid, current_fsuid())) && !capable(CAP_LEASE)) return -EACCES; if (!S_ISREG(inode->i_mode)) return -EINVAL; diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 0d49c6bb22eb..59f9a8cee012 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -1037,7 +1037,9 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf, since = READ_ONCE(file->f_wb_err); if (verf) nfsd_copy_write_verifier(verf, nn); + file_start_write(file); host_err = vfs_iter_write(file, &iter, &pos, flags); + file_end_write(file); if (host_err < 0) { nfsd_reset_write_verifier(nn); trace_nfsd_writeverf_reset(nn, rqstp, host_err); diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c index e8b9b756f0ac..d76eb7b39f56 100644 --- a/fs/ubifs/budget.c +++ b/fs/ubifs/budget.c @@ -209,11 +209,10 @@ long long ubifs_calc_available(const struct ubifs_info *c, int min_idx_lebs) subtract_lebs += 1; /* - * The GC journal head LEB is not really accessible. And since - * different write types go to different heads, we may count only on - * one head's space. + * Since different write types go to different heads, we should + * reserve one leb for each head. */ - subtract_lebs += c->jhead_cnt - 1; + subtract_lebs += c->jhead_cnt; /* We also reserve one LEB for deletions, which bypass budgeting */ subtract_lebs += 1; @@ -400,7 +399,7 @@ static int calc_dd_growth(const struct ubifs_info *c, dd_growth = req->dirtied_page ? 
c->bi.page_budget : 0; if (req->dirtied_ino) - dd_growth += c->bi.inode_budget << (req->dirtied_ino - 1); + dd_growth += c->bi.inode_budget * req->dirtied_ino; if (req->mod_dent) dd_growth += c->bi.dent_budget; dd_growth += req->dirtied_ino_d; diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 0f29cf201136..5e6bcce94e64 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -1151,7 +1151,6 @@ static int ubifs_symlink(struct user_namespace *mnt_userns, struct inode *dir, int err, sz_change, len = strlen(symname); struct fscrypt_str disk_link; struct ubifs_budget_req req = { .new_ino = 1, .new_dent = 1, - .new_ino_d = ALIGN(len, 8), .dirtied_ino = 1 }; struct fscrypt_name nm; @@ -1167,6 +1166,7 @@ static int ubifs_symlink(struct user_namespace *mnt_userns, struct inode *dir, * Budget request settings: new inode, new direntry and changing parent * directory inode. */ + req.new_ino_d = ALIGN(disk_link.len - 1, 8); err = ubifs_budget_space(c, &req); if (err) return err; @@ -1324,6 +1324,8 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry, if (unlink) { ubifs_assert(c, inode_is_locked(new_inode)); + /* Budget for old inode's data when its nlink > 1. */ + req.dirtied_ino_d = ALIGN(ubifs_inode(new_inode)->data_len, 8); err = ubifs_purge_xattrs(new_inode); if (err) return err; @@ -1576,6 +1578,10 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry, return err; } + err = ubifs_budget_space(c, &req); + if (err) + goto out; + lock_4_inodes(old_dir, new_dir, NULL, NULL); time = current_time(old_dir); @@ -1601,6 +1607,7 @@ static int ubifs_xrename(struct inode *old_dir, struct dentry *old_dentry, unlock_4_inodes(old_dir, new_dir, NULL, NULL); ubifs_release_budget(c, &req); +out: fscrypt_free_filename(&fst_nm); fscrypt_free_filename(&snd_nm); return err; diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index f2353dd676ef..10c1779af9c5 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -1032,7 +1032,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc) if (page->index >= synced_i_size >> PAGE_SHIFT) { err = inode->i_sb->s_op->write_inode(inode, NULL); if (err) - goto out_unlock; + goto out_redirty; /* * The inode has been written, but the write-buffer has * not been synchronized, so in case of an unclean @@ -1060,11 +1060,17 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc) if (i_size > synced_i_size) { err = inode->i_sb->s_op->write_inode(inode, NULL); if (err) - goto out_unlock; + goto out_redirty; } return do_writepage(page, len); - +out_redirty: + /* + * redirty_page_for_writepage() won't call ubifs_dirty_inode() because + * it passes I_DIRTY_PAGES flag while calling __mark_inode_dirty(), so + * there is no need to do space budget for dirty inode. + */ + redirty_page_for_writepage(wbc, page); out_unlock: unlock_page(page); return err; @@ -1466,14 +1472,23 @@ static bool ubifs_release_folio(struct folio *folio, gfp_t unused_gfp_flags) struct inode *inode = folio->mapping->host; struct ubifs_info *c = inode->i_sb->s_fs_info; - /* - * An attempt to release a dirty page without budgeting for it - should - * not happen. - */ if (folio_test_writeback(folio)) return false; + + /* + * Page is private but not dirty, weird? There is one condition + * making it happened. ubifs_writepage skipped the page because + * page index beyonds isize (for example. truncated by other + * process named A), then the page is invalidated by fadvise64 + * syscall before being truncated by process A. 
+ */ ubifs_assert(c, folio_test_private(folio)); - ubifs_assert(c, 0); + if (folio_test_checked(folio)) + release_new_page_budget(c); + else + release_existing_page_budget(c); + + atomic_long_dec(&c->dirty_pg_cnt); folio_detach_private(folio); folio_clear_checked(folio); return true; diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index d0c9a09988bc..32cb14759796 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -833,7 +833,7 @@ static int alloc_wbufs(struct ubifs_info *c) INIT_LIST_HEAD(&c->jheads[i].buds_list); err = ubifs_wbuf_init(c, &c->jheads[i].wbuf); if (err) - return err; + goto out_wbuf; c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback; c->jheads[i].wbuf.jhead = i; @@ -841,7 +841,7 @@ static int alloc_wbufs(struct ubifs_info *c) c->jheads[i].log_hash = ubifs_hash_get_desc(c); if (IS_ERR(c->jheads[i].log_hash)) { err = PTR_ERR(c->jheads[i].log_hash); - goto out; + goto out_log_hash; } } @@ -854,9 +854,18 @@ static int alloc_wbufs(struct ubifs_info *c) return 0; -out: - while (i--) +out_log_hash: + kfree(c->jheads[i].wbuf.buf); + kfree(c->jheads[i].wbuf.inodes); + +out_wbuf: + while (i--) { + kfree(c->jheads[i].wbuf.buf); + kfree(c->jheads[i].wbuf.inodes); kfree(c->jheads[i].log_hash); + } + kfree(c->jheads); + c->jheads = NULL; return err; } diff --git a/fs/ubifs/sysfs.c b/fs/ubifs/sysfs.c index 06ad8fa1fcfb..54270ad36321 100644 --- a/fs/ubifs/sysfs.c +++ b/fs/ubifs/sysfs.c @@ -144,6 +144,8 @@ int __init ubifs_sysfs_init(void) kobject_set_name(&ubifs_kset.kobj, "ubifs"); ubifs_kset.kobj.parent = fs_kobj; ret = kset_register(&ubifs_kset); + if (ret) + kset_put(&ubifs_kset); return ret; } diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index 488f3da7a6c6..2469f72eeaab 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -267,11 +267,18 @@ static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c, if (zbr->len) { err = insert_old_idx(c, zbr->lnum, zbr->offs); if (unlikely(err)) - return ERR_PTR(err); + /* + * Obsolete znodes will be freed by tnc_destroy_cnext() + * or free_obsolete_znodes(), copied up znodes should + * be added back to tnc and freed by + * ubifs_destroy_tnc_subtree(). + */ + goto out; err = add_idx_dirt(c, zbr->lnum, zbr->len); } else err = 0; +out: zbr->znode = zn; zbr->lnum = 0; zbr->offs = 0; @@ -3053,6 +3060,21 @@ static void tnc_destroy_cnext(struct ubifs_info *c) cnext = cnext->cnext; if (ubifs_zn_obsolete(znode)) kfree(znode); + else if (!ubifs_zn_cow(znode)) { + /* + * Don't forget to update clean znode count after + * committing failed, because ubifs will check this + * count while closing tnc. Non-obsolete znode could + * be re-dirtied during committing process, so dirty + * flag is untrustable. The flag 'COW_ZNODE' is set + * for each dirty znode before committing, and it is + * cleared as long as the znode become clean, so we + * can statistic clean znode count according to this + * flag. 
+ */ + atomic_long_inc(&c->clean_zn_cnt); + atomic_long_inc(&ubifs_clean_zn_cnt); + } } while (cnext && cnext != c->cnext); } diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 478bbbb5382f..2f1f31581094 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1623,8 +1623,13 @@ static inline int ubifs_check_hmac(const struct ubifs_info *c, return crypto_memneq(expected, got, c->hmac_desc_len); } +#ifdef CONFIG_UBIFS_FS_AUTHENTICATION void ubifs_bad_hash(const struct ubifs_info *c, const void *node, const u8 *hash, int lnum, int offs); +#else +static inline void ubifs_bad_hash(const struct ubifs_info *c, const void *node, + const u8 *hash, int lnum, int offs) {}; +#endif int __ubifs_node_check_hash(const struct ubifs_info *c, const void *buf, const u8 *expected); diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 259152a08852..a4e875b61f89 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -443,7 +443,7 @@ static int udf_get_block(struct inode *inode, sector_t block, * Block beyond EOF and prealloc extents? Just discard preallocation * as it is not useful and complicates things. */ - if (((loff_t)block) << inode->i_blkbits > iinfo->i_lenExtents) + if (((loff_t)block) << inode->i_blkbits >= iinfo->i_lenExtents) udf_discard_prealloc(inode); udf_clear_extent_cache(inode); phys = inode_getblk(inode, block, &err, &new); diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index ab2d6266038a..6badc50ec4e6 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -534,6 +534,7 @@ int acpi_bus_update_power(acpi_handle handle, int *state_p); int acpi_device_update_power(struct acpi_device *device, int *state_p); bool acpi_bus_power_manageable(acpi_handle handle); void acpi_dev_power_up_children_with_adr(struct acpi_device *adev); +u8 acpi_dev_power_state_for_wake(struct acpi_device *adev); int acpi_device_power_add_dependent(struct acpi_device *adev, struct device *dev); void acpi_device_power_remove_dependent(struct acpi_device *adev, diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h index 41fd8352ab65..6c0c3dc3d3ac 100644 --- a/include/drm/display/drm_dp_mst_helper.h +++ b/include/drm/display/drm_dp_mst_helper.h @@ -867,6 +867,9 @@ struct drm_dp_mst_topology_state * drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr); struct drm_dp_mst_topology_state * +drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr); +struct drm_dp_mst_topology_state * drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr); struct drm_dp_mst_atomic_payload * diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h index 1611f9db878e..ca73940e26df 100644 --- a/include/linux/bootconfig.h +++ b/include/linux/bootconfig.h @@ -59,7 +59,7 @@ struct xbc_node { /* Maximum size of boot config is 32KB - 1 */ #define XBC_DATA_MAX (XBC_VALUE - 1) -#define XBC_NODE_MAX 1024 +#define XBC_NODE_MAX 8192 #define XBC_KEYLEN_MAX 256 #define XBC_DEPTH_MAX 16 diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 7ed532025fba..e97274c450ff 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -675,6 +675,8 @@ dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach) return !!attach->importer_ops; } +int dma_buf_get_each(int (*callback)(const struct dma_buf *dmabuf, + void *private), void *private); struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, struct 
device *dev); struct dma_buf_attachment * diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 728691365464..d2e617bffb3a 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -641,6 +641,20 @@ int its_init(struct fwnode_handle *handle, struct rdists *rdists, struct irq_domain *domain); int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent); +struct gic_chip_data_v3 { + struct fwnode_handle *fwnode; + void __iomem *dist_base; + struct redist_region *redist_regions; + struct rdists rdists; + struct irq_domain *domain; + u64 redist_stride; + u32 nr_redist_regions; + u64 flags; + bool has_rss; + unsigned int ppi_nr; + struct partition_desc **ppi_descs; +}; + static inline bool gic_enable_sre(void) { u32 val; diff --git a/include/linux/mdio/mdio-mscc-miim.h b/include/linux/mdio/mdio-mscc-miim.h index 5b4ed2c3cbb9..1ce699740af6 100644 --- a/include/linux/mdio/mdio-mscc-miim.h +++ b/include/linux/mdio/mdio-mscc-miim.h @@ -14,6 +14,6 @@ int mscc_miim_setup(struct device *device, struct mii_bus **bus, const char *name, struct regmap *mii_regmap, - int status_offset); + int status_offset, bool ignore_read_errors); #endif diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index 478aece17046..f198a8ac7ee7 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -70,8 +70,8 @@ struct mhi_ep_db_info { * @cmd_ctx_cache_phys: Physical address of the host command context cache * @chdb: Array of channel doorbell interrupt info * @event_lock: Lock for protecting event rings - * @list_lock: Lock for protecting state transition and channel doorbell lists * @state_lock: Lock for protecting state transitions + * @list_lock: Lock for protecting state transition and channel doorbell lists * @st_transition_list: List of state transitions * @ch_db_list: List of queued channel doorbells * @wq: Dedicated workqueue for handling rings and state changes @@ -117,8 +117,8 @@ struct mhi_ep_cntrl { struct mhi_ep_db_info chdb[4]; struct mutex event_lock; + struct mutex state_lock; spinlock_t list_lock; - spinlock_t state_lock; struct list_head st_transition_list; struct list_head ch_db_list; diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h index 119a0c9d2a8b..f45c80836539 100644 --- a/include/linux/page_owner.h +++ b/include/linux/page_owner.h @@ -8,6 +8,8 @@ extern struct static_key_false page_owner_inited; extern struct page_ext_operations page_owner_ops; +extern depot_stack_handle_t get_page_owner_handle(struct page_ext *page_ext, + unsigned long pfn); extern void __reset_page_owner(struct page *page, unsigned short order); extern void __set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index b362d90eb9b0..45c3d62e616d 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -3012,6 +3012,8 @@ #define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b #define PCI_DEVICE_ID_INTEL_S21152BB 0xb152 +#define PCI_VENDOR_ID_WANGXUN 0x8088 + #define PCI_VENDOR_ID_SCALEMP 0x8686 #define PCI_DEVICE_ID_SCALEMP_VSMP_CTL 0x1010 @@ -3092,6 +3094,8 @@ #define PCI_VENDOR_ID_3COM_2 0xa727 +#define PCI_VENDOR_ID_SOLIDRUN 0xd063 + #define PCI_VENDOR_ID_DIGIUM 0xd161 #define PCI_DEVICE_ID_DIGIUM_HFC4S 0xb410 diff --git a/include/media/v4l2-uvc.h b/include/media/v4l2-uvc.h index f83e31661333..b010a36fc1d9 100644 --- a/include/media/v4l2-uvc.h +++ b/include/media/v4l2-uvc.h @@ -99,6 +99,9 @@ #define UVC_GUID_FORMAT_BGR3 \ { 0x7d, 
0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \ 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70} +#define UVC_GUID_FORMAT_BGR4 \ + { 0x7e, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \ + 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70} #define UVC_GUID_FORMAT_M420 \ { 'M', '4', '2', '0', 0x00, 0x00, 0x10, 0x00, \ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} @@ -266,6 +269,11 @@ static struct uvc_format_desc uvc_fmts[] = { .guid = UVC_GUID_FORMAT_BGR3, .fcc = V4L2_PIX_FMT_BGR24, }, + { + .name = "BGRA/X 8:8:8:8 (BGR4)", + .guid = UVC_GUID_FORMAT_BGR4, + .fcc = V4L2_PIX_FMT_XBGR32, + }, { .name = "H.264", .guid = UVC_GUID_FORMAT_H264, diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h index 9c0ad64b8d29..ddf94356752d 100644 --- a/include/memory/renesas-rpc-if.h +++ b/include/memory/renesas-rpc-if.h @@ -64,24 +64,8 @@ enum rpcif_type { struct rpcif { struct device *dev; - void __iomem *base; void __iomem *dirmap; - struct regmap *regmap; - struct reset_control *rstc; size_t size; - enum rpcif_type type; - enum rpcif_data_dir dir; - u8 bus_size; - u8 xfer_size; - void *buffer; - u32 xferlen; - u32 smcr; - u32 smadr; - u32 command; /* DRCMR or SMCMR */ - u32 option; /* DROPR or SMOPR */ - u32 enable; /* DRENR or SMENR */ - u32 dummy; /* DRDMCR or SMDMCR */ - u32 ddr; /* DRDRENR or SMDRENR */ }; int rpcif_sw_init(struct rpcif *rpc, struct device *dev); diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h index 82d0e41b76f2..faa108b1ba67 100644 --- a/include/net/netfilter/nf_tproxy.h +++ b/include/net/netfilter/nf_tproxy.h @@ -17,6 +17,13 @@ static inline bool nf_tproxy_sk_is_transparent(struct sock *sk) return false; } +static inline void nf_tproxy_twsk_deschedule_put(struct inet_timewait_sock *tw) +{ + local_bh_disable(); + inet_twsk_deschedule_put(tw); + local_bh_enable(); +} + /* assign a socket to the skb -- consumes sk */ static inline void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) { diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 350f250b0dc7..00ee9eb601a6 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1409,6 +1409,7 @@ struct sctp_stream_priorities { /* The next stream in line */ struct sctp_stream_out_ext *next; __u16 prio; + __u16 users; }; struct sctp_stream_out_ext { diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h index 3e02709a1df6..83fe39931781 100644 --- a/include/net/tc_act/tc_pedit.h +++ b/include/net/tc_act/tc_pedit.h @@ -4,22 +4,29 @@ #include #include +#include struct tcf_pedit_key_ex { enum pedit_header_type htype; enum pedit_cmd cmd; }; -struct tcf_pedit { - struct tc_action common; - unsigned char tcfp_nkeys; - unsigned char tcfp_flags; - u32 tcfp_off_max_hint; +struct tcf_pedit_parms { struct tc_pedit_key *tcfp_keys; struct tcf_pedit_key_ex *tcfp_keys_ex; + u32 tcfp_off_max_hint; + unsigned char tcfp_nkeys; + unsigned char tcfp_flags; + struct rcu_head rcu; +}; + +struct tcf_pedit { + struct tc_action common; + struct tcf_pedit_parms __rcu *parms; }; #define to_pedit(a) ((struct tcf_pedit *)a) +#define to_pedit_parms(a) (rcu_dereference(to_pedit(a)->parms)) static inline bool is_tcf_pedit(const struct tc_action *a) { @@ -32,37 +39,81 @@ static inline bool is_tcf_pedit(const struct tc_action *a) static inline int tcf_pedit_nkeys(const struct tc_action *a) { - return to_pedit(a)->tcfp_nkeys; + struct tcf_pedit_parms *parms; + int nkeys; + + rcu_read_lock(); + parms = to_pedit_parms(a); + nkeys = parms->tcfp_nkeys; + 
rcu_read_unlock(); + + return nkeys; } static inline u32 tcf_pedit_htype(const struct tc_action *a, int index) { - if (to_pedit(a)->tcfp_keys_ex) - return to_pedit(a)->tcfp_keys_ex[index].htype; + u32 htype = TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK; + struct tcf_pedit_parms *parms; - return TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK; + rcu_read_lock(); + parms = to_pedit_parms(a); + if (parms->tcfp_keys_ex) + htype = parms->tcfp_keys_ex[index].htype; + rcu_read_unlock(); + + return htype; } static inline u32 tcf_pedit_cmd(const struct tc_action *a, int index) { - if (to_pedit(a)->tcfp_keys_ex) - return to_pedit(a)->tcfp_keys_ex[index].cmd; + struct tcf_pedit_parms *parms; + u32 cmd = __PEDIT_CMD_MAX; - return __PEDIT_CMD_MAX; + rcu_read_lock(); + parms = to_pedit_parms(a); + if (parms->tcfp_keys_ex) + cmd = parms->tcfp_keys_ex[index].cmd; + rcu_read_unlock(); + + return cmd; } static inline u32 tcf_pedit_mask(const struct tc_action *a, int index) { - return to_pedit(a)->tcfp_keys[index].mask; + struct tcf_pedit_parms *parms; + u32 mask; + + rcu_read_lock(); + parms = to_pedit_parms(a); + mask = parms->tcfp_keys[index].mask; + rcu_read_unlock(); + + return mask; } static inline u32 tcf_pedit_val(const struct tc_action *a, int index) { - return to_pedit(a)->tcfp_keys[index].val; + struct tcf_pedit_parms *parms; + u32 val; + + rcu_read_lock(); + parms = to_pedit_parms(a); + val = parms->tcfp_keys[index].val; + rcu_read_unlock(); + + return val; } static inline u32 tcf_pedit_offset(const struct tc_action *a, int index) { - return to_pedit(a)->tcfp_keys[index].off; + struct tcf_pedit_parms *parms; + u32 off; + + rcu_read_lock(); + parms = to_pedit_parms(a); + off = parms->tcfp_keys[index].off; + rcu_read_unlock(); + + return off; } #endif /* __NET_TC_PED_H */ diff --git a/include/trace/events/dlm.h b/include/trace/events/dlm.h index da0eaae98fa3..4ec47828d55e 100644 --- a/include/trace/events/dlm.h +++ b/include/trace/events/dlm.h @@ -46,6 +46,56 @@ { DLM_SBF_VALNOTVALID, "VALNOTVALID" }, \ { DLM_SBF_ALTMODE, "ALTMODE" }) +#define show_lkb_flags(flags) __print_flags(flags, "|", \ + { DLM_IFL_MSTCPY, "MSTCPY" }, \ + { DLM_IFL_RESEND, "RESEND" }, \ + { DLM_IFL_DEAD, "DEAD" }, \ + { DLM_IFL_OVERLAP_UNLOCK, "OVERLAP_UNLOCK" }, \ + { DLM_IFL_OVERLAP_CANCEL, "OVERLAP_CANCEL" }, \ + { DLM_IFL_ENDOFLIFE, "ENDOFLIFE" }, \ + { DLM_IFL_DEADLOCK_CANCEL, "DEADLOCK_CANCEL" }, \ + { DLM_IFL_STUB_MS, "STUB_MS" }, \ + { DLM_IFL_USER, "USER" }, \ + { DLM_IFL_ORPHAN, "ORPHAN" }) + +#define show_header_cmd(cmd) __print_symbolic(cmd, \ + { DLM_MSG, "MSG"}, \ + { DLM_RCOM, "RCOM"}, \ + { DLM_OPTS, "OPTS"}, \ + { DLM_ACK, "ACK"}, \ + { DLM_FIN, "FIN"}) + +#define show_message_version(version) __print_symbolic(version, \ + { DLM_VERSION_3_1, "3.1"}, \ + { DLM_VERSION_3_2, "3.2"}) + +#define show_message_type(type) __print_symbolic(type, \ + { DLM_MSG_REQUEST, "REQUEST"}, \ + { DLM_MSG_CONVERT, "CONVERT"}, \ + { DLM_MSG_UNLOCK, "UNLOCK"}, \ + { DLM_MSG_CANCEL, "CANCEL"}, \ + { DLM_MSG_REQUEST_REPLY, "REQUEST_REPLY"}, \ + { DLM_MSG_CONVERT_REPLY, "CONVERT_REPLY"}, \ + { DLM_MSG_UNLOCK_REPLY, "UNLOCK_REPLY"}, \ + { DLM_MSG_CANCEL_REPLY, "CANCEL_REPLY"}, \ + { DLM_MSG_GRANT, "GRANT"}, \ + { DLM_MSG_BAST, "BAST"}, \ + { DLM_MSG_LOOKUP, "LOOKUP"}, \ + { DLM_MSG_REMOVE, "REMOVE"}, \ + { DLM_MSG_LOOKUP_REPLY, "LOOKUP_REPLY"}, \ + { DLM_MSG_PURGE, "PURGE"}) + +#define show_rcom_type(type) __print_symbolic(type, \ + { DLM_RCOM_STATUS, "STATUS"}, \ + { DLM_RCOM_NAMES, "NAMES"}, \ + { DLM_RCOM_LOOKUP, "LOOKUP"}, \ + { DLM_RCOM_LOCK, "LOCK"}, \ 
+ { DLM_RCOM_STATUS_REPLY, "STATUS_REPLY"}, \ + { DLM_RCOM_NAMES_REPLY, "NAMES_REPLY"}, \ + { DLM_RCOM_LOOKUP_REPLY, "LOOKUP_REPLY"}, \ + { DLM_RCOM_LOCK_REPLY, "LOCK_REPLY"}) + + /* note: we begin tracing dlm_lock_start() only if ls and lkb are found */ TRACE_EVENT(dlm_lock_start, @@ -290,6 +340,253 @@ TRACE_EVENT(dlm_unlock_end, ); +DECLARE_EVENT_CLASS(dlm_rcom_template, + + TP_PROTO(uint32_t seq, const struct dlm_rcom *rc), + + TP_ARGS(seq, rc), + + TP_STRUCT__entry( + __field(uint32_t, seq) + __field(uint32_t, h_version) + __field(uint32_t, h_lockspace) + __field(uint32_t, h_nodeid) + __field(uint16_t, h_length) + __field(uint8_t, h_cmd) + __field(uint32_t, rc_type) + __field(int32_t, rc_result) + __field(uint64_t, rc_id) + __field(uint64_t, rc_seq) + __field(uint64_t, rc_seq_reply) + __dynamic_array(unsigned char, rc_buf, + le16_to_cpu(rc->rc_header.h_length) - sizeof(*rc)) + ), + + TP_fast_assign( + __entry->seq = seq; + __entry->h_version = le32_to_cpu(rc->rc_header.h_version); + __entry->h_lockspace = le32_to_cpu(rc->rc_header.u.h_lockspace); + __entry->h_nodeid = le32_to_cpu(rc->rc_header.h_nodeid); + __entry->h_length = le16_to_cpu(rc->rc_header.h_length); + __entry->h_cmd = rc->rc_header.h_cmd; + __entry->rc_type = le32_to_cpu(rc->rc_type); + __entry->rc_result = le32_to_cpu(rc->rc_result); + __entry->rc_id = le64_to_cpu(rc->rc_id); + __entry->rc_seq = le64_to_cpu(rc->rc_seq); + __entry->rc_seq_reply = le64_to_cpu(rc->rc_seq_reply); + memcpy(__get_dynamic_array(rc_buf), rc->rc_buf, + __get_dynamic_array_len(rc_buf)); + ), + + TP_printk("seq=%u, h_version=%s h_lockspace=%u h_nodeid=%u " + "h_length=%u h_cmd=%s rc_type=%s rc_result=%d " + "rc_id=%llu rc_seq=%llu rc_seq_reply=%llu " + "rc_buf=0x%s", __entry->seq, + show_message_version(__entry->h_version), + __entry->h_lockspace, __entry->h_nodeid, __entry->h_length, + show_header_cmd(__entry->h_cmd), + show_rcom_type(__entry->rc_type), + __entry->rc_result, __entry->rc_id, __entry->rc_seq, + __entry->rc_seq_reply, + __print_hex_str(__get_dynamic_array(rc_buf), + __get_dynamic_array_len(rc_buf))) + +); + +DEFINE_EVENT(dlm_rcom_template, dlm_send_rcom, + TP_PROTO(uint32_t seq, const struct dlm_rcom *rc), + TP_ARGS(seq, rc)); + +DEFINE_EVENT(dlm_rcom_template, dlm_recv_rcom, + TP_PROTO(uint32_t seq, const struct dlm_rcom *rc), + TP_ARGS(seq, rc)); + +TRACE_EVENT(dlm_send_message, + + TP_PROTO(uint32_t seq, const struct dlm_message *ms, + const void *name, int namelen), + + TP_ARGS(seq, ms, name, namelen), + + TP_STRUCT__entry( + __field(uint32_t, seq) + __field(uint32_t, h_version) + __field(uint32_t, h_lockspace) + __field(uint32_t, h_nodeid) + __field(uint16_t, h_length) + __field(uint8_t, h_cmd) + __field(uint32_t, m_type) + __field(uint32_t, m_nodeid) + __field(uint32_t, m_pid) + __field(uint32_t, m_lkid) + __field(uint32_t, m_remid) + __field(uint32_t, m_parent_lkid) + __field(uint32_t, m_parent_remid) + __field(uint32_t, m_exflags) + __field(uint32_t, m_sbflags) + __field(uint32_t, m_flags) + __field(uint32_t, m_lvbseq) + __field(uint32_t, m_hash) + __field(int32_t, m_status) + __field(int32_t, m_grmode) + __field(int32_t, m_rqmode) + __field(int32_t, m_bastmode) + __field(int32_t, m_asts) + __field(int32_t, m_result) + __dynamic_array(unsigned char, m_extra, + le16_to_cpu(ms->m_header.h_length) - sizeof(*ms)) + __dynamic_array(unsigned char, res_name, namelen) + ), + + TP_fast_assign( + __entry->seq = seq; + __entry->h_version = le32_to_cpu(ms->m_header.h_version); + __entry->h_lockspace = 
le32_to_cpu(ms->m_header.u.h_lockspace); + __entry->h_nodeid = le32_to_cpu(ms->m_header.h_nodeid); + __entry->h_length = le16_to_cpu(ms->m_header.h_length); + __entry->h_cmd = ms->m_header.h_cmd; + __entry->m_type = le32_to_cpu(ms->m_type); + __entry->m_nodeid = le32_to_cpu(ms->m_nodeid); + __entry->m_pid = le32_to_cpu(ms->m_pid); + __entry->m_lkid = le32_to_cpu(ms->m_lkid); + __entry->m_remid = le32_to_cpu(ms->m_remid); + __entry->m_parent_lkid = le32_to_cpu(ms->m_parent_lkid); + __entry->m_parent_remid = le32_to_cpu(ms->m_parent_remid); + __entry->m_exflags = le32_to_cpu(ms->m_exflags); + __entry->m_sbflags = le32_to_cpu(ms->m_sbflags); + __entry->m_flags = le32_to_cpu(ms->m_flags); + __entry->m_lvbseq = le32_to_cpu(ms->m_lvbseq); + __entry->m_hash = le32_to_cpu(ms->m_hash); + __entry->m_status = le32_to_cpu(ms->m_status); + __entry->m_grmode = le32_to_cpu(ms->m_grmode); + __entry->m_rqmode = le32_to_cpu(ms->m_rqmode); + __entry->m_bastmode = le32_to_cpu(ms->m_bastmode); + __entry->m_asts = le32_to_cpu(ms->m_asts); + __entry->m_result = le32_to_cpu(ms->m_result); + memcpy(__get_dynamic_array(m_extra), ms->m_extra, + __get_dynamic_array_len(m_extra)); + memcpy(__get_dynamic_array(res_name), name, + __get_dynamic_array_len(res_name)); + ), + + TP_printk("seq=%u h_version=%s h_lockspace=%u h_nodeid=%u " + "h_length=%u h_cmd=%s m_type=%s m_nodeid=%u " + "m_pid=%u m_lkid=%u m_remid=%u m_parent_lkid=%u " + "m_parent_remid=%u m_exflags=%s m_sbflags=%s m_flags=%s " + "m_lvbseq=%u m_hash=%u m_status=%d m_grmode=%s " + "m_rqmode=%s m_bastmode=%s m_asts=%d m_result=%d " + "m_extra=0x%s res_name=0x%s", + __entry->seq, show_message_version(__entry->h_version), + __entry->h_lockspace, __entry->h_nodeid, __entry->h_length, + show_header_cmd(__entry->h_cmd), + show_message_type(__entry->m_type), + __entry->m_nodeid, __entry->m_pid, __entry->m_lkid, + __entry->m_remid, __entry->m_parent_lkid, + __entry->m_parent_remid, show_lock_flags(__entry->m_exflags), + show_dlm_sb_flags(__entry->m_sbflags), + show_lkb_flags(__entry->m_flags), __entry->m_lvbseq, + __entry->m_hash, __entry->m_status, + show_lock_mode(__entry->m_grmode), + show_lock_mode(__entry->m_rqmode), + show_lock_mode(__entry->m_bastmode), + __entry->m_asts, __entry->m_result, + __print_hex_str(__get_dynamic_array(m_extra), + __get_dynamic_array_len(m_extra)), + __print_hex_str(__get_dynamic_array(res_name), + __get_dynamic_array_len(res_name))) + +); + +TRACE_EVENT(dlm_recv_message, + + TP_PROTO(uint32_t seq, const struct dlm_message *ms), + + TP_ARGS(seq, ms), + + TP_STRUCT__entry( + __field(uint32_t, seq) + __field(uint32_t, h_version) + __field(uint32_t, h_lockspace) + __field(uint32_t, h_nodeid) + __field(uint16_t, h_length) + __field(uint8_t, h_cmd) + __field(uint32_t, m_type) + __field(uint32_t, m_nodeid) + __field(uint32_t, m_pid) + __field(uint32_t, m_lkid) + __field(uint32_t, m_remid) + __field(uint32_t, m_parent_lkid) + __field(uint32_t, m_parent_remid) + __field(uint32_t, m_exflags) + __field(uint32_t, m_sbflags) + __field(uint32_t, m_flags) + __field(uint32_t, m_lvbseq) + __field(uint32_t, m_hash) + __field(int32_t, m_status) + __field(int32_t, m_grmode) + __field(int32_t, m_rqmode) + __field(int32_t, m_bastmode) + __field(int32_t, m_asts) + __field(int32_t, m_result) + __dynamic_array(unsigned char, m_extra, + le16_to_cpu(ms->m_header.h_length) - sizeof(*ms)) + ), + + TP_fast_assign( + __entry->seq = seq; + __entry->h_version = le32_to_cpu(ms->m_header.h_version); + __entry->h_lockspace = le32_to_cpu(ms->m_header.u.h_lockspace); + 
__entry->h_nodeid = le32_to_cpu(ms->m_header.h_nodeid); + __entry->h_length = le16_to_cpu(ms->m_header.h_length); + __entry->h_cmd = ms->m_header.h_cmd; + __entry->m_type = le32_to_cpu(ms->m_type); + __entry->m_nodeid = le32_to_cpu(ms->m_nodeid); + __entry->m_pid = le32_to_cpu(ms->m_pid); + __entry->m_lkid = le32_to_cpu(ms->m_lkid); + __entry->m_remid = le32_to_cpu(ms->m_remid); + __entry->m_parent_lkid = le32_to_cpu(ms->m_parent_lkid); + __entry->m_parent_remid = le32_to_cpu(ms->m_parent_remid); + __entry->m_exflags = le32_to_cpu(ms->m_exflags); + __entry->m_sbflags = le32_to_cpu(ms->m_sbflags); + __entry->m_flags = le32_to_cpu(ms->m_flags); + __entry->m_lvbseq = le32_to_cpu(ms->m_lvbseq); + __entry->m_hash = le32_to_cpu(ms->m_hash); + __entry->m_status = le32_to_cpu(ms->m_status); + __entry->m_grmode = le32_to_cpu(ms->m_grmode); + __entry->m_rqmode = le32_to_cpu(ms->m_rqmode); + __entry->m_bastmode = le32_to_cpu(ms->m_bastmode); + __entry->m_asts = le32_to_cpu(ms->m_asts); + __entry->m_result = le32_to_cpu(ms->m_result); + memcpy(__get_dynamic_array(m_extra), ms->m_extra, + __get_dynamic_array_len(m_extra)); + ), + + TP_printk("seq=%u h_version=%s h_lockspace=%u h_nodeid=%u " + "h_length=%u h_cmd=%s m_type=%s m_nodeid=%u " + "m_pid=%u m_lkid=%u m_remid=%u m_parent_lkid=%u " + "m_parent_remid=%u m_exflags=%s m_sbflags=%s m_flags=%s " + "m_lvbseq=%u m_hash=%u m_status=%d m_grmode=%s " + "m_rqmode=%s m_bastmode=%s m_asts=%d m_result=%d " + "m_extra=0x%s", + __entry->seq, show_message_version(__entry->h_version), + __entry->h_lockspace, __entry->h_nodeid, __entry->h_length, + show_header_cmd(__entry->h_cmd), + show_message_type(__entry->m_type), + __entry->m_nodeid, __entry->m_pid, __entry->m_lkid, + __entry->m_remid, __entry->m_parent_lkid, + __entry->m_parent_remid, show_lock_flags(__entry->m_exflags), + show_dlm_sb_flags(__entry->m_sbflags), + show_lkb_flags(__entry->m_flags), __entry->m_lvbseq, + __entry->m_hash, __entry->m_status, + show_lock_mode(__entry->m_grmode), + show_lock_mode(__entry->m_rqmode), + show_lock_mode(__entry->m_bastmode), + __entry->m_asts, __entry->m_result, + __print_hex_str(__get_dynamic_array(m_extra), + __get_dynamic_array_len(m_extra))) + +); + TRACE_EVENT(dlm_send, TP_PROTO(int nodeid, int ret), diff --git a/include/trace/hooks/audio_usboffload.h b/include/trace/hooks/audio_usboffload.h index 1b9dd276b8a8..e957fb122d42 100644 --- a/include/trace/hooks/audio_usboffload.h +++ b/include/trace/hooks/audio_usboffload.h @@ -12,18 +12,6 @@ struct usb_interface; struct snd_usb_audio; -DECLARE_HOOK(android_vh_audio_usb_offload_vendor_set, - TP_PROTO(void *arg), - TP_ARGS(arg)); - -DECLARE_HOOK(android_vh_audio_usb_offload_ep_action, - TP_PROTO(void *arg, bool action), - TP_ARGS(arg, action)); - -DECLARE_HOOK(android_vh_audio_usb_offload_synctype, - TP_PROTO(void *arg, int attr, bool *need_ignore), - TP_ARGS(arg, attr, need_ignore)); - DECLARE_HOOK(android_vh_audio_usb_offload_connect, TP_PROTO(struct usb_interface *intf, struct snd_usb_audio *chip), TP_ARGS(intf, chip)); diff --git a/include/trace/hooks/gic.h b/include/trace/hooks/gic.h index daa11cd2f598..7662eeb981e7 100644 --- a/include/trace/hooks/gic.h +++ b/include/trace/hooks/gic.h @@ -9,7 +9,13 @@ #include +struct gic_chip_data_v3; struct irq_data; + +DECLARE_HOOK(android_vh_gic_resume, + TP_PROTO(struct gic_chip_data_v3 *gd), + TP_ARGS(gd)); + DECLARE_HOOK(android_vh_gic_set_affinity, TP_PROTO(struct irq_data *d, const struct cpumask *mask_val, bool force, u8 *gic_cpu_map, void __iomem *reg), diff --git 
a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h index 6e8e572c2980..2ff0e8a3a683 100644 --- a/include/uapi/linux/usb/video.h +++ b/include/uapi/linux/usb/video.h @@ -179,6 +179,36 @@ #define UVC_CONTROL_CAP_AUTOUPDATE (1 << 3) #define UVC_CONTROL_CAP_ASYNCHRONOUS (1 << 4) +/* 3.9.2.6 Color Matching Descriptor Values */ +enum uvc_color_primaries_values { + UVC_COLOR_PRIMARIES_UNSPECIFIED, + UVC_COLOR_PRIMARIES_BT_709_SRGB, + UVC_COLOR_PRIMARIES_BT_470_2_M, + UVC_COLOR_PRIMARIES_BT_470_2_B_G, + UVC_COLOR_PRIMARIES_SMPTE_170M, + UVC_COLOR_PRIMARIES_SMPTE_240M, +}; + +enum uvc_transfer_characteristics_values { + UVC_TRANSFER_CHARACTERISTICS_UNSPECIFIED, + UVC_TRANSFER_CHARACTERISTICS_BT_709, + UVC_TRANSFER_CHARACTERISTICS_BT_470_2_M, + UVC_TRANSFER_CHARACTERISTICS_BT_470_2_B_G, + UVC_TRANSFER_CHARACTERISTICS_SMPTE_170M, + UVC_TRANSFER_CHARACTERISTICS_SMPTE_240M, + UVC_TRANSFER_CHARACTERISTICS_LINEAR, + UVC_TRANSFER_CHARACTERISTICS_SRGB, +}; + +enum uvc_matrix_coefficients { + UVC_MATRIX_COEFFICIENTS_UNSPECIFIED, + UVC_MATRIX_COEFFICIENTS_BT_709, + UVC_MATRIX_COEFFICIENTS_FCC, + UVC_MATRIX_COEFFICIENTS_BT_470_2_B_G, + UVC_MATRIX_COEFFICIENTS_SMPTE_170M, + UVC_MATRIX_COEFFICIENTS_SMPTE_240M, +}; + /* ------------------------------------------------------------------------ * UVC structures */ diff --git a/include/uapi/linux/uvcvideo.h b/include/uapi/linux/uvcvideo.h index 8288137387c0..a9d0a64007ba 100644 --- a/include/uapi/linux/uvcvideo.h +++ b/include/uapi/linux/uvcvideo.h @@ -86,7 +86,7 @@ struct uvc_xu_control_query { * struct. The first two fields are added by the driver, they can be used for * clock synchronisation. The rest is an exact copy of a UVC payload header. * Only complete objects with complete buffers are included. Therefore it's - * always sizeof(meta->ts) + sizeof(meta->sof) + meta->length bytes large. + * always sizeof(meta->ns) + sizeof(meta->sof) + meta->length bytes large. 
*/ struct uvc_meta_buf { __u64 ns; diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c index e2c46889d5fa..746b137b96e9 100644 --- a/io_uring/kbuf.c +++ b/io_uring/kbuf.c @@ -509,7 +509,7 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg) } pages = io_pin_pages(reg.ring_addr, - struct_size(br, bufs, reg.ring_entries), + flex_array_size(br, bufs, reg.ring_entries), &nr_pages); if (IS_ERR(pages)) { kfree(free_bl); diff --git a/io_uring/net.c b/io_uring/net.c index 55d822beaf08..4fdc2770bbe4 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -126,13 +126,15 @@ static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req, struct io_cache_entry *entry; struct io_async_msghdr *hdr; - if (!(issue_flags & IO_URING_F_UNLOCKED) && - (entry = io_alloc_cache_get(&ctx->netmsg_cache)) != NULL) { - hdr = container_of(entry, struct io_async_msghdr, cache); - hdr->free_iov = NULL; - req->flags |= REQ_F_ASYNC_DATA; - req->async_data = hdr; - return hdr; + if (!(issue_flags & IO_URING_F_UNLOCKED)) { + entry = io_alloc_cache_get(&ctx->netmsg_cache); + if (entry) { + hdr = container_of(entry, struct io_async_msghdr, cache); + hdr->free_iov = NULL; + req->flags |= REQ_F_ASYNC_DATA; + req->async_data = hdr; + return hdr; + } } if (!io_alloc_async_data(req)) { diff --git a/io_uring/poll.c b/io_uring/poll.c index ab5ae475840f..56dbd1863c78 100644 --- a/io_uring/poll.c +++ b/io_uring/poll.c @@ -668,6 +668,14 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head, __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll); } +/* + * We can't reliably detect loops in repeated poll triggers and issue + * subsequently failing. But rather than fail these immediately, allow a + * certain amount of retries before we give up. Given that this condition + * should _rarely_ trigger even once, we should be fine with a larger value. 
+ */ +#define APOLL_MAX_RETRY 128 + static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req, unsigned issue_flags) { @@ -678,16 +686,23 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req, if (req->flags & REQ_F_POLLED) { apoll = req->apoll; kfree(apoll->double_poll); - } else if (!(issue_flags & IO_URING_F_UNLOCKED) && - (entry = io_alloc_cache_get(&ctx->apoll_cache)) != NULL) { + } else if (!(issue_flags & IO_URING_F_UNLOCKED)) { + entry = io_alloc_cache_get(&ctx->apoll_cache); + if (entry == NULL) + goto alloc_apoll; apoll = container_of(entry, struct async_poll, cache); + apoll->poll.retries = APOLL_MAX_RETRY; } else { +alloc_apoll: apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC); if (unlikely(!apoll)) return NULL; + apoll->poll.retries = APOLL_MAX_RETRY; } apoll->double_poll = NULL; req->apoll = apoll; + if (unlikely(!--apoll->poll.retries)) + return NULL; return apoll; } @@ -709,8 +724,6 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags) return IO_APOLL_ABORTED; if (!file_can_poll(req->file)) return IO_APOLL_ABORTED; - if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED) - return IO_APOLL_ABORTED; if (!(req->flags & REQ_F_APOLL_MULTISHOT)) mask |= EPOLLONESHOT; diff --git a/io_uring/poll.h b/io_uring/poll.h index 5f3bae50fc81..b2393b403a2c 100644 --- a/io_uring/poll.h +++ b/io_uring/poll.h @@ -12,6 +12,7 @@ struct io_poll { struct file *file; struct wait_queue_head *head; __poll_t events; + int retries; struct wait_queue_entry wait; }; diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c index e50de0b6b9f8..18dfc5f6a8b7 100644 --- a/io_uring/uring_cmd.c +++ b/io_uring/uring_cmd.c @@ -108,7 +108,7 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags) struct file *file = req->file; int ret; - if (!req->file->f_op->uring_cmd) + if (!file->f_op->uring_cmd) return -EOPNOTSUPP; ret = security_uring_cmd(ioucmd); @@ -120,6 +120,8 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags) if (ctx->flags & IORING_SETUP_CQE32) issue_flags |= IO_URING_F_CQE32; if (ctx->flags & IORING_SETUP_IOPOLL) { + if (!file->f_op->uring_cmd_iopoll) + return -EOPNOTSUPP; issue_flags |= IO_URING_F_IOPOLL; req->iopoll_completed = 0; WRITE_ONCE(ioucmd->cookie, NULL); diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index b19d4c9362b0..540c167bb58e 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -4177,6 +4177,7 @@ static int btf_datasec_resolve(struct btf_verifier_env *env, struct btf *btf = env->btf; u16 i; + env->resolve_mode = RESOLVE_TBD; for_each_vsi_from(i, v->next_member, v->t, vsi) { u32 var_type_id = vsi->type, type_id, type_size = 0; const struct btf_type *var_type = btf_type_by_id(env->btf, diff --git a/kernel/fail_function.c b/kernel/fail_function.c index a7ccd2930c5f..d971a0189319 100644 --- a/kernel/fail_function.c +++ b/kernel/fail_function.c @@ -163,10 +163,7 @@ static void fei_debugfs_add_attr(struct fei_attr *attr) static void fei_debugfs_remove_attr(struct fei_attr *attr) { - struct dentry *dir; - - dir = debugfs_lookup(attr->kp.symbol_name, fei_debugfs_dir); - debugfs_remove_recursive(dir); + debugfs_lookup_and_remove(attr->kp.symbol_name, fei_debugfs_dir); } static int fei_kprobe_handler(struct kprobe *kp, struct pt_regs *regs) diff --git a/kernel/fork.c b/kernel/fork.c index d37b2666e6c7..63b34cf7ac64 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2943,7 +2943,7 @@ static bool clone3_args_valid(struct kernel_clone_args *kargs) * - make the CLONE_DETACHED bit reusable for clone3 * - make the 
CSIGNAL bits reusable for clone3 */ - if (kargs->flags & (CLONE_DETACHED | CSIGNAL)) + if (kargs->flags & (CLONE_DETACHED | (CSIGNAL & (~CLONE_NEWTIME)))) return false; if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) == diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c index bbd945bacef0..961d4af76af3 100644 --- a/kernel/irq/ipi.c +++ b/kernel/irq/ipi.c @@ -188,9 +188,9 @@ EXPORT_SYMBOL_GPL(ipi_get_hwirq); static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data, const struct cpumask *dest, unsigned int cpu) { - const struct cpumask *ipimask = irq_data_get_affinity_mask(data); + const struct cpumask *ipimask; - if (!chip || !ipimask) + if (!chip || !data) return -EINVAL; if (!chip->ipi_send_single && !chip->ipi_send_mask) @@ -199,6 +199,10 @@ static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data, if (cpu >= nr_cpu_ids) return -EINVAL; + ipimask = irq_data_get_affinity_mask(data); + if (!ipimask) + return -EINVAL; + if (dest) { if (!cpumask_subset(dest, ipimask)) return -EINVAL; diff --git a/kernel/printk/index.c b/kernel/printk/index.c index c85be186a783..a6b27526baaf 100644 --- a/kernel/printk/index.c +++ b/kernel/printk/index.c @@ -145,7 +145,7 @@ static void pi_create_file(struct module *mod) #ifdef CONFIG_MODULES static void pi_remove_file(struct module *mod) { - debugfs_remove(debugfs_lookup(pi_get_module_name(mod), dfs_index)); + debugfs_lookup_and_remove(pi_get_module_name(mod), dfs_index); } static int pi_module_notify(struct notifier_block *nb, unsigned long op, diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index e83c321461cf..932e2097ad44 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c @@ -1411,27 +1411,19 @@ static int psi_cpu_show(struct seq_file *m, void *v) return psi_show(m, &psi_system, PSI_CPU); } -static int psi_open(struct file *file, int (*psi_show)(struct seq_file *, void *)) -{ - if (file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE)) - return -EPERM; - - return single_open(file, psi_show, NULL); -} - static int psi_io_open(struct inode *inode, struct file *file) { - return psi_open(file, psi_io_show); + return single_open(file, psi_io_show, NULL); } static int psi_memory_open(struct inode *inode, struct file *file) { - return psi_open(file, psi_memory_show); + return single_open(file, psi_memory_show, NULL); } static int psi_cpu_open(struct inode *inode, struct file *file) { - return psi_open(file, psi_cpu_show); + return single_open(file, psi_cpu_show, NULL); } static ssize_t psi_write(struct file *file, const char __user *user_buf, @@ -1545,7 +1537,7 @@ static int psi_irq_show(struct seq_file *m, void *v) static int psi_irq_open(struct inode *inode, struct file *file) { - return psi_open(file, psi_irq_show); + return single_open(file, psi_irq_show, NULL); } static ssize_t psi_irq_write(struct file *file, const char __user *user_buf, @@ -1568,11 +1560,11 @@ static int __init psi_proc_init(void) { if (psi_enable) { proc_mkdir("pressure", NULL); - proc_create("pressure/io", 0666, NULL, &psi_io_proc_ops); - proc_create("pressure/memory", 0666, NULL, &psi_memory_proc_ops); - proc_create("pressure/cpu", 0666, NULL, &psi_cpu_proc_ops); + proc_create("pressure/io", 0, NULL, &psi_io_proc_ops); + proc_create("pressure/memory", 0, NULL, &psi_memory_proc_ops); + proc_create("pressure/cpu", 0, NULL, &psi_cpu_proc_ops); #ifdef CONFIG_IRQ_TIME_ACCOUNTING - proc_create("pressure/irq", 0666, NULL, &psi_irq_proc_ops); + proc_create("pressure/irq", 0, NULL, &psi_irq_proc_ops); #endif } return 0; diff --git 
a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index fbc3467e8310..b6d2945016b4 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -5791,11 +5791,16 @@ EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page); */ void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data) { - struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; + struct ring_buffer_per_cpu *cpu_buffer; struct buffer_data_page *bpage = data; struct page *page = virt_to_page(bpage); unsigned long flags; + if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) + return; + + cpu_buffer = buffer->buffers[cpu]; + /* If the page is still in use someplace else, we can't reuse it */ if (page_ref_count(page) > 1) goto out; diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c index a6f9bdd956c3..f10f403104e7 100644 --- a/kernel/watch_queue.c +++ b/kernel/watch_queue.c @@ -273,6 +273,7 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes) if (ret < 0) goto error; + ret = -ENOMEM; pages = kcalloc(sizeof(struct page *), nr_pages, GFP_KERNEL); if (!pages) goto error; diff --git a/mm/page_ext.c b/mm/page_ext.c index ddf1968560f0..1b3a67b9d5e0 100644 --- a/mm/page_ext.c +++ b/mm/page_ext.c @@ -163,6 +163,7 @@ struct page_ext *page_ext_get(struct page *page) return page_ext; } +EXPORT_SYMBOL_NS_GPL(page_ext_get, MINIDUMP); /** * page_ext_put() - Working with page extended information is done. @@ -181,6 +182,7 @@ void page_ext_put(struct page_ext *page_ext) rcu_read_unlock(); } +EXPORT_SYMBOL_NS_GPL(page_ext_put, MINIDUMP); #ifndef CONFIG_SPARSEMEM diff --git a/mm/page_owner.c b/mm/page_owner.c index 2d27f532df4c..ab06f4391954 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -106,6 +106,25 @@ static inline struct page_owner *get_page_owner(struct page_ext *page_ext) return (void *)page_ext + page_owner_ops.offset; } +depot_stack_handle_t get_page_owner_handle(struct page_ext *page_ext, unsigned long pfn) +{ + struct page_owner *page_owner; + depot_stack_handle_t handle; + + if (!static_branch_unlikely(&page_owner_inited)) + return 0; + + page_owner = get_page_owner(page_ext); + + /* skip handle for tail pages of higher order allocations */ + if (!IS_ALIGNED(pfn, 1 << page_owner->order)) + return 0; + + handle = READ_ONCE(page_owner->handle); + return handle; +} +EXPORT_SYMBOL_NS_GPL(get_page_owner_handle, MINIDUMP); + static noinline depot_stack_handle_t save_stack(gfp_t flags) { unsigned long entries[PAGE_OWNER_STACK_DEPTH]; diff --git a/mm/percpu.c b/mm/percpu.c index 5457a2c9c250..18d08095b356 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -3448,6 +3448,7 @@ unsigned long pcpu_nr_pages(void) { return pcpu_nr_populated * pcpu_nr_units; } +EXPORT_SYMBOL_GPL(pcpu_nr_pages); /* * Percpu allocator is initialized early during boot when neither slab or diff --git a/mm/slab.c b/mm/slab.c index 59c8e28f7b6a..c30a6a3cd48e 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3927,6 +3927,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) sinfo->objects_per_slab = cachep->num; sinfo->cache_order = cachep->gfporder; } +EXPORT_SYMBOL_NS_GPL(get_slabinfo, MINIDUMP); void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) { diff --git a/mm/slab.h b/mm/slab.h index 0202a8c2f0d2..553228999c32 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -282,6 +282,24 @@ void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller); gfp_t kmalloc_fix_flags(gfp_t flags); +#ifdef CONFIG_SLUB +/* + * Tracking user of a slab. 
+ */ +#define TRACK_ADDRS_COUNT 16 +struct track { + unsigned long addr; /* Called from address */ +#ifdef CONFIG_STACKDEPOT + depot_stack_handle_t handle; +#endif + int cpu; /* Was running on cpu */ + int pid; /* Pid context */ + unsigned long when; /* When did the operation occur */ +}; + +enum track_item { TRACK_ALLOC, TRACK_FREE }; +#endif + /* Functions provided by the slab allocators */ int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags); @@ -399,6 +417,10 @@ DECLARE_STATIC_KEY_FALSE(slub_debug_enabled); #endif extern void print_tracking(struct kmem_cache *s, void *object); long validate_slab_cache(struct kmem_cache *s); +extern unsigned long get_each_object_track(struct kmem_cache *s, + struct page *page, enum track_item alloc, + int (*fn)(const struct kmem_cache *, const void *, + const struct track *, void *), void *private); static inline bool __slub_debug_enabled(void) { return static_branch_unlikely(&slub_debug_enabled); @@ -407,6 +429,15 @@ static inline bool __slub_debug_enabled(void) static inline void print_tracking(struct kmem_cache *s, void *object) { } +#ifdef CONFIG_SLUB +static inline unsigned long get_each_object_track(struct kmem_cache *s, + struct page *page, enum track_item alloc, + int (*fn)(const struct kmem_cache *, const void *, + const struct track *, void *), void *private) +{ + return 0; +} +#endif static inline bool __slub_debug_enabled(void) { return false; diff --git a/mm/slub.c b/mm/slub.c index 157527d7101b..3c9d4e333963 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -282,22 +282,6 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) /* Use cmpxchg_double */ #define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000U) -/* - * Tracking user of a slab. - */ -#define TRACK_ADDRS_COUNT 16 -struct track { - unsigned long addr; /* Called from address */ -#ifdef CONFIG_STACKDEPOT - depot_stack_handle_t handle; -#endif - int cpu; /* Was running on cpu */ - int pid; /* Pid context */ - unsigned long when; /* When did the operation occur */ -}; - -enum track_item { TRACK_ALLOC, TRACK_FREE }; - #ifdef CONFIG_SYSFS static int sysfs_slab_add(struct kmem_cache *); static int sysfs_slab_alias(struct kmem_cache *, const char *); @@ -725,6 +709,42 @@ static struct track *get_track(struct kmem_cache *s, void *object, return kasan_reset_tag(p + alloc); } +/* + * This function will be used to loop through all the slab objects in + * a page to give track structure for each object, the function fn will + * be using this track structure and extract required info into its private + * data, the return value will be the number of track structures that are + * processed. 
+ */ +unsigned long get_each_object_track(struct kmem_cache *s, + struct page *page, enum track_item alloc, + int (*fn)(const struct kmem_cache *, const void *, + const struct track *, void *), void *private) +{ + void *p; + struct track *t; + int ret; + unsigned long num_track = 0; + struct slab *p_slab = page_slab(page); + + if (!slub_debug || !(s->flags & SLAB_STORE_USER) || !p_slab) + return 0; + + slab_lock(p_slab); + for_each_object(p, s, page_address(page), p_slab->objects) { + t = get_track(s, p, alloc); + metadata_access_enable(); + ret = fn(s, p, t, private); + metadata_access_disable(); + if (ret < 0) + break; + num_track += 1; + } + slab_unlock(p_slab); + return num_track; +} +EXPORT_SYMBOL_NS_GPL(get_each_object_track, MINIDUMP); + #ifdef CONFIG_STACKDEPOT static noinline depot_stack_handle_t set_track_prepare(void) { @@ -6297,6 +6317,7 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) sinfo->objects_per_slab = oo_objects(s->oo); sinfo->cache_order = oo_order(s->oo); } +EXPORT_SYMBOL_NS_GPL(get_slabinfo, MINIDUMP); void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) { diff --git a/mm/swapfile.c b/mm/swapfile.c index b72908df52ac..c8c88e2734c0 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -3260,6 +3260,7 @@ void si_swapinfo(struct sysinfo *val) val->totalswap = total_swap_pages + nr_to_be_unused; spin_unlock(&swap_lock); } +EXPORT_SYMBOL_NS_GPL(si_swapinfo, MINIDUMP); /* * Verify that a swap entry is valid and increment its swap map count. diff --git a/mm/vmalloc.c b/mm/vmalloc.c index bdcbde04caa8..de9b4f79e829 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -811,6 +811,7 @@ unsigned long vmalloc_nr_pages(void) { return atomic_long_read(&nr_vmalloc_pages); } +EXPORT_SYMBOL_GPL(vmalloc_nr_pages); /* Look up the first VA which satisfies addr < va_end, NULL if none. 
*/ static struct vmap_area *find_vmap_area_exceed_addr(unsigned long addr) diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index e9a830c69058..29a9292303ea 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c @@ -386,6 +386,7 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c) struct p9_trans_rdma *rdma = client->trans; struct ib_recv_wr wr; struct ib_sge sge; + int ret; c->busa = ib_dma_map_single(rdma->cm_id->device, c->rc.sdata, client->msize, @@ -403,7 +404,12 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c) wr.wr_cqe = &c->cqe; wr.sg_list = &sge; wr.num_sge = 1; - return ib_post_recv(rdma->qp, &wr, NULL); + + ret = ib_post_recv(rdma->qp, &wr, NULL); + if (ret) + ib_dma_unmap_single(rdma->cm_id->device, c->busa, + client->msize, DMA_FROM_DEVICE); + return ret; error: p9_debug(P9_DEBUG_ERROR, "EIO\n"); @@ -500,7 +506,7 @@ dont_need_post_recv: if (down_interruptible(&rdma->sq_sem)) { err = -EINTR; - goto send_error; + goto dma_unmap; } /* Mark request as `sent' *before* we actually send it, @@ -510,11 +516,14 @@ dont_need_post_recv: WRITE_ONCE(req->status, REQ_STATUS_SENT); err = ib_post_send(rdma->qp, &wr, NULL); if (err) - goto send_error; + goto dma_unmap; /* Success */ return 0; +dma_unmap: + ib_dma_unmap_single(rdma->cm_id->device, c->busa, + c->req->tc.size, DMA_TO_DEVICE); /* Handle errors that happened during or while preparing the send: */ send_error: WRITE_ONCE(req->status, REQ_STATUS_ERROR); diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c index cf1b89ba522b..75c03a82baf3 100644 --- a/net/9p/trans_xen.c +++ b/net/9p/trans_xen.c @@ -371,19 +371,24 @@ out: return ret; } -static int xen_9pfs_front_probe(struct xenbus_device *dev, - const struct xenbus_device_id *id) +static int xen_9pfs_front_init(struct xenbus_device *dev) { int ret, i; struct xenbus_transaction xbt; - struct xen_9pfs_front_priv *priv = NULL; - char *versions; + struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev); + char *versions, *v; unsigned int max_rings, max_ring_order, len = 0; versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len); if (IS_ERR(versions)) return PTR_ERR(versions); - if (strcmp(versions, "1")) { + for (v = versions; *v; v++) { + if (simple_strtoul(v, &v, 10) == 1) { + v = NULL; + break; + } + } + if (v) { kfree(versions); return -EINVAL; } @@ -398,11 +403,6 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev, if (p9_xen_trans.maxsize > XEN_FLEX_RING_SIZE(max_ring_order)) p9_xen_trans.maxsize = XEN_FLEX_RING_SIZE(max_ring_order) / 2; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - priv->dev = dev; priv->num_rings = XEN_9PFS_NUM_RINGS; priv->rings = kcalloc(priv->num_rings, sizeof(*priv->rings), GFP_KERNEL); @@ -461,23 +461,35 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev, goto error; } - write_lock(&xen_9pfs_lock); - list_add_tail(&priv->list, &xen_9pfs_devs); - write_unlock(&xen_9pfs_lock); - dev_set_drvdata(&dev->dev, priv); - xenbus_switch_state(dev, XenbusStateInitialised); - return 0; error_xenbus: xenbus_transaction_end(xbt, 1); xenbus_dev_fatal(dev, ret, "writing xenstore"); error: - dev_set_drvdata(&dev->dev, NULL); xen_9pfs_front_free(priv); return ret; } +static int xen_9pfs_front_probe(struct xenbus_device *dev, + const struct xenbus_device_id *id) +{ + struct xen_9pfs_front_priv *priv = NULL; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = dev; + dev_set_drvdata(&dev->dev, priv); + + write_lock(&xen_9pfs_lock); + 
list_add_tail(&priv->list, &xen_9pfs_devs); + write_unlock(&xen_9pfs_lock); + + return 0; +} + static int xen_9pfs_front_resume(struct xenbus_device *dev) { dev_warn(&dev->dev, "suspend/resume unsupported\n"); @@ -496,6 +508,8 @@ static void xen_9pfs_front_changed(struct xenbus_device *dev, break; case XenbusStateInitWait: + if (!xen_9pfs_front_init(dev)) + xenbus_switch_state(dev, XenbusStateInitialised); break; case XenbusStateConnected: diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index ce5dfa3babd2..757ec46fc45a 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1090,7 +1090,7 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl, audit_log_nfcfg(repl->name, AF_BRIDGE, repl->nentries, AUDIT_XT_OP_REPLACE, GFP_KERNEL); - return ret; + return 0; free_unlock: mutex_unlock(&ebt_mutex); diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c index ebc202ffdd8d..bf61ea4b8132 100644 --- a/net/caif/caif_usb.c +++ b/net/caif/caif_usb.c @@ -134,6 +134,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, struct usb_device *usbdev; int res; + if (what == NETDEV_UNREGISTER && dev->reg_state >= NETREG_UNREGISTERED) + return 0; + /* Check whether we have a NCM device, and find its VID/PID. */ if (!(dev->dev.parent && dev->dev.parent->driver && strcmp(dev->dev.parent->driver->name, "cdc_ncm") == 0)) diff --git a/net/core/dev.c b/net/core/dev.c index 6f768184d148..00d25f1542c7 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3153,8 +3153,10 @@ void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) { if (in_hardirq() || irqs_disabled()) __dev_kfree_skb_irq(skb, reason); + else if (unlikely(reason == SKB_REASON_DROPPED)) + kfree_skb(skb); else - dev_kfree_skb(skb); + consume_skb(skb); } EXPORT_SYMBOL(__dev_kfree_skb_any); diff --git a/net/core/sock.c b/net/core/sock.c index bfb99a6c9164..a78df5082a8d 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2806,7 +2806,8 @@ static void sk_enter_memory_pressure(struct sock *sk) static void sk_leave_memory_pressure(struct sock *sk) { if (sk->sk_prot->leave_memory_pressure) { - sk->sk_prot->leave_memory_pressure(sk); + INDIRECT_CALL_INET_1(sk->sk_prot->leave_memory_pressure, + tcp_leave_memory_pressure, sk); } else { unsigned long *memory_pressure = sk->sk_prot->memory_pressure; diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index ffc0cab7cf18..2407066b0fec 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1525,6 +1525,10 @@ int arpt_register_table(struct net *net, new_table = xt_register_table(net, table, &bootstrap, newinfo); if (IS_ERR(new_table)) { + struct arpt_entry *iter; + + xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) + cleanup_entry(iter, net); xt_free_table_info(newinfo); return PTR_ERR(new_table); } diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 2ed7c58b471a..da5998011ab9 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1045,7 +1045,6 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, struct xt_counters *counters; struct ipt_entry *iter; - ret = 0; counters = xt_counters_alloc(num_counters); if (!counters) { ret = -ENOMEM; @@ -1091,7 +1090,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n"); } vfree(counters); - return 
ret; + return 0; put_module: module_put(t->me); @@ -1742,6 +1741,10 @@ int ipt_register_table(struct net *net, const struct xt_table *table, new_table = xt_register_table(net, table, &bootstrap, newinfo); if (IS_ERR(new_table)) { + struct ipt_entry *iter; + + xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) + cleanup_entry(iter, net); xt_free_table_info(newinfo); return PTR_ERR(new_table); } diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c index b22b2c745c76..69e331799604 100644 --- a/net/ipv4/netfilter/nf_tproxy_ipv4.c +++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c @@ -38,7 +38,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb, hp->source, lport ? lport : hp->dest, skb->dev, NF_TPROXY_LOOKUP_LISTENER); if (sk2) { - inet_twsk_deschedule_put(inet_twsk(sk)); + nf_tproxy_twsk_deschedule_put(inet_twsk(sk)); sk = sk2; } } diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index cf26d65ca389..ebf917511937 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -186,6 +186,9 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk, if (unlikely(flags & MSG_ERRQUEUE)) return inet_recv_error(sk, msg, len, addr_len); + if (!len) + return 0; + psock = sk_psock_get(sk); if (unlikely(!psock)) return tcp_recvmsg(sk, msg, len, flags, addr_len); @@ -244,6 +247,9 @@ static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, if (unlikely(flags & MSG_ERRQUEUE)) return inet_recv_error(sk, msg, len, addr_len); + if (!len) + return 0; + psock = sk_psock_get(sk); if (unlikely(!psock)) return tcp_recvmsg(sk, msg, len, flags, addr_len); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index c375f603a16c..7f37e7da6467 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -580,6 +580,9 @@ EXPORT_SYMBOL(tcp_create_openreq_child); * validation and inside tcp_v4_reqsk_send_ack(). Can we do better? * * We don't need to initialize tmp_opt.sack_ok as we don't use the results + * + * Note: If @fastopen is true, this can be called from process context. + * Otherwise, this is from BH context. 
*/ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, @@ -731,7 +734,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, &tcp_rsk(req)->last_oow_ack_time)) req->rsk_ops->send_ack(sk, skb, req); if (paws_reject) - __NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); + NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); return NULL; } @@ -750,7 +753,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, * "fourth, check the SYN bit" */ if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) { - __TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); + TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); goto embryonic_reset; } diff --git a/net/ipv4/udp_bpf.c b/net/ipv4/udp_bpf.c index e5dc91d0e079..0735d820e413 100644 --- a/net/ipv4/udp_bpf.c +++ b/net/ipv4/udp_bpf.c @@ -68,6 +68,9 @@ static int udp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, if (unlikely(flags & MSG_ERRQUEUE)) return inet_recv_error(sk, msg, len, addr_len); + if (!len) + return 0; + psock = sk_psock_get(sk); if (unlikely(!psock)) return sk_udp_recvmsg(sk, msg, len, flags, addr_len); diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 47447f0241df..bee45dfeb187 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -477,6 +477,7 @@ int ila_xlat_nl_cmd_get_mapping(struct sk_buff *skb, struct genl_info *info) rcu_read_lock(); + ret = -ESRCH; ila = ila_lookup_by_params(&xp, ilan); if (ila) { ret = ila_dump_info(ila, diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 2d816277f2c5..0ce0ed17c758 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1062,7 +1062,6 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, struct xt_counters *counters; struct ip6t_entry *iter; - ret = 0; counters = xt_counters_alloc(num_counters); if (!counters) { ret = -ENOMEM; @@ -1108,7 +1107,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n"); } vfree(counters); - return ret; + return 0; put_module: module_put(t->me); @@ -1751,6 +1750,10 @@ int ip6t_register_table(struct net *net, const struct xt_table *table, new_table = xt_register_table(net, table, &bootstrap, newinfo); if (IS_ERR(new_table)) { + struct ip6t_entry *iter; + + xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) + cleanup_entry(iter, net); xt_free_table_info(newinfo); return PTR_ERR(new_table); } diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c index a01d9b842bd0..67c87a88cde4 100644 --- a/net/ipv6/netfilter/ip6t_rpfilter.c +++ b/net/ipv6/netfilter/ip6t_rpfilter.c @@ -72,7 +72,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb, goto out; } - if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE)) + if (rt->rt6i_idev->dev == dev || + l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex || + (flags & XT_RPFILTER_LOOSE)) ret = true; out: ip6_rt_put(rt); diff --git a/net/ipv6/netfilter/nf_tproxy_ipv6.c b/net/ipv6/netfilter/nf_tproxy_ipv6.c index 929502e51203..52f828bb5a83 100644 --- a/net/ipv6/netfilter/nf_tproxy_ipv6.c +++ b/net/ipv6/netfilter/nf_tproxy_ipv6.c @@ -63,7 +63,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff, lport ? 
lport : hp->dest, skb->dev, NF_TPROXY_LOOKUP_LISTENER); if (sk2) { - inet_twsk_deschedule_put(inet_twsk(sk)); + nf_tproxy_twsk_deschedule_put(inet_twsk(sk)); sk = sk2; } } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index b6dac8981147..9c433cdc81ef 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -5517,16 +5517,17 @@ static size_t rt6_nlmsg_size(struct fib6_info *f6i) nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size, &nexthop_len); } else { + struct fib6_info *sibling, *next_sibling; struct fib6_nh *nh = f6i->fib6_nh; nexthop_len = 0; if (f6i->fib6_nsiblings) { - nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */ - + NLA_ALIGN(sizeof(struct rtnexthop)) - + nla_total_size(16) /* RTA_GATEWAY */ - + lwtunnel_get_encap_size(nh->fib_nh_lws); + rt6_nh_nlmsg_size(nh, &nexthop_len); - nexthop_len *= f6i->fib6_nsiblings; + list_for_each_entry_safe(sibling, next_sibling, + &f6i->fib6_siblings, fib6_siblings) { + rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len); + } } nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws); } diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c index 8639e7efd0e2..adae86e8e02e 100644 --- a/net/netfilter/nf_conntrack_bpf.c +++ b/net/netfilter/nf_conntrack_bpf.c @@ -384,7 +384,6 @@ struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i) struct nf_conn *nfct = (struct nf_conn *)nfct_i; int err; - nfct->status |= IPS_CONFIRMED; err = nf_conntrack_hash_check_insert(nfct); if (err < 0) { nf_conntrack_free(nfct); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 23b3fedd619a..30ed45b1b57d 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -96,8 +96,8 @@ static DEFINE_MUTEX(nf_conntrack_mutex); #define GC_SCAN_MAX_DURATION msecs_to_jiffies(10) #define GC_SCAN_EXPIRED_MAX (64000u / HZ) -#define MIN_CHAINLEN 8u -#define MAX_CHAINLEN (32u - MIN_CHAINLEN) +#define MIN_CHAINLEN 50u +#define MAX_CHAINLEN (80u - MIN_CHAINLEN) static struct conntrack_gc_work conntrack_gc_work; @@ -890,10 +890,8 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct) zone = nf_ct_zone(ct); - if (!nf_ct_ext_valid_pre(ct->ext)) { - NF_CT_STAT_INC_ATOMIC(net, insert_failed); - return -ETIMEDOUT; - } + if (!nf_ct_ext_valid_pre(ct->ext)) + return -EAGAIN; local_bh_disable(); do { @@ -928,6 +926,19 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct) goto chaintoolong; } + /* If genid has changed, we can't insert anymore because ct + * extensions could have stale pointers and nf_ct_iterate_destroy + * might have completed its table scan already. + * + * Increment of the ext genid right after this check is fine: + * nf_ct_iterate_destroy blocks until locks are released. 
+ */ + if (!nf_ct_ext_valid_post(ct->ext)) { + err = -EAGAIN; + goto out; + } + + ct->status |= IPS_CONFIRMED; smp_wmb(); /* The caller holds a reference to this object */ refcount_set(&ct->ct_general.use, 2); @@ -936,12 +947,6 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct) NF_CT_STAT_INC(net, insert); local_bh_enable(); - if (!nf_ct_ext_valid_post(ct->ext)) { - nf_ct_kill(ct); - NF_CT_STAT_INC_ATOMIC(net, drop); - return -ETIMEDOUT; - } - return 0; chaintoolong: NF_CT_STAT_INC(net, chaintoolong); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 1286ae7d4609..d095d3c1ceca 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -328,11 +328,12 @@ nla_put_failure: } #ifdef CONFIG_NF_CONNTRACK_MARK -static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct) +static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct, + bool dump) { u32 mark = READ_ONCE(ct->mark); - if (!mark) + if (!mark && !dump) return 0; if (nla_put_be32(skb, CTA_MARK, htonl(mark))) @@ -343,7 +344,7 @@ nla_put_failure: return -1; } #else -#define ctnetlink_dump_mark(a, b) (0) +#define ctnetlink_dump_mark(a, b, c) (0) #endif #ifdef CONFIG_NF_CONNTRACK_SECMARK @@ -548,7 +549,7 @@ static int ctnetlink_dump_extinfo(struct sk_buff *skb, static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct) { if (ctnetlink_dump_status(skb, ct) < 0 || - ctnetlink_dump_mark(skb, ct) < 0 || + ctnetlink_dump_mark(skb, ct, true) < 0 || ctnetlink_dump_secctx(skb, ct) < 0 || ctnetlink_dump_id(skb, ct) < 0 || ctnetlink_dump_use(skb, ct) < 0 || @@ -831,8 +832,7 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item) } #ifdef CONFIG_NF_CONNTRACK_MARK - if (events & (1 << IPCT_MARK) && - ctnetlink_dump_mark(skb, ct) < 0) + if (ctnetlink_dump_mark(skb, ct, events & (1 << IPCT_MARK))) goto nla_put_failure; #endif nlmsg_end(skb, nlh); @@ -2316,9 +2316,6 @@ ctnetlink_create_conntrack(struct net *net, nfct_seqadj_ext_add(ct); nfct_synproxy_ext_add(ct); - /* we must add conntrack extensions before confirmation. 
*/ - ct->status |= IPS_CONFIRMED; - if (cda[CTA_STATUS]) { err = ctnetlink_change_status(ct, cda); if (err < 0) @@ -2375,12 +2372,15 @@ ctnetlink_create_conntrack(struct net *net, err = nf_conntrack_hash_check_insert(ct); if (err < 0) - goto err2; + goto err3; rcu_read_unlock(); return ct; +err3: + if (ct->master) + nf_ct_put(ct->master); err2: rcu_read_unlock(); err1: @@ -2735,7 +2735,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct) goto nla_put_failure; #ifdef CONFIG_NF_CONNTRACK_MARK - if (ctnetlink_dump_mark(skb, ct) < 0) + if (ctnetlink_dump_mark(skb, ct, true) < 0) goto nla_put_failure; #endif if (ctnetlink_dump_labels(skb, ct) < 0) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index dca5352bdf3d..1a9d759d0a02 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -5439,7 +5439,7 @@ static int nf_tables_getsetelem(struct sk_buff *skb, int rem, err = 0; table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family, - genmask, NETLINK_CB(skb).portid); + genmask, 0); if (IS_ERR(table)) { NL_SET_BAD_ATTR(extack, nla[NFTA_SET_ELEM_LIST_TABLE]); return PTR_ERR(table); diff --git a/net/netfilter/nft_last.c b/net/netfilter/nft_last.c index bb15a55dad5c..eaa54964cf23 100644 --- a/net/netfilter/nft_last.c +++ b/net/netfilter/nft_last.c @@ -104,11 +104,15 @@ static void nft_last_destroy(const struct nft_ctx *ctx, static int nft_last_clone(struct nft_expr *dst, const struct nft_expr *src) { struct nft_last_priv *priv_dst = nft_expr_priv(dst); + struct nft_last_priv *priv_src = nft_expr_priv(src); priv_dst->last = kzalloc(sizeof(*priv_dst->last), GFP_ATOMIC); if (!priv_dst->last) return -ENOMEM; + priv_dst->last->set = priv_src->last->set; + priv_dst->last->jiffies = priv_src->last->jiffies; + return 0; } diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c index e6b0df68feea..410a5fcf8830 100644 --- a/net/netfilter/nft_quota.c +++ b/net/netfilter/nft_quota.c @@ -235,12 +235,16 @@ static void nft_quota_destroy(const struct nft_ctx *ctx, static int nft_quota_clone(struct nft_expr *dst, const struct nft_expr *src) { struct nft_quota *priv_dst = nft_expr_priv(dst); + struct nft_quota *priv_src = nft_expr_priv(src); + + priv_dst->quota = priv_src->quota; + priv_dst->flags = priv_src->flags; priv_dst->consumed = kmalloc(sizeof(*priv_dst->consumed), GFP_ATOMIC); if (!priv_dst->consumed) return -ENOMEM; - atomic64_set(priv_dst->consumed, 0); + *priv_dst->consumed = *priv_src->consumed; return 0; } diff --git a/net/netfilter/xt_length.c b/net/netfilter/xt_length.c index 1873da3a945a..9fbfad13176f 100644 --- a/net/netfilter/xt_length.c +++ b/net/netfilter/xt_length.c @@ -30,8 +30,7 @@ static bool length_mt6(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_length_info *info = par->matchinfo; - const u_int16_t pktlen = ntohs(ipv6_hdr(skb)->payload_len) + - sizeof(struct ipv6hdr); + u32 pktlen = skb->len; return (pktlen >= info->min && pktlen <= info->max) ^ info->invert; } diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c index 1fc339084d89..b9264e730fd9 100644 --- a/net/nfc/netlink.c +++ b/net/nfc/netlink.c @@ -1442,8 +1442,12 @@ static int nfc_se_io(struct nfc_dev *dev, u32 se_idx, rc = dev->ops->se_io(dev, se_idx, apdu, apdu_length, cb, cb_context); + device_unlock(&dev->dev); + return rc; + error: device_unlock(&dev->dev); + kfree(cb_context); return rc; } diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 4662a6ce8a7e..bcdd6e925343 100644 --- a/net/sched/Kconfig +++ 
b/net/sched/Kconfig @@ -503,17 +503,6 @@ config NET_CLS_BASIC To compile this code as a module, choose M here: the module will be called cls_basic. -config NET_CLS_TCINDEX - tristate "Traffic-Control Index (TCINDEX)" - select NET_CLS - help - Say Y here if you want to be able to classify packets based on - traffic control indices. You will want this feature if you want - to implement Differentiated Services together with DSMARK. - - To compile this code as a module, choose M here: the - module will be called cls_tcindex. - config NET_CLS_ROUTE4 tristate "Routing decision (ROUTE)" depends on INET diff --git a/net/sched/Makefile b/net/sched/Makefile index dd14ef413fda..b7dbac5c519f 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -70,7 +70,6 @@ obj-$(CONFIG_NET_CLS_U32) += cls_u32.o obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o obj-$(CONFIG_NET_CLS_FW) += cls_fw.o obj-$(CONFIG_NET_CLS_RSVP) += cls_rsvp.o -obj-$(CONFIG_NET_CLS_TCINDEX) += cls_tcindex.o obj-$(CONFIG_NET_CLS_RSVP6) += cls_rsvp6.o obj-$(CONFIG_NET_CLS_BASIC) += cls_basic.o obj-$(CONFIG_NET_CLS_FLOW) += cls_flow.o diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c index ea5959094adb..f24f997a7aaf 100644 --- a/net/sched/act_mpls.c +++ b/net/sched/act_mpls.c @@ -188,40 +188,67 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla, parm = nla_data(tb[TCA_MPLS_PARMS]); index = parm->index; + err = tcf_idr_check_alloc(tn, &index, a, bind); + if (err < 0) + return err; + exists = err; + if (exists && bind) + return 0; + + if (!exists) { + ret = tcf_idr_create(tn, index, est, a, &act_mpls_ops, bind, + true, flags); + if (ret) { + tcf_idr_cleanup(tn, index); + return ret; + } + + ret = ACT_P_CREATED; + } else if (!(flags & TCA_ACT_FLAGS_REPLACE)) { + tcf_idr_release(*a, bind); + return -EEXIST; + } + /* Verify parameters against action type. */ switch (parm->m_action) { case TCA_MPLS_ACT_POP: if (!tb[TCA_MPLS_PROTO]) { NL_SET_ERR_MSG_MOD(extack, "Protocol must be set for MPLS pop"); - return -EINVAL; + err = -EINVAL; + goto release_idr; } if (!eth_proto_is_802_3(nla_get_be16(tb[TCA_MPLS_PROTO]))) { NL_SET_ERR_MSG_MOD(extack, "Invalid protocol type for MPLS pop"); - return -EINVAL; + err = -EINVAL; + goto release_idr; } if (tb[TCA_MPLS_LABEL] || tb[TCA_MPLS_TTL] || tb[TCA_MPLS_TC] || tb[TCA_MPLS_BOS]) { NL_SET_ERR_MSG_MOD(extack, "Label, TTL, TC or BOS cannot be used with MPLS pop"); - return -EINVAL; + err = -EINVAL; + goto release_idr; } break; case TCA_MPLS_ACT_DEC_TTL: if (tb[TCA_MPLS_PROTO] || tb[TCA_MPLS_LABEL] || tb[TCA_MPLS_TTL] || tb[TCA_MPLS_TC] || tb[TCA_MPLS_BOS]) { NL_SET_ERR_MSG_MOD(extack, "Label, TTL, TC, BOS or protocol cannot be used with MPLS dec_ttl"); - return -EINVAL; + err = -EINVAL; + goto release_idr; } break; case TCA_MPLS_ACT_PUSH: case TCA_MPLS_ACT_MAC_PUSH: if (!tb[TCA_MPLS_LABEL]) { NL_SET_ERR_MSG_MOD(extack, "Label is required for MPLS push"); - return -EINVAL; + err = -EINVAL; + goto release_idr; } if (tb[TCA_MPLS_PROTO] && !eth_p_mpls(nla_get_be16(tb[TCA_MPLS_PROTO]))) { NL_SET_ERR_MSG_MOD(extack, "Protocol must be an MPLS type for MPLS push"); - return -EPROTONOSUPPORT; + err = -EPROTONOSUPPORT; + goto release_idr; } /* Push needs a TTL - if not specified, set a default value. 
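The act_mpls change above reserves the action index with tcf_idr_check_alloc() before validating parameters, so every validation failure now unwinds through release_idr instead of leaking the reserved index. A small self-contained illustration of that reserve-validate-unwind shape; reserve_index() and release_index() are made-up stand-ins for the idr helpers.

#include <errno.h>
#include <stdio.h>

static int reserve_index(int *idx)
{
    static int next = 1;

    *idx = next++;               /* hand out an ID up front */
    return 0;
}

static void release_index(int idx)
{
    printf("released index %d\n", idx);
}

static int setup_action(int label_present)
{
    int idx, err;

    err = reserve_index(&idx);   /* take the ID before any validation */
    if (err)
        return err;

    if (!label_present) {        /* parameter check after the ID exists */
        err = -EINVAL;
        goto release_idx;        /* so every failure path releases it */
    }

    printf("action %d configured\n", idx);
    return 0;

release_idx:
    release_index(idx);
    return err;
}

int main(void)
{
    printf("valid parms  : %d\n", setup_action(1));
    printf("invalid parms: %d\n", setup_action(0));
    return 0;
}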
*/ if (!tb[TCA_MPLS_TTL]) { @@ -236,33 +263,14 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla, case TCA_MPLS_ACT_MODIFY: if (tb[TCA_MPLS_PROTO]) { NL_SET_ERR_MSG_MOD(extack, "Protocol cannot be used with MPLS modify"); - return -EINVAL; + err = -EINVAL; + goto release_idr; } break; default: NL_SET_ERR_MSG_MOD(extack, "Unknown MPLS action"); - return -EINVAL; - } - - err = tcf_idr_check_alloc(tn, &index, a, bind); - if (err < 0) - return err; - exists = err; - if (exists && bind) - return 0; - - if (!exists) { - ret = tcf_idr_create(tn, index, est, a, - &act_mpls_ops, bind, true, flags); - if (ret) { - tcf_idr_cleanup(tn, index); - return ret; - } - - ret = ACT_P_CREATED; - } else if (!(flags & TCA_ACT_FLAGS_REPLACE)) { - tcf_idr_release(*a, bind); - return -EEXIST; + err = -EINVAL; + goto release_idr; } err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 94ed5857ce67..238759c3192e 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -133,6 +133,17 @@ nla_failure: return -EINVAL; } +static void tcf_pedit_cleanup_rcu(struct rcu_head *head) +{ + struct tcf_pedit_parms *parms = + container_of(head, struct tcf_pedit_parms, rcu); + + kfree(parms->tcfp_keys_ex); + kfree(parms->tcfp_keys); + + kfree(parms); +} + static int tcf_pedit_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, struct tcf_proto *tp, u32 flags, @@ -140,10 +151,9 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, { struct tc_action_net *tn = net_generic(net, act_pedit_ops.net_id); bool bind = flags & TCA_ACT_FLAGS_BIND; - struct nlattr *tb[TCA_PEDIT_MAX + 1]; struct tcf_chain *goto_ch = NULL; - struct tc_pedit_key *keys = NULL; - struct tcf_pedit_key_ex *keys_ex; + struct tcf_pedit_parms *oparms, *nparms; + struct nlattr *tb[TCA_PEDIT_MAX + 1]; struct tc_pedit *parm; struct nlattr *pattr; struct tcf_pedit *p; @@ -170,109 +180,125 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, } parm = nla_data(pattr); - if (!parm->nkeys) { - NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed"); - return -EINVAL; - } - ksize = parm->nkeys * sizeof(struct tc_pedit_key); - if (nla_len(pattr) < sizeof(*parm) + ksize) { - NL_SET_ERR_MSG_ATTR(extack, pattr, "Length of TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute is invalid"); - return -EINVAL; - } - - keys_ex = tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys); - if (IS_ERR(keys_ex)) - return PTR_ERR(keys_ex); index = parm->index; err = tcf_idr_check_alloc(tn, &index, a, bind); if (!err) { - ret = tcf_idr_create(tn, index, est, a, - &act_pedit_ops, bind, false, flags); + ret = tcf_idr_create_from_flags(tn, index, est, a, + &act_pedit_ops, bind, flags); if (ret) { tcf_idr_cleanup(tn, index); - goto out_free; + return ret; } ret = ACT_P_CREATED; } else if (err > 0) { if (bind) - goto out_free; + return 0; if (!(flags & TCA_ACT_FLAGS_REPLACE)) { ret = -EEXIST; goto out_release; } } else { - ret = err; + return err; + } + + if (!parm->nkeys) { + NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed"); + ret = -EINVAL; + goto out_release; + } + ksize = parm->nkeys * sizeof(struct tc_pedit_key); + if (nla_len(pattr) < sizeof(*parm) + ksize) { + NL_SET_ERR_MSG_ATTR(extack, pattr, "Length of TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute is invalid"); + ret = -EINVAL; + goto out_release; + } + + nparms = kzalloc(sizeof(*nparms), GFP_KERNEL); + if (!nparms) { + ret = -ENOMEM; + goto 
out_release; + } + + nparms->tcfp_keys_ex = + tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys); + if (IS_ERR(nparms->tcfp_keys_ex)) { + ret = PTR_ERR(nparms->tcfp_keys_ex); goto out_free; } err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); if (err < 0) { ret = err; - goto out_release; + goto out_free_ex; } - p = to_pedit(*a); - spin_lock_bh(&p->tcf_lock); - if (ret == ACT_P_CREATED || - (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys)) { - keys = kmalloc(ksize, GFP_ATOMIC); - if (!keys) { - spin_unlock_bh(&p->tcf_lock); - ret = -ENOMEM; - goto put_chain; - } - kfree(p->tcfp_keys); - p->tcfp_keys = keys; - p->tcfp_nkeys = parm->nkeys; + nparms->tcfp_off_max_hint = 0; + nparms->tcfp_flags = parm->flags; + nparms->tcfp_nkeys = parm->nkeys; + + nparms->tcfp_keys = kmalloc(ksize, GFP_KERNEL); + if (!nparms->tcfp_keys) { + ret = -ENOMEM; + goto put_chain; } - memcpy(p->tcfp_keys, parm->keys, ksize); - p->tcfp_off_max_hint = 0; - for (i = 0; i < p->tcfp_nkeys; ++i) { - u32 cur = p->tcfp_keys[i].off; + + memcpy(nparms->tcfp_keys, parm->keys, ksize); + + for (i = 0; i < nparms->tcfp_nkeys; ++i) { + u32 cur = nparms->tcfp_keys[i].off; /* sanitize the shift value for any later use */ - p->tcfp_keys[i].shift = min_t(size_t, BITS_PER_TYPE(int) - 1, - p->tcfp_keys[i].shift); + nparms->tcfp_keys[i].shift = min_t(size_t, + BITS_PER_TYPE(int) - 1, + nparms->tcfp_keys[i].shift); /* The AT option can read a single byte, we can bound the actual * value with uchar max. */ - cur += (0xff & p->tcfp_keys[i].offmask) >> p->tcfp_keys[i].shift; + cur += (0xff & nparms->tcfp_keys[i].offmask) >> nparms->tcfp_keys[i].shift; /* Each key touches 4 bytes starting from the computed offset */ - p->tcfp_off_max_hint = max(p->tcfp_off_max_hint, cur + 4); + nparms->tcfp_off_max_hint = + max(nparms->tcfp_off_max_hint, cur + 4); } - p->tcfp_flags = parm->flags; + p = to_pedit(*a); + + spin_lock_bh(&p->tcf_lock); goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); - - kfree(p->tcfp_keys_ex); - p->tcfp_keys_ex = keys_ex; - + oparms = rcu_replace_pointer(p->parms, nparms, 1); spin_unlock_bh(&p->tcf_lock); + + if (oparms) + call_rcu(&oparms->rcu, tcf_pedit_cleanup_rcu); + if (goto_ch) tcf_chain_put_by_act(goto_ch); + return ret; put_chain: if (goto_ch) tcf_chain_put_by_act(goto_ch); +out_free_ex: + kfree(nparms->tcfp_keys_ex); +out_free: + kfree(nparms); out_release: tcf_idr_release(*a, bind); -out_free: - kfree(keys_ex); return ret; - } static void tcf_pedit_cleanup(struct tc_action *a) { struct tcf_pedit *p = to_pedit(a); - struct tc_pedit_key *keys = p->tcfp_keys; + struct tcf_pedit_parms *parms; - kfree(keys); - kfree(p->tcfp_keys_ex); + parms = rcu_dereference_protected(p->parms, 1); + + if (parms) + call_rcu(&parms->rcu, tcf_pedit_cleanup_rcu); } static bool offset_valid(struct sk_buff *skb, int offset) @@ -323,28 +349,30 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_pedit *p = to_pedit(a); + struct tcf_pedit_parms *parms; u32 max_offset; int i; - spin_lock(&p->tcf_lock); + parms = rcu_dereference_bh(p->parms); max_offset = (skb_transport_header_was_set(skb) ? 
skb_transport_offset(skb) : skb_network_offset(skb)) + - p->tcfp_off_max_hint; + parms->tcfp_off_max_hint; if (skb_ensure_writable(skb, min(skb->len, max_offset))) - goto unlock; + goto done; tcf_lastuse_update(&p->tcf_tm); + tcf_action_update_bstats(&p->common, skb); - if (p->tcfp_nkeys > 0) { - struct tc_pedit_key *tkey = p->tcfp_keys; - struct tcf_pedit_key_ex *tkey_ex = p->tcfp_keys_ex; + if (parms->tcfp_nkeys > 0) { + struct tc_pedit_key *tkey = parms->tcfp_keys; + struct tcf_pedit_key_ex *tkey_ex = parms->tcfp_keys_ex; enum pedit_header_type htype = TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK; enum pedit_cmd cmd = TCA_PEDIT_KEY_EX_CMD_SET; - for (i = p->tcfp_nkeys; i > 0; i--, tkey++) { + for (i = parms->tcfp_nkeys; i > 0; i--, tkey++) { u32 *ptr, hdata; int offset = tkey->off; int hoffset; @@ -420,11 +448,10 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a, } bad: + spin_lock(&p->tcf_lock); p->tcf_qstats.overlimits++; -done: - bstats_update(&p->tcf_bstats, skb); -unlock: spin_unlock(&p->tcf_lock); +done: return p->tcf_action; } @@ -443,30 +470,33 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, { unsigned char *b = skb_tail_pointer(skb); struct tcf_pedit *p = to_pedit(a); + struct tcf_pedit_parms *parms; struct tc_pedit *opt; struct tcf_t t; int s; - s = struct_size(opt, keys, p->tcfp_nkeys); - - /* netlink spinlocks held above us - must use ATOMIC */ - opt = kzalloc(s, GFP_ATOMIC); - if (unlikely(!opt)) - return -ENOBUFS; - spin_lock_bh(&p->tcf_lock); - memcpy(opt->keys, p->tcfp_keys, flex_array_size(opt, keys, p->tcfp_nkeys)); + parms = rcu_dereference_protected(p->parms, 1); + s = struct_size(opt, keys, parms->tcfp_nkeys); + + opt = kzalloc(s, GFP_ATOMIC); + if (unlikely(!opt)) { + spin_unlock_bh(&p->tcf_lock); + return -ENOBUFS; + } + + memcpy(opt->keys, parms->tcfp_keys, + flex_array_size(opt, keys, parms->tcfp_nkeys)); opt->index = p->tcf_index; - opt->nkeys = p->tcfp_nkeys; - opt->flags = p->tcfp_flags; + opt->nkeys = parms->tcfp_nkeys; + opt->flags = parms->tcfp_flags; opt->action = p->tcf_action; opt->refcnt = refcount_read(&p->tcf_refcnt) - ref; opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind; - if (p->tcfp_keys_ex) { - if (tcf_pedit_key_ex_dump(skb, - p->tcfp_keys_ex, - p->tcfp_nkeys)) + if (parms->tcfp_keys_ex) { + if (tcf_pedit_key_ex_dump(skb, parms->tcfp_keys_ex, + parms->tcfp_nkeys)) goto nla_put_failure; if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt)) diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index 7a25477f5d99..09735a33e57e 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -54,8 +54,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, sample_policy, NULL); if (ret < 0) return ret; - if (!tb[TCA_SAMPLE_PARMS] || !tb[TCA_SAMPLE_RATE] || - !tb[TCA_SAMPLE_PSAMPLE_GROUP]) + + if (!tb[TCA_SAMPLE_PARMS]) return -EINVAL; parm = nla_data(tb[TCA_SAMPLE_PARMS]); @@ -79,6 +79,13 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla, tcf_idr_release(*a, bind); return -EEXIST; } + + if (!tb[TCA_SAMPLE_RATE] || !tb[TCA_SAMPLE_PSAMPLE_GROUP]) { + NL_SET_ERR_MSG(extack, "sample rate and group are required"); + err = -EINVAL; + goto release_idr; + } + err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); if (err < 0) goto release_idr; diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c deleted file mode 100644 index eea8e185fcdb..000000000000 --- a/net/sched/cls_tcindex.c +++ /dev/null @@ -1,741 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * 
net/sched/cls_tcindex.c Packet classifier for skb->tc_index - * - * Written 1998,1999 by Werner Almesberger, EPFL ICA - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * Passing parameters to the root seems to be done more awkwardly than really - * necessary. At least, u32 doesn't seem to use such dirty hacks. To be - * verified. FIXME. - */ - -#define PERFECT_HASH_THRESHOLD 64 /* use perfect hash if not bigger */ -#define DEFAULT_HASH_SIZE 64 /* optimized for diffserv */ - - -struct tcindex_data; - -struct tcindex_filter_result { - struct tcf_exts exts; - struct tcf_result res; - struct tcindex_data *p; - struct rcu_work rwork; -}; - -struct tcindex_filter { - u16 key; - struct tcindex_filter_result result; - struct tcindex_filter __rcu *next; - struct rcu_work rwork; -}; - - -struct tcindex_data { - struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */ - struct tcindex_filter __rcu **h; /* imperfect hash; */ - struct tcf_proto *tp; - u16 mask; /* AND key with mask */ - u32 shift; /* shift ANDed key to the right */ - u32 hash; /* hash table size; 0 if undefined */ - u32 alloc_hash; /* allocated size */ - u32 fall_through; /* 0: only classify if explicit match */ - refcount_t refcnt; /* a temporary refcnt for perfect hash */ - struct rcu_work rwork; -}; - -static inline int tcindex_filter_is_set(struct tcindex_filter_result *r) -{ - return tcf_exts_has_actions(&r->exts) || r->res.classid; -} - -static void tcindex_data_get(struct tcindex_data *p) -{ - refcount_inc(&p->refcnt); -} - -static void tcindex_data_put(struct tcindex_data *p) -{ - if (refcount_dec_and_test(&p->refcnt)) { - kfree(p->perfect); - kfree(p->h); - kfree(p); - } -} - -static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p, - u16 key) -{ - if (p->perfect) { - struct tcindex_filter_result *f = p->perfect + key; - - return tcindex_filter_is_set(f) ? f : NULL; - } else if (p->h) { - struct tcindex_filter __rcu **fp; - struct tcindex_filter *f; - - fp = &p->h[key % p->hash]; - for (f = rcu_dereference_bh_rtnl(*fp); - f; - fp = &f->next, f = rcu_dereference_bh_rtnl(*fp)) - if (f->key == key) - return &f->result; - } - - return NULL; -} - - -static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp, - struct tcf_result *res) -{ - struct tcindex_data *p = rcu_dereference_bh(tp->root); - struct tcindex_filter_result *f; - int key = (skb->tc_index & p->mask) >> p->shift; - - pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n", - skb, tp, res, p); - - f = tcindex_lookup(p, key); - if (!f) { - struct Qdisc *q = tcf_block_q(tp->chain->block); - - if (!p->fall_through) - return -1; - res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key); - res->class = 0; - pr_debug("alg 0x%x\n", res->classid); - return 0; - } - *res = f->res; - pr_debug("map 0x%x\n", res->classid); - - return tcf_exts_exec(skb, &f->exts, res); -} - - -static void *tcindex_get(struct tcf_proto *tp, u32 handle) -{ - struct tcindex_data *p = rtnl_dereference(tp->root); - struct tcindex_filter_result *r; - - pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle); - if (p->perfect && handle >= p->alloc_hash) - return NULL; - r = tcindex_lookup(p, handle); - return r && tcindex_filter_is_set(r) ? 
r : NULL; -} - -static int tcindex_init(struct tcf_proto *tp) -{ - struct tcindex_data *p; - - pr_debug("tcindex_init(tp %p)\n", tp); - p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL); - if (!p) - return -ENOMEM; - - p->mask = 0xffff; - p->hash = DEFAULT_HASH_SIZE; - p->fall_through = 1; - refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */ - - rcu_assign_pointer(tp->root, p); - return 0; -} - -static void __tcindex_destroy_rexts(struct tcindex_filter_result *r) -{ - tcf_exts_destroy(&r->exts); - tcf_exts_put_net(&r->exts); - tcindex_data_put(r->p); -} - -static void tcindex_destroy_rexts_work(struct work_struct *work) -{ - struct tcindex_filter_result *r; - - r = container_of(to_rcu_work(work), - struct tcindex_filter_result, - rwork); - rtnl_lock(); - __tcindex_destroy_rexts(r); - rtnl_unlock(); -} - -static void __tcindex_destroy_fexts(struct tcindex_filter *f) -{ - tcf_exts_destroy(&f->result.exts); - tcf_exts_put_net(&f->result.exts); - kfree(f); -} - -static void tcindex_destroy_fexts_work(struct work_struct *work) -{ - struct tcindex_filter *f = container_of(to_rcu_work(work), - struct tcindex_filter, - rwork); - - rtnl_lock(); - __tcindex_destroy_fexts(f); - rtnl_unlock(); -} - -static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last, - bool rtnl_held, struct netlink_ext_ack *extack) -{ - struct tcindex_data *p = rtnl_dereference(tp->root); - struct tcindex_filter_result *r = arg; - struct tcindex_filter __rcu **walk; - struct tcindex_filter *f = NULL; - - pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p); - if (p->perfect) { - if (!r->res.class) - return -ENOENT; - } else { - int i; - - for (i = 0; i < p->hash; i++) { - walk = p->h + i; - for (f = rtnl_dereference(*walk); f; - walk = &f->next, f = rtnl_dereference(*walk)) { - if (&f->result == r) - goto found; - } - } - return -ENOENT; - -found: - rcu_assign_pointer(*walk, rtnl_dereference(f->next)); - } - tcf_unbind_filter(tp, &r->res); - /* all classifiers are required to call tcf_exts_destroy() after rcu - * grace period, since converted-to-rcu actions are relying on that - * in cleanup() callback - */ - if (f) { - if (tcf_exts_get_net(&f->result.exts)) - tcf_queue_work(&f->rwork, tcindex_destroy_fexts_work); - else - __tcindex_destroy_fexts(f); - } else { - tcindex_data_get(p); - - if (tcf_exts_get_net(&r->exts)) - tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work); - else - __tcindex_destroy_rexts(r); - } - - *last = false; - return 0; -} - -static void tcindex_destroy_work(struct work_struct *work) -{ - struct tcindex_data *p = container_of(to_rcu_work(work), - struct tcindex_data, - rwork); - - tcindex_data_put(p); -} - -static inline int -valid_perfect_hash(struct tcindex_data *p) -{ - return p->hash > (p->mask >> p->shift); -} - -static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = { - [TCA_TCINDEX_HASH] = { .type = NLA_U32 }, - [TCA_TCINDEX_MASK] = { .type = NLA_U16 }, - [TCA_TCINDEX_SHIFT] = { .type = NLA_U32 }, - [TCA_TCINDEX_FALL_THROUGH] = { .type = NLA_U32 }, - [TCA_TCINDEX_CLASSID] = { .type = NLA_U32 }, -}; - -static int tcindex_filter_result_init(struct tcindex_filter_result *r, - struct tcindex_data *p, - struct net *net) -{ - memset(r, 0, sizeof(*r)); - r->p = p; - return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT, - TCA_TCINDEX_POLICE); -} - -static void tcindex_free_perfect_hash(struct tcindex_data *cp); - -static void tcindex_partial_destroy_work(struct work_struct *work) -{ - struct tcindex_data *p = container_of(to_rcu_work(work), - struct 
tcindex_data, - rwork); - - rtnl_lock(); - if (p->perfect) - tcindex_free_perfect_hash(p); - kfree(p); - rtnl_unlock(); -} - -static void tcindex_free_perfect_hash(struct tcindex_data *cp) -{ - int i; - - for (i = 0; i < cp->hash; i++) - tcf_exts_destroy(&cp->perfect[i].exts); - kfree(cp->perfect); -} - -static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp) -{ - int i, err = 0; - - cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result), - GFP_KERNEL | __GFP_NOWARN); - if (!cp->perfect) - return -ENOMEM; - - for (i = 0; i < cp->hash; i++) { - err = tcf_exts_init(&cp->perfect[i].exts, net, - TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); - if (err < 0) - goto errout; - cp->perfect[i].p = cp; - } - - return 0; - -errout: - tcindex_free_perfect_hash(cp); - return err; -} - -static int -tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, - u32 handle, struct tcindex_data *p, - struct tcindex_filter_result *r, struct nlattr **tb, - struct nlattr *est, u32 flags, struct netlink_ext_ack *extack) -{ - struct tcindex_filter_result new_filter_result; - struct tcindex_data *cp = NULL, *oldp; - struct tcindex_filter *f = NULL; /* make gcc behave */ - struct tcf_result cr = {}; - int err, balloc = 0; - struct tcf_exts e; - bool update_h = false; - - err = tcf_exts_init(&e, net, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); - if (err < 0) - return err; - err = tcf_exts_validate(net, tp, tb, est, &e, flags, extack); - if (err < 0) - goto errout; - - err = -ENOMEM; - /* tcindex_data attributes must look atomic to classifier/lookup so - * allocate new tcindex data and RCU assign it onto root. Keeping - * perfect hash and hash pointers from old data. - */ - cp = kzalloc(sizeof(*cp), GFP_KERNEL); - if (!cp) - goto errout; - - cp->mask = p->mask; - cp->shift = p->shift; - cp->hash = p->hash; - cp->alloc_hash = p->alloc_hash; - cp->fall_through = p->fall_through; - cp->tp = tp; - refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */ - - if (tb[TCA_TCINDEX_HASH]) - cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); - - if (tb[TCA_TCINDEX_MASK]) - cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]); - - if (tb[TCA_TCINDEX_SHIFT]) { - cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]); - if (cp->shift > 16) { - err = -EINVAL; - goto errout; - } - } - if (!cp->hash) { - /* Hash not specified, use perfect hash if the upper limit - * of the hashing index is below the threshold. - */ - if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD) - cp->hash = (cp->mask >> cp->shift) + 1; - else - cp->hash = DEFAULT_HASH_SIZE; - } - - if (p->perfect) { - int i; - - if (tcindex_alloc_perfect_hash(net, cp) < 0) - goto errout; - cp->alloc_hash = cp->hash; - for (i = 0; i < min(cp->hash, p->hash); i++) - cp->perfect[i].res = p->perfect[i].res; - balloc = 1; - } - cp->h = p->h; - - err = tcindex_filter_result_init(&new_filter_result, cp, net); - if (err < 0) - goto errout_alloc; - if (r) - cr = r->res; - - err = -EBUSY; - - /* Hash already allocated, make sure that we still meet the - * requirements for the allocated hash. 
- */ - if (cp->perfect) { - if (!valid_perfect_hash(cp) || - cp->hash > cp->alloc_hash) - goto errout_alloc; - } else if (cp->h && cp->hash != cp->alloc_hash) { - goto errout_alloc; - } - - err = -EINVAL; - if (tb[TCA_TCINDEX_FALL_THROUGH]) - cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]); - - if (!cp->perfect && !cp->h) - cp->alloc_hash = cp->hash; - - /* Note: this could be as restrictive as if (handle & ~(mask >> shift)) - * but then, we'd fail handles that may become valid after some future - * mask change. While this is extremely unlikely to ever matter, - * the check below is safer (and also more backwards-compatible). - */ - if (cp->perfect || valid_perfect_hash(cp)) - if (handle >= cp->alloc_hash) - goto errout_alloc; - - - err = -ENOMEM; - if (!cp->perfect && !cp->h) { - if (valid_perfect_hash(cp)) { - if (tcindex_alloc_perfect_hash(net, cp) < 0) - goto errout_alloc; - balloc = 1; - } else { - struct tcindex_filter __rcu **hash; - - hash = kcalloc(cp->hash, - sizeof(struct tcindex_filter *), - GFP_KERNEL); - - if (!hash) - goto errout_alloc; - - cp->h = hash; - balloc = 2; - } - } - - if (cp->perfect) { - r = cp->perfect + handle; - } else { - /* imperfect area is updated in-place using rcu */ - update_h = !!tcindex_lookup(cp, handle); - r = &new_filter_result; - } - - if (r == &new_filter_result) { - f = kzalloc(sizeof(*f), GFP_KERNEL); - if (!f) - goto errout_alloc; - f->key = handle; - f->next = NULL; - err = tcindex_filter_result_init(&f->result, cp, net); - if (err < 0) { - kfree(f); - goto errout_alloc; - } - } - - if (tb[TCA_TCINDEX_CLASSID]) { - cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]); - tcf_bind_filter(tp, &cr, base); - } - - oldp = p; - r->res = cr; - tcf_exts_change(&r->exts, &e); - - rcu_assign_pointer(tp->root, cp); - - if (update_h) { - struct tcindex_filter __rcu **fp; - struct tcindex_filter *cf; - - f->result.res = r->res; - tcf_exts_change(&f->result.exts, &r->exts); - - /* imperfect area bucket */ - fp = cp->h + (handle % cp->hash); - - /* lookup the filter, guaranteed to exist */ - for (cf = rcu_dereference_bh_rtnl(*fp); cf; - fp = &cf->next, cf = rcu_dereference_bh_rtnl(*fp)) - if (cf->key == (u16)handle) - break; - - f->next = cf->next; - - cf = rcu_replace_pointer(*fp, f, 1); - tcf_exts_get_net(&cf->result.exts); - tcf_queue_work(&cf->rwork, tcindex_destroy_fexts_work); - } else if (r == &new_filter_result) { - struct tcindex_filter *nfp; - struct tcindex_filter __rcu **fp; - - f->result.res = r->res; - tcf_exts_change(&f->result.exts, &r->exts); - - fp = cp->h + (handle % cp->hash); - for (nfp = rtnl_dereference(*fp); - nfp; - fp = &nfp->next, nfp = rtnl_dereference(*fp)) - ; /* nothing */ - - rcu_assign_pointer(*fp, f); - } else { - tcf_exts_destroy(&new_filter_result.exts); - } - - if (oldp) - tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work); - return 0; - -errout_alloc: - if (balloc == 1) - tcindex_free_perfect_hash(cp); - else if (balloc == 2) - kfree(cp->h); - tcf_exts_destroy(&new_filter_result.exts); -errout: - kfree(cp); - tcf_exts_destroy(&e); - return err; -} - -static int -tcindex_change(struct net *net, struct sk_buff *in_skb, - struct tcf_proto *tp, unsigned long base, u32 handle, - struct nlattr **tca, void **arg, u32 flags, - struct netlink_ext_ack *extack) -{ - struct nlattr *opt = tca[TCA_OPTIONS]; - struct nlattr *tb[TCA_TCINDEX_MAX + 1]; - struct tcindex_data *p = rtnl_dereference(tp->root); - struct tcindex_filter_result *r = *arg; - int err; - - pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg 
%p),opt %p," - "p %p,r %p,*arg %p\n", - tp, handle, tca, arg, opt, p, r, *arg); - - if (!opt) - return 0; - - err = nla_parse_nested_deprecated(tb, TCA_TCINDEX_MAX, opt, - tcindex_policy, NULL); - if (err < 0) - return err; - - return tcindex_set_parms(net, tp, base, handle, p, r, tb, - tca[TCA_RATE], flags, extack); -} - -static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker, - bool rtnl_held) -{ - struct tcindex_data *p = rtnl_dereference(tp->root); - struct tcindex_filter *f, *next; - int i; - - pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p); - if (p->perfect) { - for (i = 0; i < p->hash; i++) { - if (!p->perfect[i].res.class) - continue; - if (!tc_cls_stats_dump(tp, walker, p->perfect + i)) - return; - } - } - if (!p->h) - return; - for (i = 0; i < p->hash; i++) { - for (f = rtnl_dereference(p->h[i]); f; f = next) { - next = rtnl_dereference(f->next); - if (!tc_cls_stats_dump(tp, walker, &f->result)) - return; - } - } -} - -static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held, - struct netlink_ext_ack *extack) -{ - struct tcindex_data *p = rtnl_dereference(tp->root); - int i; - - pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p); - - if (p->perfect) { - for (i = 0; i < p->hash; i++) { - struct tcindex_filter_result *r = p->perfect + i; - - /* tcf_queue_work() does not guarantee the ordering we - * want, so we have to take this refcnt temporarily to - * ensure 'p' is freed after all tcindex_filter_result - * here. Imperfect hash does not need this, because it - * uses linked lists rather than an array. - */ - tcindex_data_get(p); - - tcf_unbind_filter(tp, &r->res); - if (tcf_exts_get_net(&r->exts)) - tcf_queue_work(&r->rwork, - tcindex_destroy_rexts_work); - else - __tcindex_destroy_rexts(r); - } - } - - for (i = 0; p->h && i < p->hash; i++) { - struct tcindex_filter *f, *next; - bool last; - - for (f = rtnl_dereference(p->h[i]); f; f = next) { - next = rtnl_dereference(f->next); - tcindex_delete(tp, &f->result, &last, rtnl_held, NULL); - } - } - - tcf_queue_work(&p->rwork, tcindex_destroy_work); -} - - -static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh, - struct sk_buff *skb, struct tcmsg *t, bool rtnl_held) -{ - struct tcindex_data *p = rtnl_dereference(tp->root); - struct tcindex_filter_result *r = fh; - struct nlattr *nest; - - pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n", - tp, fh, skb, t, p, r); - pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h); - - nest = nla_nest_start_noflag(skb, TCA_OPTIONS); - if (nest == NULL) - goto nla_put_failure; - - if (!fh) { - t->tcm_handle = ~0; /* whatever ... 
*/ - if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) || - nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) || - nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) || - nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through)) - goto nla_put_failure; - nla_nest_end(skb, nest); - } else { - if (p->perfect) { - t->tcm_handle = r - p->perfect; - } else { - struct tcindex_filter *f; - struct tcindex_filter __rcu **fp; - int i; - - t->tcm_handle = 0; - for (i = 0; !t->tcm_handle && i < p->hash; i++) { - fp = &p->h[i]; - for (f = rtnl_dereference(*fp); - !t->tcm_handle && f; - fp = &f->next, f = rtnl_dereference(*fp)) { - if (&f->result == r) - t->tcm_handle = f->key; - } - } - } - pr_debug("handle = %d\n", t->tcm_handle); - if (r->res.class && - nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid)) - goto nla_put_failure; - - if (tcf_exts_dump(skb, &r->exts) < 0) - goto nla_put_failure; - nla_nest_end(skb, nest); - - if (tcf_exts_dump_stats(skb, &r->exts) < 0) - goto nla_put_failure; - } - - return skb->len; - -nla_put_failure: - nla_nest_cancel(skb, nest); - return -1; -} - -static void tcindex_bind_class(void *fh, u32 classid, unsigned long cl, - void *q, unsigned long base) -{ - struct tcindex_filter_result *r = fh; - - tc_cls_bind_class(classid, cl, q, &r->res, base); -} - -static struct tcf_proto_ops cls_tcindex_ops __read_mostly = { - .kind = "tcindex", - .classify = tcindex_classify, - .init = tcindex_init, - .destroy = tcindex_destroy, - .get = tcindex_get, - .change = tcindex_change, - .delete = tcindex_delete, - .walk = tcindex_walk, - .dump = tcindex_dump, - .bind_class = tcindex_bind_class, - .owner = THIS_MODULE, -}; - -static int __init init_tcindex(void) -{ - return register_tcf_proto_ops(&cls_tcindex_ops); -} - -static void __exit exit_tcindex(void) -{ - unregister_tcf_proto_ops(&cls_tcindex_ops); -} - -module_init(init_tcindex) -module_exit(exit_tcindex) -MODULE_LICENSE("GPL"); diff --git a/net/sctp/stream_sched_prio.c b/net/sctp/stream_sched_prio.c index 4fc9f2923ed1..7dd9f8b387cc 100644 --- a/net/sctp/stream_sched_prio.c +++ b/net/sctp/stream_sched_prio.c @@ -25,6 +25,18 @@ static void sctp_sched_prio_unsched_all(struct sctp_stream *stream); +static struct sctp_stream_priorities *sctp_sched_prio_head_get(struct sctp_stream_priorities *p) +{ + p->users++; + return p; +} + +static void sctp_sched_prio_head_put(struct sctp_stream_priorities *p) +{ + if (p && --p->users == 0) + kfree(p); +} + static struct sctp_stream_priorities *sctp_sched_prio_new_head( struct sctp_stream *stream, int prio, gfp_t gfp) { @@ -38,6 +50,7 @@ static struct sctp_stream_priorities *sctp_sched_prio_new_head( INIT_LIST_HEAD(&p->active); p->next = NULL; p->prio = prio; + p->users = 1; return p; } @@ -53,7 +66,7 @@ static struct sctp_stream_priorities *sctp_sched_prio_get_head( */ list_for_each_entry(p, &stream->prio_list, prio_sched) { if (p->prio == prio) - return p; + return sctp_sched_prio_head_get(p); if (p->prio > prio) break; } @@ -70,7 +83,7 @@ static struct sctp_stream_priorities *sctp_sched_prio_get_head( */ break; if (p->prio == prio) - return p; + return sctp_sched_prio_head_get(p); } /* If not even there, allocate a new one. 
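The SCTP scheduler change above replaces the scan-all-streams-before-freeing logic with a users count on each priority head: the head is taken with a get when reused and freed on the last put. A userspace sketch of that ownership rule, assuming a simplified struct with only the fields needed here.

#include <stdio.h>
#include <stdlib.h>

struct prio_head {
    int prio;
    int users;
};

static struct prio_head *head_get(struct prio_head *p)
{
    p->users++;
    return p;
}

static void head_put(struct prio_head *p)
{
    if (p && --p->users == 0) {  /* last reference frees the head */
        printf("freeing prio %d\n", p->prio);
        free(p);
    }
}

int main(void)
{
    struct prio_head *p = malloc(sizeof(*p));

    if (!p)
        return 1;
    p->prio = 3;
    p->users = 1;                /* creator's reference */
    head_get(p);                 /* a second stream starts using it */
    head_put(p);                 /* first stream drops it: still alive */
    head_put(p);                 /* last user: freed here */
    return 0;
}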
*/ @@ -154,32 +167,21 @@ static int sctp_sched_prio_set(struct sctp_stream *stream, __u16 sid, struct sctp_stream_out_ext *soute = sout->ext; struct sctp_stream_priorities *prio_head, *old; bool reschedule = false; - int i; + + old = soute->prio_head; + if (old && old->prio == prio) + return 0; prio_head = sctp_sched_prio_get_head(stream, prio, gfp); if (!prio_head) return -ENOMEM; reschedule = sctp_sched_prio_unsched(soute); - old = soute->prio_head; soute->prio_head = prio_head; if (reschedule) sctp_sched_prio_sched(stream, soute); - if (!old) - /* Happens when we set the priority for the first time */ - return 0; - - for (i = 0; i < stream->outcnt; i++) { - soute = SCTP_SO(stream, i)->ext; - if (soute && soute->prio_head == old) - /* It's still in use, nothing else to do here. */ - return 0; - } - - /* No hits, we are good to free it. */ - kfree(old); - + sctp_sched_prio_head_put(old); return 0; } @@ -206,20 +208,8 @@ static int sctp_sched_prio_init_sid(struct sctp_stream *stream, __u16 sid, static void sctp_sched_prio_free_sid(struct sctp_stream *stream, __u16 sid) { - struct sctp_stream_priorities *prio = SCTP_SO(stream, sid)->ext->prio_head; - int i; - - if (!prio) - return; - + sctp_sched_prio_head_put(SCTP_SO(stream, sid)->ext->prio_head); SCTP_SO(stream, sid)->ext->prio_head = NULL; - for (i = 0; i < stream->outcnt; i++) { - if (SCTP_SO(stream, i)->ext && - SCTP_SO(stream, i)->ext->prio_head == prio) - return; - } - - kfree(prio); } static void sctp_sched_prio_free(struct sctp_stream *stream) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index d9413d43b104..e8018b0fb767 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -2644,16 +2644,14 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct smc_sock *smc; - int rc = -EPIPE; + int rc; smc = smc_sk(sk); lock_sock(sk); - if ((sk->sk_state != SMC_ACTIVE) && - (sk->sk_state != SMC_APPCLOSEWAIT1) && - (sk->sk_state != SMC_INIT)) - goto out; + /* SMC does not support connect with fastopen */ if (msg->msg_flags & MSG_FASTOPEN) { + /* not connected yet, fallback */ if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) { rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP); if (rc) @@ -2662,6 +2660,11 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) rc = -EINVAL; goto out; } + } else if ((sk->sk_state != SMC_ACTIVE) && + (sk->sk_state != SMC_APPCLOSEWAIT1) && + (sk->sk_state != SMC_INIT)) { + rc = -EPIPE; + goto out; } if (smc->use_fallback) { diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 24577d1b9907..9ee32e06f877 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -787,6 +787,7 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) static int svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) { + struct svc_rqst *rqstp; struct task_struct *task; unsigned int state = serv->sv_nrthreads-1; @@ -795,7 +796,10 @@ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) task = choose_victim(serv, pool, &state); if (task == NULL) break; - kthread_stop(task); + rqstp = kthread_data(task); + /* Did we lose a race to svo_function threadfn? 
*/ + if (kthread_stop(task) == -EINTR) + svc_exit_thread(rqstp); nrservs++; } while (nrservs < 0); return 0; diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 6c593788dc25..a7cc4f9faac2 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -508,6 +508,8 @@ handle_error: zc_pfrag.offset = iter_offset.offset; zc_pfrag.size = copy; tls_append_frag(record, &zc_pfrag, copy); + + iter_offset.offset += copy; } else if (copy) { copy = min_t(size_t, copy, pfrag->size - pfrag->offset); diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 3735cb00905d..b32c112984dd 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -405,13 +405,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval, rc = -EINVAL; goto out; } - lock_sock(sk); memcpy(crypto_info_aes_gcm_128->iv, cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, TLS_CIPHER_AES_GCM_128_IV_SIZE); memcpy(crypto_info_aes_gcm_128->rec_seq, cctx->rec_seq, TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE); - release_sock(sk); if (copy_to_user(optval, crypto_info_aes_gcm_128, sizeof(*crypto_info_aes_gcm_128))) @@ -429,13 +427,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval, rc = -EINVAL; goto out; } - lock_sock(sk); memcpy(crypto_info_aes_gcm_256->iv, cctx->iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE, TLS_CIPHER_AES_GCM_256_IV_SIZE); memcpy(crypto_info_aes_gcm_256->rec_seq, cctx->rec_seq, TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE); - release_sock(sk); if (copy_to_user(optval, crypto_info_aes_gcm_256, sizeof(*crypto_info_aes_gcm_256))) @@ -451,13 +447,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval, rc = -EINVAL; goto out; } - lock_sock(sk); memcpy(aes_ccm_128->iv, cctx->iv + TLS_CIPHER_AES_CCM_128_SALT_SIZE, TLS_CIPHER_AES_CCM_128_IV_SIZE); memcpy(aes_ccm_128->rec_seq, cctx->rec_seq, TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE); - release_sock(sk); if (copy_to_user(optval, aes_ccm_128, sizeof(*aes_ccm_128))) rc = -EFAULT; break; @@ -472,13 +466,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval, rc = -EINVAL; goto out; } - lock_sock(sk); memcpy(chacha20_poly1305->iv, cctx->iv + TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE, TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE); memcpy(chacha20_poly1305->rec_seq, cctx->rec_seq, TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE); - release_sock(sk); if (copy_to_user(optval, chacha20_poly1305, sizeof(*chacha20_poly1305))) rc = -EFAULT; @@ -493,13 +485,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval, rc = -EINVAL; goto out; } - lock_sock(sk); memcpy(sm4_gcm_info->iv, cctx->iv + TLS_CIPHER_SM4_GCM_SALT_SIZE, TLS_CIPHER_SM4_GCM_IV_SIZE); memcpy(sm4_gcm_info->rec_seq, cctx->rec_seq, TLS_CIPHER_SM4_GCM_REC_SEQ_SIZE); - release_sock(sk); if (copy_to_user(optval, sm4_gcm_info, sizeof(*sm4_gcm_info))) rc = -EFAULT; break; @@ -513,13 +503,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval, rc = -EINVAL; goto out; } - lock_sock(sk); memcpy(sm4_ccm_info->iv, cctx->iv + TLS_CIPHER_SM4_CCM_SALT_SIZE, TLS_CIPHER_SM4_CCM_IV_SIZE); memcpy(sm4_ccm_info->rec_seq, cctx->rec_seq, TLS_CIPHER_SM4_CCM_REC_SEQ_SIZE); - release_sock(sk); if (copy_to_user(optval, sm4_ccm_info, sizeof(*sm4_ccm_info))) rc = -EFAULT; break; @@ -535,13 +523,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval, rc = -EINVAL; goto out; } - lock_sock(sk); memcpy(crypto_info_aria_gcm_128->iv, cctx->iv + TLS_CIPHER_ARIA_GCM_128_SALT_SIZE, TLS_CIPHER_ARIA_GCM_128_IV_SIZE); memcpy(crypto_info_aria_gcm_128->rec_seq, 
cctx->rec_seq, TLS_CIPHER_ARIA_GCM_128_REC_SEQ_SIZE); - release_sock(sk); if (copy_to_user(optval, crypto_info_aria_gcm_128, sizeof(*crypto_info_aria_gcm_128))) @@ -559,13 +545,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval, rc = -EINVAL; goto out; } - lock_sock(sk); memcpy(crypto_info_aria_gcm_256->iv, cctx->iv + TLS_CIPHER_ARIA_GCM_256_SALT_SIZE, TLS_CIPHER_ARIA_GCM_256_IV_SIZE); memcpy(crypto_info_aria_gcm_256->rec_seq, cctx->rec_seq, TLS_CIPHER_ARIA_GCM_256_REC_SEQ_SIZE); - release_sock(sk); if (copy_to_user(optval, crypto_info_aria_gcm_256, sizeof(*crypto_info_aria_gcm_256))) @@ -614,11 +598,9 @@ static int do_tls_getsockopt_no_pad(struct sock *sk, char __user *optval, if (len < sizeof(value)) return -EINVAL; - lock_sock(sk); value = -EINVAL; if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW) value = ctx->rx_no_pad; - release_sock(sk); if (value < 0) return value; @@ -635,6 +617,8 @@ static int do_tls_getsockopt(struct sock *sk, int optname, { int rc = 0; + lock_sock(sk); + switch (optname) { case TLS_TX: case TLS_RX: @@ -651,6 +635,9 @@ static int do_tls_getsockopt(struct sock *sk, int optname, rc = -ENOPROTOOPT; break; } + + release_sock(sk); + return rc; } diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index a83d2b4275fa..992092aeebad 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -941,7 +941,9 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) MSG_CMSG_COMPAT)) return -EOPNOTSUPP; - mutex_lock(&tls_ctx->tx_lock); + ret = mutex_lock_interruptible(&tls_ctx->tx_lock); + if (ret) + return ret; lock_sock(sk); if (unlikely(msg->msg_controllen)) { @@ -1275,7 +1277,9 @@ int tls_sw_sendpage(struct sock *sk, struct page *page, MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY)) return -EOPNOTSUPP; - mutex_lock(&tls_ctx->tx_lock); + ret = mutex_lock_interruptible(&tls_ctx->tx_lock); + if (ret) + return ret; lock_sock(sk); ret = tls_sw_do_sendpage(sk, page, offset, size, flags); release_sock(sk); @@ -2110,7 +2114,7 @@ recv_end: else err = process_rx_list(ctx, msg, &control, 0, async_copy_bytes, is_peek); - decrypted = max(err, 0); + decrypted += max(err, 0); } copied += decrypted; @@ -2416,11 +2420,19 @@ static void tx_work_handler(struct work_struct *work) if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) return; - mutex_lock(&tls_ctx->tx_lock); - lock_sock(sk); - tls_tx_records(sk, -1); - release_sock(sk); - mutex_unlock(&tls_ctx->tx_lock); + + if (mutex_trylock(&tls_ctx->tx_lock)) { + lock_sock(sk); + tls_tx_records(sk, -1); + release_sock(sk); + mutex_unlock(&tls_ctx->tx_lock); + } else if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) { + /* Someone is holding the tx_lock, they will likely run Tx + * and cancel the work on their way out of the lock section. + * Schedule a long delay just in case. 
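tx_work_handler above switches to mutex_trylock() and, when the lock is contended, re-arms the work item rather than blocking a workqueue thread. A pthreads sketch of the same trylock-or-reschedule decision; tx_work() and the printouts are illustrative, not the kernel or TLS API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

static bool tx_work(void)
{
    if (pthread_mutex_trylock(&tx_lock) == 0) {
        printf("flushing pending records\n");
        pthread_mutex_unlock(&tx_lock);
        return true;             /* work done on this pass */
    }
    printf("lock busy, would re-arm the work item\n");
    return false;                /* caller schedules a delayed retry */
}

int main(void)
{
    tx_work();                   /* lock free: runs immediately */
    pthread_mutex_lock(&tx_lock);/* simulate a sender holding tx_lock */
    tx_work();                   /* busy: reports that it would re-arm */
    pthread_mutex_unlock(&tx_lock);
    return 0;
}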
+ */ + schedule_delayed_work(&ctx->tx_work.work, msecs_to_jiffies(10)); + } } static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index f0c2293f1d3b..7d17601ceee7 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2104,7 +2104,8 @@ out: #define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768)) #if IS_ENABLED(CONFIG_AF_UNIX_OOB) -static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other) +static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other, + struct scm_cookie *scm, bool fds_sent) { struct unix_sock *ousk = unix_sk(other); struct sk_buff *skb; @@ -2115,6 +2116,11 @@ static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other if (!skb) return err; + err = unix_scm_to_skb(scm, skb, !fds_sent); + if (err < 0) { + kfree_skb(skb); + return err; + } skb_put(skb, 1); err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1); @@ -2242,7 +2248,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, #if IS_ENABLED(CONFIG_AF_UNIX_OOB) if (msg->msg_flags & MSG_OOB) { - err = queue_oob(sock, msg, other); + err = queue_oob(sock, msg, other, &scm, fds_sent); if (err) goto out_err; sent++; diff --git a/net/unix/unix_bpf.c b/net/unix/unix_bpf.c index e9bf15513961..2f9d8271c6ec 100644 --- a/net/unix/unix_bpf.c +++ b/net/unix/unix_bpf.c @@ -54,6 +54,9 @@ static int unix_bpf_recvmsg(struct sock *sk, struct msghdr *msg, struct sk_psock *psock; int copied; + if (!len) + return 0; + psock = sk_psock_get(sk); if (unlikely(!psock)) return __unix_recvmsg(sk, msg, len, flags); diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 89fc5683ed26..6e87d2cd8345 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -1486,8 +1486,6 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev, connect->key = NULL; connect->key_len = 0; connect->key_idx = 0; - connect->crypto.cipher_group = 0; - connect->crypto.n_ciphers_pairwise = 0; } wdev->connect_keys = connkeys; diff --git a/scripts/checkkconfigsymbols.py b/scripts/checkkconfigsymbols.py index 217d21abc86e..36c920e71313 100755 --- a/scripts/checkkconfigsymbols.py +++ b/scripts/checkkconfigsymbols.py @@ -115,7 +115,7 @@ def parse_options(): return args -def main(): +def print_undefined_symbols(): """Main function of this module.""" args = parse_options() @@ -467,5 +467,16 @@ def parse_kconfig_file(kfile): return defined, references +def main(): + try: + print_undefined_symbols() + except BrokenPipeError: + # Python flushes standard streams on exit; redirect remaining output + # to devnull to avoid another BrokenPipeError at shutdown + devnull = os.open(os.devnull, os.O_WRONLY) + os.dup2(devnull, sys.stdout.fileno()) + sys.exit(1) # Python exits with error code 1 on EPIPE + + if __name__ == "__main__": main() diff --git a/scripts/clang-tools/run-clang-tools.py b/scripts/clang-tools/run-clang-tools.py index 56f2ec8f0f40..3266708a8658 100755 --- a/scripts/clang-tools/run-clang-tools.py +++ b/scripts/clang-tools/run-clang-tools.py @@ -61,14 +61,21 @@ def run_analysis(entry): def main(): - args = parse_arguments() + try: + args = parse_arguments() - lock = multiprocessing.Lock() - pool = multiprocessing.Pool(initializer=init, initargs=(lock, args)) - # Read JSON data into the datastore variable - with open(args.path, "r") as f: - datastore = json.load(f) - pool.map(run_analysis, datastore) + lock = multiprocessing.Lock() + pool = multiprocessing.Pool(initializer=init, initargs=(lock, args)) + # Read 
JSON data into the datastore variable + with open(args.path, "r") as f: + datastore = json.load(f) + pool.map(run_analysis, datastore) + except BrokenPipeError: + # Python flushes standard streams on exit; redirect remaining output + # to devnull to avoid another BrokenPipeError at shutdown + devnull = os.open(os.devnull, os.O_WRONLY) + os.dup2(devnull, sys.stdout.fileno()) + sys.exit(1) # Python exits with error code 1 on EPIPE if __name__ == "__main__": diff --git a/scripts/diffconfig b/scripts/diffconfig index d5da5fa05d1d..43f0f3d273ae 100755 --- a/scripts/diffconfig +++ b/scripts/diffconfig @@ -65,7 +65,7 @@ def print_config(op, config, value, new_value): else: print(" %s %s -> %s" % (config, value, new_value)) -def main(): +def show_diff(): global merge_style # parse command line args @@ -129,4 +129,16 @@ def main(): for config in new: print_config("+", config, None, b[config]) -main() +def main(): + try: + show_diff() + except BrokenPipeError: + # Python flushes standard streams on exit; redirect remaining output + # to devnull to avoid another BrokenPipeError at shutdown + devnull = os.open(os.devnull, os.O_WRONLY) + os.dup2(devnull, sys.stdout.fileno()) + sys.exit(1) # Python exits with error code 1 on EPIPE + + +if __name__ == '__main__': + main() diff --git a/sound/soc/apple/mca.c b/sound/soc/apple/mca.c index 24381c42eb54..64750db9b963 100644 --- a/sound/soc/apple/mca.c +++ b/sound/soc/apple/mca.c @@ -101,7 +101,6 @@ #define SERDES_CONF_UNK3 BIT(14) #define SERDES_CONF_NO_DATA_FEEDBACK BIT(15) #define SERDES_CONF_SYNC_SEL GENMASK(18, 16) -#define SERDES_CONF_SOME_RST BIT(19) #define REG_TX_SERDES_BITSTART 0x08 #define REG_RX_SERDES_BITSTART 0x0c #define REG_TX_SERDES_SLOTMASK 0x0c @@ -203,15 +202,24 @@ static void mca_fe_early_trigger(struct snd_pcm_substream *substream, int cmd, case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL, + FIELD_PREP(SERDES_CONF_SYNC_SEL, 0)); + mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL, + FIELD_PREP(SERDES_CONF_SYNC_SEL, 7)); mca_modify(cl, serdes_unit + REG_SERDES_STATUS, SERDES_STATUS_EN | SERDES_STATUS_RST, SERDES_STATUS_RST); - mca_modify(cl, serdes_conf, SERDES_CONF_SOME_RST, - SERDES_CONF_SOME_RST); - readl_relaxed(cl->base + serdes_conf); - mca_modify(cl, serdes_conf, SERDES_STATUS_RST, 0); - WARN_ON(readl_relaxed(cl->base + REG_SERDES_STATUS) & + /* + * Experiments suggest that it takes at most ~1 us + * for the bit to clear, so wait 2 us for good measure. 
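The MCA trigger fix above gives the self-clearing reset bit a little longer than its observed worst case (2 us for a roughly 1 us clear) and only warns afterwards. A rough userspace sketch of that bounded-wait-then-check pattern, with the status register faked by a plain variable.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define STATUS_RST 0x1u

static unsigned int hw_status = STATUS_RST; /* stands in for the MMIO register */

static bool reset_done_after_wait(unsigned int wait_us)
{
    usleep(wait_us);             /* worst case seen is ~1 us; wait 2 us */
    return !(hw_status & STATUS_RST);
}

int main(void)
{
    if (!reset_done_after_wait(2))
        fprintf(stderr, "warning: reset bit still set\n");

    hw_status &= ~STATUS_RST;    /* pretend the block finished the reset */
    if (reset_done_after_wait(2))
        printf("reset completed within the expected window\n");
    return 0;
}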
+ */ + udelay(2); + WARN_ON(readl_relaxed(cl->base + serdes_unit + REG_SERDES_STATUS) & SERDES_STATUS_RST); + mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL, + FIELD_PREP(SERDES_CONF_SYNC_SEL, 0)); + mca_modify(cl, serdes_conf, SERDES_CONF_SYNC_SEL, + FIELD_PREP(SERDES_CONF_SYNC_SEL, cl->no + 1)); break; default: break; @@ -942,10 +950,17 @@ static int mca_pcm_new(struct snd_soc_component *component, chan = mca_request_dma_channel(cl, i); if (IS_ERR_OR_NULL(chan)) { + mca_pcm_free(component, rtd->pcm); + + if (chan && PTR_ERR(chan) == -EPROBE_DEFER) + return PTR_ERR(chan); + dev_err(component->dev, "unable to obtain DMA channel (stream %d cluster %d): %pe\n", i, cl->no, chan); - mca_pcm_free(component, rtd->pcm); - return -EINVAL; + + if (!chan) + return -EINVAL; + return PTR_ERR(chan); } cl->dma_chans[i] = chan; diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 7022e6286e6c..3f16ad1c3758 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -2039,6 +2039,7 @@ config SND_SOC_WSA883X config SND_SOC_ZL38060 tristate "Microsemi ZL38060 Connected Home Audio Processor" depends on SPI_MASTER + depends on GPIOLIB select REGMAP help Support for ZL38060 Connected Home Audio Processor from Microsemi, diff --git a/sound/soc/codecs/adau7118.c b/sound/soc/codecs/adau7118.c index bbb097249887..a663d37e5776 100644 --- a/sound/soc/codecs/adau7118.c +++ b/sound/soc/codecs/adau7118.c @@ -444,22 +444,6 @@ static const struct snd_soc_component_driver adau7118_component_driver = { .endianness = 1, }; -static void adau7118_regulator_disable(void *data) -{ - struct adau7118_data *st = data; - int ret; - /* - * If we fail to disable DVDD, don't bother in trying IOVDD. We - * actually don't want to be left in the situation where DVDD - * is enabled and IOVDD is disabled. - */ - ret = regulator_disable(st->dvdd); - if (ret) - return; - - regulator_disable(st->iovdd); -} - static int adau7118_regulator_setup(struct adau7118_data *st) { st->iovdd = devm_regulator_get(st->dev, "iovdd"); @@ -481,8 +465,7 @@ static int adau7118_regulator_setup(struct adau7118_data *st) regcache_cache_only(st->map, true); } - return devm_add_action_or_reset(st->dev, adau7118_regulator_disable, - st); + return 0; } static int adau7118_parset_dt(const struct adau7118_data *st) diff --git a/sound/soc/mediatek/mt8195/mt8195-dai-etdm.c b/sound/soc/mediatek/mt8195/mt8195-dai-etdm.c index c2e268054773..f2c9a1fdbe0d 100644 --- a/sound/soc/mediatek/mt8195/mt8195-dai-etdm.c +++ b/sound/soc/mediatek/mt8195/mt8195-dai-etdm.c @@ -2567,6 +2567,9 @@ static void mt8195_dai_etdm_parse_of(struct mtk_base_afe *afe) /* etdm in only */ for (i = 0; i < 2; i++) { + dai_id = ETDM_TO_DAI_ID(i); + etdm_data = afe_priv->dai_priv[dai_id]; + ret = snprintf(prop, sizeof(prop), "mediatek,%s-chn-disabled", of_afe_etdms[i].name); diff --git a/sound/usb/power.c b/sound/usb/power.c index 606a2cb23eab..2488a5fb2f50 100644 --- a/sound/usb/power.c +++ b/sound/usb/power.c @@ -104,3 +104,4 @@ int snd_usb_power_domain_set(struct snd_usb_audio *chip, return 0; } +EXPORT_SYMBOL_GPL(snd_usb_power_domain_set); diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c index 8d35893b2fa8..6a00a6eecaef 100644 --- a/tools/iio/iio_utils.c +++ b/tools/iio/iio_utils.c @@ -264,6 +264,7 @@ int iioutils_get_param_float(float *output, const char *param_name, if (fscanf(sysfsfp, "%f", output) != 1) ret = errno ? 
-errno : -ENODATA; + fclose(sysfsfp); break; } error_free_filename: @@ -345,9 +346,9 @@ int build_channel_array(const char *device_dir, int buffer_idx, } sysfsfp = fopen(filename, "r"); + free(filename); if (!sysfsfp) { ret = -errno; - free(filename); goto error_close_dir; } @@ -357,7 +358,6 @@ int build_channel_array(const char *device_dir, int buffer_idx, if (fclose(sysfsfp)) perror("build_channel_array(): Failed to close file"); - free(filename); goto error_close_dir; } if (ret == 1) @@ -365,11 +365,9 @@ int build_channel_array(const char *device_dir, int buffer_idx, if (fclose(sysfsfp)) { ret = -errno; - free(filename); goto error_close_dir; } - free(filename); } *ci_array = malloc(sizeof(**ci_array) * (*counter)); @@ -395,9 +393,9 @@ int build_channel_array(const char *device_dir, int buffer_idx, } sysfsfp = fopen(filename, "r"); + free(filename); if (!sysfsfp) { ret = -errno; - free(filename); count--; goto error_cleanup_array; } @@ -405,20 +403,17 @@ int build_channel_array(const char *device_dir, int buffer_idx, errno = 0; if (fscanf(sysfsfp, "%i", &current_enabled) != 1) { ret = errno ? -errno : -ENODATA; - free(filename); count--; goto error_cleanup_array; } if (fclose(sysfsfp)) { ret = -errno; - free(filename); count--; goto error_cleanup_array; } if (!current_enabled) { - free(filename); count--; continue; } @@ -429,7 +424,6 @@ int build_channel_array(const char *device_dir, int buffer_idx, strlen(ent->d_name) - strlen("_en")); if (!current->name) { - free(filename); ret = -ENOMEM; count--; goto error_cleanup_array; } @@ -439,7 +433,6 @@ int build_channel_array(const char *device_dir, int buffer_idx, ret = iioutils_break_up_name(current->name, &current->generic_name); if (ret) { - free(filename); free(current->name); count--; goto error_cleanup_array; } @@ -450,17 +443,16 @@ int build_channel_array(const char *device_dir, int buffer_idx, scan_el_dir, current->name); if (ret < 0) { - free(filename); ret = -ENOMEM; goto error_cleanup_array; } sysfsfp = fopen(filename, "r"); + free(filename); if (!sysfsfp) { ret = -errno; - fprintf(stderr, "failed to open %s\n", - filename); - free(filename); + fprintf(stderr, "failed to open %s/%s_index\n", + scan_el_dir, current->name); goto error_cleanup_array; } @@ -470,17 +462,14 @@ int build_channel_array(const char *device_dir, int buffer_idx, if (fclose(sysfsfp)) perror("build_channel_array(): Failed to close file"); - free(filename); goto error_cleanup_array; } if (fclose(sysfsfp)) { ret = -errno; - free(filename); goto error_cleanup_array; } - free(filename); /* Find the scale */ ret = iioutils_get_param_float(&current->scale, "scale", diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 0c1b6acad141..730b49e255e4 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -668,6 +668,7 @@ static int create_static_call_sections(struct objtool_file *file) if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR, STATIC_CALL_TRAMP_PREFIX_LEN)) { WARN("static_call: trampoline name malformed: %s", key_name); + free(key_name); return -1; } tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN; @@ -677,6 +678,7 @@ static int create_static_call_sections(struct objtool_file *file) if (!key_sym) { if (!opts.module) { WARN("static_call: can't find static_call_key symbol: %s", tmp); + free(key_name); return -1; } diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index e2ce5f294cbd..333d8941ce4d 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -538,6 +538,7 @@ static int
perf_event__repipe_buildid_mmap2(struct perf_tool *tool, dso->hit = 1; } dso__put(dso); + perf_event__repipe(tool, event, sample, machine); return 0; } diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 978fdc60b4e8..f6427e3a4742 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -528,12 +528,7 @@ static int enable_counters(void) return err; } - /* - * We need to enable counters only if: - * - we don't have tracee (attaching to task or cpu) - * - we have initial delay configured - */ - if (!target__none(&target)) { + if (!target__enable_on_exec(&target)) { if (!all_counters_use_bpf) evlist__enable(evsel_list); } @@ -906,7 +901,7 @@ try_again_reset: return err; } - if (stat_config.initial_delay) { + if (target.initial_delay) { pr_info(EVLIST_DISABLED_MSG); } else { err = enable_counters(); @@ -918,8 +913,8 @@ try_again_reset: if (forks) evlist__start_workload(evsel_list); - if (stat_config.initial_delay > 0) { - usleep(stat_config.initial_delay * USEC_PER_MSEC); + if (target.initial_delay > 0) { + usleep(target.initial_delay * USEC_PER_MSEC); err = enable_counters(); if (err) return -1; @@ -1243,7 +1238,7 @@ static struct option stat_options[] = { "aggregate counts per thread", AGGR_THREAD), OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode, "aggregate counts per numa node", AGGR_NODE), - OPT_INTEGER('D', "delay", &stat_config.initial_delay, + OPT_INTEGER('D', "delay", &target.initial_delay, "ms to wait before starting measurement after program start (-1: start with events disabled)"), OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL, "Only print computed metrics. No raw values", enable_metric_only), diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 8ec8bb4a9912..b63b3a312991 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -583,11 +583,7 @@ int create_perf_stat_counter(struct evsel *evsel, if (evsel__is_group_leader(evsel)) { attr->disabled = 1; - /* - * In case of initial_delay we enable tracee - * events manually. - */ - if (target__none(target) && !config->initial_delay) + if (target__enable_on_exec(target)) attr->enable_on_exec = 1; } diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h index 35c940d7f29c..05c5125d7f41 100644 --- a/tools/perf/util/stat.h +++ b/tools/perf/util/stat.h @@ -145,7 +145,6 @@ struct perf_stat_config { FILE *output; unsigned int interval; unsigned int timeout; - int initial_delay; unsigned int unit_width; unsigned int metric_only_len; int times; diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h index daec6cba500d..880f1af7f6ad 100644 --- a/tools/perf/util/target.h +++ b/tools/perf/util/target.h @@ -18,6 +18,7 @@ struct target { bool per_thread; bool use_bpf; bool hybrid; + int initial_delay; const char *attr_map; }; @@ -72,6 +73,17 @@ static inline bool target__none(struct target *target) return !target__has_task(target) && !target__has_cpu(target); } +static inline bool target__enable_on_exec(struct target *target) +{ + /* + * Normally enable_on_exec should be set if: + * 1) The tracee process is forked (not attaching to existed task or cpu). + * 2) And initial_delay is not configured. + * Otherwise, we enable tracee events manually. 
+ */ + return target__none(target) && !target->initial_delay; +} + static inline bool target__has_per_thread(struct target *target) { return target->system_wide && target->per_thread; diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh index 924ecb3f1f73..dd40d9f6f259 100755 --- a/tools/testing/selftests/netfilter/nft_nat.sh +++ b/tools/testing/selftests/netfilter/nft_nat.sh @@ -404,6 +404,8 @@ EOF echo SERVER-$family | ip netns exec "$ns1" timeout 5 socat -u STDIN TCP-LISTEN:2000 & sc_s=$! + sleep 1 + result=$(ip netns exec "$ns0" timeout 1 socat TCP:$daddr:2000 STDOUT) if [ "$result" = "SERVER-inet" ];then diff --git a/tools/testing/selftests/netfilter/rpath.sh b/tools/testing/selftests/netfilter/rpath.sh index f7311e66d219..5289c8447a41 100755 --- a/tools/testing/selftests/netfilter/rpath.sh +++ b/tools/testing/selftests/netfilter/rpath.sh @@ -62,10 +62,16 @@ ip -net "$ns1" a a fec0:42::2/64 dev v0 nodad ip -net "$ns2" a a fec0:42::1/64 dev d0 nodad # firewall matches to test -[ -n "$iptables" ] && ip netns exec "$ns2" \ - "$iptables" -t raw -A PREROUTING -s 192.168.0.0/16 -m rpfilter -[ -n "$ip6tables" ] && ip netns exec "$ns2" \ - "$ip6tables" -t raw -A PREROUTING -s fec0::/16 -m rpfilter +[ -n "$iptables" ] && { + common='-t raw -A PREROUTING -s 192.168.0.0/16' + ip netns exec "$ns2" "$iptables" $common -m rpfilter + ip netns exec "$ns2" "$iptables" $common -m rpfilter --invert +} +[ -n "$ip6tables" ] && { + common='-t raw -A PREROUTING -s fec0::/16' + ip netns exec "$ns2" "$ip6tables" $common -m rpfilter + ip netns exec "$ns2" "$ip6tables" $common -m rpfilter --invert +} [ -n "$nft" ] && ip netns exec "$ns2" $nft -f - </dev/null } -testrun() { - # clear counters first +clear_counters() { [ -n "$iptables" ] && ip netns exec "$ns2" "$iptables" -t raw -Z [ -n "$ip6tables" ] && ip netns exec "$ns2" "$ip6tables" -t raw -Z if [ -n "$nft" ]; then @@ -111,6 +121,10 @@ testrun() { ip netns exec "$ns2" $nft -s list table inet t; ) | ip netns exec "$ns2" $nft -f - fi +} + +testrun() { + clear_counters # test 1: martian traffic should fail rpfilter matches netns_ping "$ns1" -I v0 192.168.42.1 && \ @@ -120,9 +134,13 @@ testrun() { ipt_zero_rule "$iptables" || die "iptables matched martian" ipt_zero_rule "$ip6tables" || die "ip6tables matched martian" + ipt_zero_reverse_rule "$iptables" && die "iptables not matched martian" + ipt_zero_reverse_rule "$ip6tables" && die "ip6tables not matched martian" nft_zero_rule ip || die "nft IPv4 matched martian" nft_zero_rule ip6 || die "nft IPv6 matched martian" + clear_counters + # test 2: rpfilter match should pass for regular traffic netns_ping "$ns1" 192.168.23.1 || \ die "regular ping 192.168.23.1 failed" @@ -131,6 +149,8 @@ testrun() { ipt_zero_rule "$iptables" && die "iptables match not effective" ipt_zero_rule "$ip6tables" && die "ip6tables match not effective" + ipt_zero_reverse_rule "$iptables" || die "iptables match over-effective" + ipt_zero_reverse_rule "$ip6tables" || die "ip6tables match over-effective" nft_zero_rule ip && die "nft IPv4 match not effective" nft_zero_rule ip6 && die "nft IPv6 match not effective" diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tcindex.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tcindex.json deleted file mode 100644 index 44901db70376..000000000000 --- a/tools/testing/selftests/tc-testing/tc-tests/filters/tcindex.json +++ /dev/null @@ -1,227 +0,0 @@ -[ - { - "id": "8293", - "name": "Add tcindex filter with default action", - 
"category": [ - "filter", - "tcindex" - ], - "plugins": { - "requires": "nsPlugin" - }, - "setup": [ - "$TC qdisc add dev $DEV1 ingress" - ], - "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1", - "expExitCode": "0", - "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex", - "matchPattern": "^filter parent ffff: protocol ip pref 1 tcindex chain 0 handle 0x0001 classid 1:1", - "matchCount": "1", - "teardown": [ - "$TC qdisc del dev $DEV1 ingress" - ] - }, - { - "id": "7281", - "name": "Add tcindex filter with hash size and pass action", - "category": [ - "filter", - "tcindex" - ], - "plugins": { - "requires": "nsPlugin" - }, - "setup": [ - "$TC qdisc add dev $DEV1 ingress" - ], - "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex hash 32 fall_through classid 1:1 action pass", - "expExitCode": "0", - "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex", - "matchPattern": "^filter parent ffff: protocol ip pref.*tcindex chain [0-9]+ handle 0x0001 classid 1:1.*action order [0-9]+: gact action pass", - "matchCount": "1", - "teardown": [ - "$TC qdisc del dev $DEV1 ingress" - ] - }, - { - "id": "b294", - "name": "Add tcindex filter with mask shift and reclassify action", - "category": [ - "filter", - "tcindex" - ], - "plugins": { - "requires": "nsPlugin" - }, - "setup": [ - "$TC qdisc add dev $DEV1 ingress" - ], - "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex hash 32 mask 1 shift 2 fall_through classid 1:1 action reclassify", - "expExitCode": "0", - "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex", - "matchPattern": "^filter parent ffff: protocol ip pref.*tcindex chain [0-9]+ handle 0x0001 classid 1:1.*action order [0-9]+: gact action reclassify", - "matchCount": "1", - "teardown": [ - "$TC qdisc del dev $DEV1 ingress" - ] - }, - { - "id": "0532", - "name": "Add tcindex filter with pass_on and continue actions", - "category": [ - "filter", - "tcindex" - ], - "plugins": { - "requires": "nsPlugin" - }, - "setup": [ - "$TC qdisc add dev $DEV1 ingress" - ], - "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex hash 32 mask 1 shift 2 pass_on classid 1:1 action continue", - "expExitCode": "0", - "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex", - "matchPattern": "^filter parent ffff: protocol ip pref.*tcindex chain [0-9]+ handle 0x0001 classid 1:1.*action order [0-9]+: gact action continue", - "matchCount": "1", - "teardown": [ - "$TC qdisc del dev $DEV1 ingress" - ] - }, - { - "id": "d473", - "name": "Add tcindex filter with pipe action", - "category": [ - "filter", - "tcindex" - ], - "plugins": { - "requires": "nsPlugin" - }, - "setup": [ - "$TC qdisc add dev $DEV1 ingress" - ], - "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex hash 32 mask 1 shift 2 fall_through classid 1:1 action pipe", - "expExitCode": "0", - "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex", - "matchPattern": "^filter parent ffff: protocol ip pref.*tcindex chain [0-9]+ handle 0x0001 classid 1:1.*action order [0-9]+: gact action pipe", - "matchCount": "1", - "teardown": [ - "$TC qdisc del dev $DEV1 ingress" - ] - }, - { - "id": "2940", - "name": "Add tcindex filter with miltiple actions", - "category": [ - "filter", - "tcindex" 
- ], - "plugins": { - "requires": "nsPlugin" - }, - "setup": [ - "$TC qdisc add dev $DEV1 ingress" - ], - "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 7 tcindex hash 32 mask 1 shift 2 fall_through classid 1:1 action skbedit mark 7 pipe action gact drop", - "expExitCode": "0", - "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 7 protocol ip tcindex", - "matchPattern": "^filter parent ffff: protocol ip pref 7 tcindex.*handle 0x0001.*action.*skbedit.*mark 7 pipe.*action.*gact action drop", - "matchCount": "1", - "teardown": [ - "$TC qdisc del dev $DEV1 ingress" - ] - }, - { - "id": "1893", - "name": "List tcindex filters", - "category": [ - "filter", - "tcindex" - ], - "plugins": { - "requires": "nsPlugin" - }, - "setup": [ - "$TC qdisc add dev $DEV1 ingress", - "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1", - "$TC filter add dev $DEV1 parent ffff: handle 2 protocol ip prio 1 tcindex classid 1:1" - ], - "cmdUnderTest": "$TC filter show dev $DEV1 parent ffff:", - "expExitCode": "0", - "verifyCmd": "$TC filter show dev $DEV1 parent ffff:", - "matchPattern": "handle 0x000[0-9]+ classid 1:1", - "matchCount": "2", - "teardown": [ - "$TC qdisc del dev $DEV1 ingress" - ] - }, - { - "id": "2041", - "name": "Change tcindex filter with pass action", - "category": [ - "filter", - "tcindex" - ], - "plugins": { - "requires": "nsPlugin" - }, - "setup": [ - "$TC qdisc add dev $DEV1 ingress", - "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action drop" - ], - "cmdUnderTest": "$TC filter change dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action pass", - "expExitCode": "0", - "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex", - "matchPattern": "handle 0x0001 classid 1:1.*action order [0-9]+: gact action pass", - "matchCount": "1", - "teardown": [ - "$TC qdisc del dev $DEV1 ingress" - ] - }, - { - "id": "9203", - "name": "Replace tcindex filter with pass action", - "category": [ - "filter", - "tcindex" - ], - "plugins": { - "requires": "nsPlugin" - }, - "setup": [ - "$TC qdisc add dev $DEV1 ingress", - "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action drop" - ], - "cmdUnderTest": "$TC filter replace dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action pass", - "expExitCode": "0", - "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex", - "matchPattern": "handle 0x0001 classid 1:1.*action order [0-9]+: gact action pass", - "matchCount": "1", - "teardown": [ - "$TC qdisc del dev $DEV1 ingress" - ] - }, - { - "id": "7957", - "name": "Delete tcindex filter with drop action", - "category": [ - "filter", - "tcindex" - ], - "plugins": { - "requires": "nsPlugin" - }, - "setup": [ - "$TC qdisc add dev $DEV1 ingress", - "$TC filter add dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action drop" - ], - "cmdUnderTest": "$TC filter del dev $DEV1 parent ffff: handle 1 protocol ip prio 1 tcindex classid 1:1 action drop", - "expExitCode": "0", - "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 prio 1 protocol ip tcindex", - "matchPattern": "handle 0x0001 classid 1:1.*action order [0-9]+: gact action drop", - "matchCount": "0", - "teardown": [ - "$TC qdisc del dev $DEV1 ingress" - ] - } -]
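For reference, the BrokenPipeError guard added to scripts/diffconfig (and the other helper script) above follows a common standalone pattern: wrap the former top-level call in main(), catch BrokenPipeError, point stdout at /dev/null so the interpreter's final flush does not raise again, and exit with status 1. A minimal runnable sketch of that pattern, with a hypothetical body() standing in for show_diff(), not part of the patch itself:

#!/usr/bin/env python3
# Minimal sketch of the BrokenPipeError guard used in the hunks above;
# body() is a hypothetical stand-in for show_diff() or any function that
# writes a lot of output to stdout.
import os
import sys

def body():
    for i in range(100000):
        print(i)

def main():
    try:
        body()
    except BrokenPipeError:
        # Python flushes standard streams on exit; redirect remaining output
        # to devnull to avoid another BrokenPipeError at shutdown
        devnull = os.open(os.devnull, os.O_WRONLY)
        os.dup2(devnull, sys.stdout.fileno())
        sys.exit(1)  # Python exits with error code 1 on EPIPE

if __name__ == "__main__":
    main()

Running the sketch as "./sketch.py | head -n1" would previously have printed a BrokenPipeError traceback once head closed the pipe; with the guard the script exits quietly with status 1, which is the behaviour the diffconfig change is after.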