This is the 6.1.57 stable release
-----BEGIN PGP SIGNATURE-----
iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmUlrb4ACgkQONu9yGCS
aT4b+hAAgvFC6P+XmyyNXJ9ISHLkgSlcIAdatb+qeOCUtdiWHqfxIha13FdnCdhL
WS2c/O9ORfAzjFwnYWF6LBwH8ArxRSkAXrGCMuCkEFBP3cG/j2HD+XLAAYEuBjjb
sf1fw8e8VSgaPEOnwXie5rTfAY4VnZKEtZjAxjyIQnJKVVKfxQRb8CyaWDPzPD0Z
tL/iABt7UWNHZayHTHsh0YhF2UhXtOjHinWigEarcZQEvOB2qRQtFl71cnqosi+t
3ZRZzepH7/Fx3v6/H/6PNq+GSI/ZzhOiCQolVV5YcMGHXsW9cP6arjLUxco5pzpk
pEg0vdMq47JOZYQ2pIewG4t7+NLmFIxCRFnKQVbxeFNSY9c1jhd8g5lhx9YEXwjT
BzMtV5DnZoaoMdq2P1STw/+RVYrDI1Lm6jqfgw/D27b7LzQ13VsGM9BJ1rCs8Hm7
UhWyjwFcgo0vhpfML1RF0RtT9Mo5SOnpGPfpbFdjg8jdXlGknNH0QsH+EY/BpF8l
h77P5BvoNIjsIN3B1YunfXtFXhx3h0sI8zZrqHR+zhOeWGsXcqQ5mZ/lYdYKkKuH
R8LRB7shPndF4xdRX0uRXwomcXhs+60eA5xEvE9u0CqqdpXfQN5oTwixfCm2C8MS
O5Fc7hfvK11XtR3ja+y3KRhiNG3YsfW2PXnlOfZxMZ6iPqXtA/o=
=5/pn
-----END PGP SIGNATURE-----
Merge 6.1.57 into android14-6.1-lts
Changes in 6.1.57
spi: zynqmp-gqspi: fix clock imbalance on probe failure
ASoC: soc-utils: Export snd_soc_dai_is_dummy() symbol
ASoC: tegra: Fix redundant PLLA and PLLA_OUT0 updates
mptcp: rename timer related helper to less confusing names
mptcp: fix dangling connection hang-up
mptcp: annotate lockless accesses to sk->sk_err
mptcp: move __mptcp_error_report in protocol.c
mptcp: process pending subflow error on close
ata,scsi: do not issue START STOP UNIT on resume
scsi: sd: Differentiate system and runtime start/stop management
scsi: sd: Do not issue commands to suspended disks on shutdown
scsi: core: Improve type safety of scsi_rescan_device()
scsi: Do not attempt to rescan suspended devices
ata: libata-scsi: Fix delayed scsi_rescan_device() execution
NFS: Cleanup unused rpc_clnt variable
NFS: rename nfs_client_kset to nfs_kset
NFSv4: Fix a state manager thread deadlock regression
mm/memory: add vm_normal_folio()
mm/mempolicy: convert queue_pages_pmd() to queue_folios_pmd()
mm/mempolicy: convert queue_pages_pte_range() to queue_folios_pte_range()
mm/mempolicy: convert migrate_page_add() to migrate_folio_add()
mm: mempolicy: keep VMA walk if both MPOL_MF_STRICT and MPOL_MF_MOVE are specified
mm/page_alloc: always remove pages from temporary list
mm/page_alloc: leave IRQs enabled for per-cpu page allocations
mm: page_alloc: fix CMA and HIGHATOMIC landing on the wrong buddy list
ring-buffer: remove obsolete comment for free_buffer_page()
ring-buffer: Fix bytes info in per_cpu buffer stats
btrfs: use struct qstr instead of name and namelen pairs
btrfs: setup qstr from dentrys using fscrypt helper
btrfs: use struct fscrypt_str instead of struct qstr
Revert "NFSv4: Retry LOCK on OLD_STATEID during delegation return"
arm64: Avoid repeated AA64MMFR1_EL1 register read on pagefault path
net: add sysctl accept_ra_min_rtr_lft
net: change accept_ra_min_rtr_lft to affect all RA lifetimes
net: release reference to inet6_dev pointer
arm64: cpufeature: Fix CLRBHB and BC detection
drm/amd/display: Adjust the MST resume flow
iommu/arm-smmu-v3: Set TTL invalidation hint better
iommu/arm-smmu-v3: Avoid constructing invalid range commands
rbd: move rbd_dev_refresh() definition
rbd: decouple header read-in from updating rbd_dev->header
rbd: decouple parent info read-in from updating rbd_dev
rbd: take header_rwsem in rbd_dev_refresh() only when updating
block: fix use-after-free of q->q_usage_counter
hwmon: (nzxt-smart2) Add device id
hwmon: (nzxt-smart2) add another USB ID
i40e: fix the wrong PTP frequency calculation
scsi: zfcp: Fix a double put in zfcp_port_enqueue()
iommu/vt-d: Avoid memory allocation in iommu_suspend()
vringh: don't use vringh_kiov_advance() in vringh_iov_xfer()
net: ethernet: mediatek: disable irq before schedule napi
mptcp: userspace pm allow creating id 0 subflow
qed/red_ll2: Fix undefined behavior bug in struct qed_ll2_info
Bluetooth: hci_codec: Fix leaking content of local_codecs
Bluetooth: hci_sync: Fix handling of HCI_QUIRK_STRICT_DUPLICATE_FILTER
wifi: mwifiex: Fix tlv_buf_left calculation
md/raid5: release batch_last before waiting for another stripe_head
PCI: qcom: Fix IPQ8074 enumeration
net: replace calls to sock->ops->connect() with kernel_connect()
net: prevent rewrite of msg_name in sock_sendmsg()
drm/amd: Fix detection of _PR3 on the PCIe root port
drm/amd: Fix logic error in sienna_cichlid_update_pcie_parameters()
arm64: Add Cortex-A520 CPU part definition
arm64: errata: Add Cortex-A520 speculative unprivileged load workaround
HID: sony: Fix a potential memory leak in sony_probe()
ubi: Refuse attaching if mtd's erasesize is 0
erofs: fix memory leak of LZMA global compressed deduplication
wifi: iwlwifi: dbg_ini: fix structure packing
wifi: iwlwifi: mvm: Fix a memory corruption issue
wifi: cfg80211: hold wiphy lock in auto-disconnect
wifi: cfg80211: move wowlan disable under locks
wifi: cfg80211: add a work abstraction with special semantics
wifi: cfg80211: fix cqm_config access race
wifi: cfg80211: add missing kernel-doc for cqm_rssi_work
wifi: mwifiex: Fix oob check condition in mwifiex_process_rx_packet
leds: Drop BUG_ON check for LED_COLOR_ID_MULTI
bpf: Fix tr dereferencing
regulator: mt6358: Drop *_SSHUB regulators
regulator: mt6358: Use linear voltage helpers for single range regulators
regulator: mt6358: split ops for buck and linear range LDO regulators
Bluetooth: Delete unused hci_req_prepare_suspend() declaration
Bluetooth: ISO: Fix handling of listen for unicast
drivers/net: process the result of hdlc_open() and add call of hdlc_close() in uhdlc_close()
wifi: mt76: mt76x02: fix MT76x0 external LNA gain handling
perf/x86/amd/core: Fix overflow reset on hotplug
regmap: rbtree: Fix wrong register marked as in-cache when creating new node
wifi: mac80211: fix potential key use-after-free
perf/x86/amd: Do not WARN() on every IRQ
iommu/mediatek: Fix share pgtable for iova over 4GB
regulator/core: regulator_register: set device->class earlier
ima: Finish deprecation of IMA_TRUSTED_KEYRING Kconfig
scsi: target: core: Fix deadlock due to recursive locking
ima: rework CONFIG_IMA dependency block
NFSv4: Fix a nfs4_state_manager() race
bpf: tcp_read_skb needs to pop skb regardless of seq
bpf, sockmap: Do not inc copied_seq when PEEK flag set
bpf, sockmap: Reject sk_msg egress redirects to non-TCP sockets
modpost: add missing else to the "of" check
net: fix possible store tearing in neigh_periodic_work()
bpf: Add BPF_FIB_LOOKUP_SKIP_NEIGH for bpf_fib_lookup
neighbour: annotate lockless accesses to n->nud_state
neighbour: switch to standard rcu, instead of rcu_bh
neighbour: fix data-races around n->output
ipv4, ipv6: Fix handling of transhdrlen in __ip{,6}_append_data()
ptp: ocp: Fix error handling in ptp_ocp_device_init
net: dsa: mv88e6xxx: Avoid EEPROM timeout when EEPROM is absent
ipv6: tcp: add a missing nf_reset_ct() in 3WHS handling
net: usb: smsc75xx: Fix uninit-value access in __smsc75xx_read_reg
net: nfc: llcp: Add lock when modifying device list
net: ethernet: ti: am65-cpsw: Fix error code in am65_cpsw_nuss_init_tx_chns()
ibmveth: Remove condition to recompute TCP header checksum.
netfilter: handle the connecting collision properly in nf_conntrack_proto_sctp
selftests: netfilter: Test nf_tables audit logging
selftests: netfilter: Extend nft_audit.sh
netfilter: nf_tables: Deduplicate nft_register_obj audit logs
netfilter: nf_tables: nft_set_rbtree: fix spurious insertion failure
ipv4: Set offload_failed flag in fibmatch results
net: stmmac: dwmac-stm32: fix resume on STM32 MCU
tipc: fix a potential deadlock on &tx->lock
tcp: fix quick-ack counting to count actual ACKs of new data
tcp: fix delayed ACKs for MSS boundary condition
sctp: update transport state when processing a dupcook packet
sctp: update hb timer immediately after users change hb_interval
netlink: split up copies in the ack construction
netlink: Fix potential skb memleak in netlink_ack
netlink: annotate data-races around sk->sk_err
HID: sony: remove duplicate NULL check before calling usb_free_urb()
HID: intel-ish-hid: ipc: Disable and reenable ACPI GPE bit
intel_idle: add Emerald Rapids Xeon support
smb: use kernel_connect() and kernel_bind()
parisc: Fix crash with nr_cpus=1 option
dm zoned: free dmz->ddev array in dmz_put_zoned_devices
RDMA/core: Require admin capabilities to set system parameters
of: dynamic: Fix potential memory leak in of_changeset_action()
IB/mlx4: Fix the size of a buffer in add_port_entries()
gpio: aspeed: fix the GPIO number passed to pinctrl_gpio_set_config()
gpio: pxa: disable pinctrl calls for MMP_GPIO
RDMA/cma: Initialize ib_sa_multicast structure to 0 when join
RDMA/cma: Fix truncation compilation warning in make_cma_ports
RDMA/uverbs: Fix typo of sizeof argument
RDMA/srp: Do not call scsi_done() from srp_abort()
RDMA/siw: Fix connection failure handling
RDMA/mlx5: Fix mutex unlocking on error flow for steering anchor creation
RDMA/mlx5: Fix NULL string error
x86/sev: Use the GHCB protocol when available for SNP CPUID requests
ksmbd: fix race condition between session lookup and expire
ksmbd: fix uaf in smb20_oplock_break_ack
parisc: Restore __ldcw_align for PA-RISC 2.0 processors
ipv6: remove nexthop_fib6_nh_bh()
vrf: Fix lockdep splat in output path
btrfs: fix an error handling path in btrfs_rename()
btrfs: fix fscrypt name leak after failure to join log transaction
netlink: remove the flex array from struct nlmsghdr
btrfs: file_remove_privs needs an exclusive lock in direct io write
ipv6: remove one read_lock()/read_unlock() pair in rt6_check_neigh()
xen/events: replace evtchn_rwlock with RCU
Linux 6.1.57
Change-Id: I2c200264df72a9043d91d31479c91b0d7f94863e
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit c259cc9cb4
191 changed files with 2880 additions and 1687 deletions
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -60,6 +60,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A510     | #1902691        | ARM64_ERRATUM_1902691       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A520     | #2966298        | ARM64_ERRATUM_2966298       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A53      | #826319         | ARM64_ERRATUM_826319        |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A53      | #827319         | ARM64_ERRATUM_827319        |
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -2148,6 +2148,14 @@ accept_ra_min_hop_limit - INTEGER
 	Default: 1
 
+accept_ra_min_lft - INTEGER
+	Minimum acceptable lifetime value in Router Advertisement.
+
+	RA sections with a lifetime less than this value shall be
+	ignored. Zero lifetimes stay unaffected.
+
+	Default: 0
+
 accept_ra_pinfo - BOOLEAN
 	Learn Prefix Information in Router Advertisement.
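For context on the two accept_ra_min_* patches in this release: the final knob
is exposed per interface as /proc/sys/net/ipv6/conf/<iface>/accept_ra_min_lft.
A minimal userspace sketch of setting it (the 900-second threshold and the
"all" interface are arbitrary examples, not values taken from the patch):

	/* sketch: ignore RA sections advertising lifetimes shorter than 900s */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/net/ipv6/conf/all/accept_ra_min_lft", "w");

		if (!f)
			return 1;	/* knob absent on kernels without this series */
		fprintf(f, "900\n");
		return fclose(f) ? 1 : 0;
	}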
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 1
-SUBLEVEL = 56
+SUBLEVEL = 57
 EXTRAVERSION =
 NAME = Curry Ramen
 
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -972,6 +972,19 @@ config ARM64_ERRATUM_2457168
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_2966298
+	bool "Cortex-A520: 2966298: workaround for speculatively executed unprivileged load"
+	default y
+	help
+	  This option adds the workaround for ARM Cortex-A520 erratum 2966298.
+
+	  On an affected Cortex-A520 core, a speculatively executed unprivileged
+	  load might leak data from a privileged level via a cache side channel.
+
+	  Work around this problem by executing a TLBI before returning to EL0.
+
+	  If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
 	bool "Cavium erratum 22375, 24313"
 	default y
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -670,7 +670,7 @@ static inline bool supports_clearbhb(int scope)
 		isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
 
 	return cpuid_feature_extract_unsigned_field(isar2,
-						    ID_AA64ISAR2_EL1_BC_SHIFT);
+						    ID_AA64ISAR2_EL1_CLRBHB_SHIFT);
 }
 
 const struct cpumask *system_32bit_el0_cpumask(void);
@@ -863,7 +863,11 @@ static inline bool cpu_has_hw_af(void)
 	if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
 		return false;
 
-	mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+	/*
+	 * Use cached version to avoid emulated msr operation on KVM
+	 * guests.
+	 */
+	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
 	return cpuid_feature_extract_unsigned_field(mmfr1,
 						ID_AA64MMFR1_EL1_HAFDBS_SHIFT);
 }
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -79,6 +79,7 @@
 #define ARM_CPU_PART_CORTEX_A78AE	0xD42
 #define ARM_CPU_PART_CORTEX_X1		0xD44
 #define ARM_CPU_PART_CORTEX_A510	0xD46
+#define ARM_CPU_PART_CORTEX_A520	0xD80
 #define ARM_CPU_PART_CORTEX_A710	0xD47
 #define ARM_CPU_PART_CORTEX_X2		0xD48
 #define ARM_CPU_PART_NEOVERSE_N2	0xD49
@@ -141,6 +142,7 @@
 #define MIDR_CORTEX_A78AE	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
 #define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
 #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
+#define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520)
 #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
 #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
 #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -722,6 +722,21 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
 		.cpu_enable = cpu_clear_bf16_from_user_emulation,
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_2966298
+	{
+		.desc = "ARM erratum 2966298",
+		.capability = ARM64_WORKAROUND_2966298,
+		/* Cortex-A520 r0p0 - r0p1 */
+		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
+	},
+#endif
 #ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
 	{
 		.desc = "AmpereOne erratum AC03_CPU_38",
 		.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
 		ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1),
 	},
 #endif
 	{
 	}
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -212,7 +212,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CLRBHB_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
 		       FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -419,6 +419,10 @@ alternative_else_nop_endif
 	ldp	x28, x29, [sp, #16 * 14]
 
 	.if	\el == 0
+alternative_if ARM64_WORKAROUND_2966298
+	tlbi	vale1, xzr
+	dsb	nsh
+alternative_else_nop_endif
 alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
 	ldr	lr, [sp, #S_LR]
 	add	sp, sp, #PT_REGS_SIZE		// restore sp
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -71,6 +71,7 @@ WORKAROUND_2064142
 WORKAROUND_2077057
 WORKAROUND_2457168
 WORKAROUND_2658417
+WORKAROUND_2966298
 WORKAROUND_TRBE_OVERWRITE_FILL_MODE
 WORKAROUND_TSB_FLUSH_FAILURE
 WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -484,7 +484,11 @@ EndEnum
 EndSysreg
 
 Sysreg	ID_AA64ISAR2_EL1	3	0	0	6	2
-Res0	63:28
+Res0	63:32
+Enum	31:28	CLRBHB
+	0b0000	NI
+	0b0001	IMP
+EndEnum
 Enum	27:24	PAC_frac
 	0b0000	NI
 	0b0001	IMP
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -2,14 +2,28 @@
 #ifndef __PARISC_LDCW_H
 #define __PARISC_LDCW_H
 
-#ifndef CONFIG_PA20
 /* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
    and GCC only guarantees 8-byte alignment for stack locals, we can't
    be assured of 16-byte alignment for atomic lock data even if we
    specify "__attribute ((aligned(16)))" in the type declaration.  So,
    we use a struct containing an array of four ints for the atomic lock
    type and dynamically select the 16-byte aligned int from the array
-   for the semaphore.  */
+   for the semaphore. */
+
+/* From: "Jim Hull" <jim.hull of hp.com>
+   I've attached a summary of the change, but basically, for PA 2.0, as
+   long as the ",CO" (coherent operation) completer is implemented, then the
+   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
+   they only require "natural" alignment (4-byte for ldcw, 8-byte for
+   ldcd).
+
+   Although the cache control hint is accepted by all PA 2.0 processors,
+   it is only implemented on PA8800/PA8900 CPUs. Prior PA8X00 CPUs still
+   require 16-byte alignment. If the address is unaligned, the operation
+   of the instruction is undefined. The ldcw instruction does not generate
+   unaligned data reference traps so misaligned accesses are not detected.
+   This hid the problem for years. So, restore the 16-byte alignment dropped
+   by Kyle McMartin in "Remove __ldcw_align for PA-RISC 2.0 processors". */
 
 #define __PA_LDCW_ALIGNMENT	16
 #define __PA_LDCW_ALIGN_ORDER	4
@@ -19,22 +33,12 @@
 		& ~(__PA_LDCW_ALIGNMENT - 1);			\
 	(volatile unsigned int *) __ret;			\
 })
-#define __LDCW	"ldcw"
 
-#else /*CONFIG_PA20*/
-/* From: "Jim Hull" <jim.hull of hp.com>
-   I've attached a summary of the change, but basically, for PA 2.0, as
-   long as the ",CO" (coherent operation) completer is specified, then the
-   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
-   they only require "natural" alignment (4-byte for ldcw, 8-byte for
-   ldcd). */
-
-#define __PA_LDCW_ALIGNMENT	4
-#define __PA_LDCW_ALIGN_ORDER	2
-#define __ldcw_align(a) (&(a)->slock)
+#ifdef CONFIG_PA20
 #define __LDCW	"ldcw,co"
-
-#endif /*!CONFIG_PA20*/
+#else
+#define __LDCW	"ldcw"
+#endif
 
 /* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
    We don't explicitly expose that "*a" may be written as reload
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -3,13 +3,8 @@
 #define __ASM_SPINLOCK_TYPES_H
 
 typedef struct {
-#ifdef CONFIG_PA20
-	volatile unsigned int slock;
-# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
-#else
 	volatile unsigned int lock[4];
 # define __ARCH_SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 } }
-#endif
 } arch_spinlock_t;
 
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -443,7 +443,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 	if (cpu_online(cpu))
 		return 0;
 
-	if (num_online_cpus() < setup_max_cpus && smp_boot_one_cpu(cpu, tidle))
+	if (num_online_cpus() < nr_cpu_ids &&
+	    num_online_cpus() < setup_max_cpus &&
+	    smp_boot_one_cpu(cpu, tidle))
 		return -EIO;
 
 	return cpu_online(cpu) ? 0 : -EIO;
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -534,8 +534,12 @@ static void amd_pmu_cpu_reset(int cpu)
 	/* Clear enable bits i.e. PerfCntrGlobalCtl.PerfCntrEn */
 	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
 
-	/* Clear overflow bits i.e. PerfCntrGLobalStatus.PerfCntrOvfl */
-	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, amd_pmu_global_cntr_mask);
+	/*
+	 * Clear freeze and overflow bits i.e. PerfCntrGLobalStatus.LbrFreeze
+	 * and PerfCntrGLobalStatus.PerfCntrOvfl
+	 */
+	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
+	       GLOBAL_STATUS_LBRS_FROZEN | amd_pmu_global_cntr_mask);
 }
 
 static int amd_pmu_cpu_prepare(int cpu)
@@ -570,6 +574,7 @@ static void amd_pmu_cpu_starting(int cpu)
 	int i, nb_id;
 
 	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
+	amd_pmu_cpu_reset(cpu);
 
 	if (!x86_pmu.amd_nb_constraints)
 		return;
@@ -591,8 +596,6 @@ static void amd_pmu_cpu_starting(int cpu)
 
 	cpuc->amd_nb->nb_id = nb_id;
 	cpuc->amd_nb->refcnt++;
-
-	amd_pmu_cpu_reset(cpu);
 }
 
 static void amd_pmu_cpu_dead(int cpu)
@@ -601,6 +604,7 @@ static void amd_pmu_cpu_dead(int cpu)
 
 	kfree(cpuhw->lbr_sel);
 	cpuhw->lbr_sel = NULL;
+	amd_pmu_cpu_reset(cpu);
 
 	if (!x86_pmu.amd_nb_constraints)
 		return;
@@ -613,8 +617,6 @@ static void amd_pmu_cpu_dead(int cpu)
 
 		cpuhw->amd_nb = NULL;
 	}
-
-	amd_pmu_cpu_reset(cpu);
 }
 
 static inline void amd_pmu_set_global_ctl(u64 ctl)
@@ -884,7 +886,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
 	struct hw_perf_event *hwc;
 	struct perf_event *event;
 	int handled = 0, idx;
-	u64 status, mask;
+	u64 reserved, status, mask;
 	bool pmu_enabled;
 
 	/*
@@ -909,6 +911,14 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
 		status &= ~GLOBAL_STATUS_LBRS_FROZEN;
 	}
 
+	reserved = status & ~amd_pmu_global_cntr_mask;
+	if (reserved)
+		pr_warn_once("Reserved PerfCntrGlobalStatus bits are set (0x%llx), please consider updating microcode\n",
+			     reserved);
+
+	/* Clear any reserved bits set by buggy microcode */
+	status &= amd_pmu_global_cntr_mask;
+
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -253,7 +253,7 @@ static int __sev_cpuid_hv(u32 fn, int reg_idx, u32 *reg)
 	return 0;
 }
 
-static int sev_cpuid_hv(struct cpuid_leaf *leaf)
+static int __sev_cpuid_hv_msr(struct cpuid_leaf *leaf)
 {
 	int ret;
 
@@ -276,6 +276,45 @@ static int sev_cpuid_hv(struct cpuid_leaf *leaf)
 	return ret;
 }
 
+static int __sev_cpuid_hv_ghcb(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
+{
+	u32 cr4 = native_read_cr4();
+	int ret;
+
+	ghcb_set_rax(ghcb, leaf->fn);
+	ghcb_set_rcx(ghcb, leaf->subfn);
+
+	if (cr4 & X86_CR4_OSXSAVE)
+		/* Safe to read xcr0 */
+		ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
+	else
+		/* xgetbv will cause #UD - use reset value for xcr0 */
+		ghcb_set_xcr0(ghcb, 1);
+
+	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
+	if (ret != ES_OK)
+		return ret;
+
+	if (!(ghcb_rax_is_valid(ghcb) &&
+	      ghcb_rbx_is_valid(ghcb) &&
+	      ghcb_rcx_is_valid(ghcb) &&
+	      ghcb_rdx_is_valid(ghcb)))
+		return ES_VMM_ERROR;
+
+	leaf->eax = ghcb->save.rax;
+	leaf->ebx = ghcb->save.rbx;
+	leaf->ecx = ghcb->save.rcx;
+	leaf->edx = ghcb->save.rdx;
+
+	return ES_OK;
+}
+
+static int sev_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
+{
+	return ghcb ? __sev_cpuid_hv_ghcb(ghcb, ctxt, leaf)
+		    : __sev_cpuid_hv_msr(leaf);
+}
+
 /*
  * This may be called early while still running on the initial identity
  * mapping. Use RIP-relative addressing to obtain the correct address
@@ -385,19 +424,20 @@ snp_cpuid_get_validated_func(struct cpuid_leaf *leaf)
 	return false;
 }
 
-static void snp_cpuid_hv(struct cpuid_leaf *leaf)
+static void snp_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
 {
-	if (sev_cpuid_hv(leaf))
+	if (sev_cpuid_hv(ghcb, ctxt, leaf))
 		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID_HV);
 }
 
-static int snp_cpuid_postprocess(struct cpuid_leaf *leaf)
+static int snp_cpuid_postprocess(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
+				 struct cpuid_leaf *leaf)
 {
 	struct cpuid_leaf leaf_hv = *leaf;
 
 	switch (leaf->fn) {
 	case 0x1:
-		snp_cpuid_hv(&leaf_hv);
+		snp_cpuid_hv(ghcb, ctxt, &leaf_hv);
 
 		/* initial APIC ID */
 		leaf->ebx = (leaf_hv.ebx & GENMASK(31, 24)) | (leaf->ebx & GENMASK(23, 0));
@@ -416,7 +456,7 @@ static int snp_cpuid_postprocess(struct cpuid_leaf *leaf)
 		break;
 	case 0xB:
 		leaf_hv.subfn = 0;
-		snp_cpuid_hv(&leaf_hv);
+		snp_cpuid_hv(ghcb, ctxt, &leaf_hv);
 
 		/* extended APIC ID */
 		leaf->edx = leaf_hv.edx;
@@ -464,7 +504,7 @@ static int snp_cpuid_postprocess(struct cpuid_leaf *leaf)
 		}
 		break;
 	case 0x8000001E:
-		snp_cpuid_hv(&leaf_hv);
+		snp_cpuid_hv(ghcb, ctxt, &leaf_hv);
 
 		/* extended APIC ID */
 		leaf->eax = leaf_hv.eax;
@@ -485,7 +525,7 @@ static int snp_cpuid_postprocess(struct cpuid_leaf *leaf)
 * Returns -EOPNOTSUPP if feature not enabled. Any other non-zero return value
 * should be treated as fatal by caller.
 */
-static int snp_cpuid(struct cpuid_leaf *leaf)
+static int snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
 {
 	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
 
@@ -519,7 +559,7 @@ static int snp_cpuid(struct cpuid_leaf *leaf)
 		return 0;
 	}
 
-	return snp_cpuid_postprocess(leaf);
+	return snp_cpuid_postprocess(ghcb, ctxt, leaf);
 }
 
 /*
@@ -541,14 +581,14 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
 	leaf.fn = fn;
 	leaf.subfn = subfn;
 
-	ret = snp_cpuid(&leaf);
+	ret = snp_cpuid(NULL, NULL, &leaf);
 	if (!ret)
 		goto cpuid_done;
 
 	if (ret != -EOPNOTSUPP)
 		goto fail;
 
-	if (sev_cpuid_hv(&leaf))
+	if (__sev_cpuid_hv_msr(&leaf))
 		goto fail;
 
 cpuid_done:
@@ -845,14 +885,15 @@ static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
 	return ret;
 }
 
-static int vc_handle_cpuid_snp(struct pt_regs *regs)
+static int vc_handle_cpuid_snp(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
 {
+	struct pt_regs *regs = ctxt->regs;
 	struct cpuid_leaf leaf;
 	int ret;
 
 	leaf.fn = regs->ax;
 	leaf.subfn = regs->cx;
-	ret = snp_cpuid(&leaf);
+	ret = snp_cpuid(ghcb, ctxt, &leaf);
 	if (!ret) {
 		regs->ax = leaf.eax;
 		regs->bx = leaf.ebx;
@@ -871,7 +912,7 @@ static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
 	enum es_result ret;
 	int snp_cpuid_ret;
 
-	snp_cpuid_ret = vc_handle_cpuid_snp(regs);
+	snp_cpuid_ret = vc_handle_cpuid_snp(ghcb, ctxt);
 	if (!snp_cpuid_ret)
 		return ES_OK;
 	if (snp_cpuid_ret != -EOPNOTSUPP)
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -737,6 +737,7 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
 	struct request_queue *q = container_of(rcu_head, struct request_queue,
 					       rcu_head);
 
+	percpu_ref_exit(&q->q_usage_counter);
 	kmem_cache_free(blk_get_queue_kmem_cache(blk_queue_has_srcu(q)), q);
 }
 
@@ -762,8 +763,6 @@ static void blk_release_queue(struct kobject *kobj)
 
 	might_sleep();
 
-	percpu_ref_exit(&q->q_usage_counter);
-
 	if (q->poll_stat)
 		blk_stat_remove_callback(q, q->poll_cb);
 	blk_stat_free_callback(q->poll_cb);
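The two hunks above fix the use-after-free by moving percpu_ref_exit() from
blk_release_queue() into the RCU callback that actually frees the queue, so
the q_usage_counter percpu data stays allocated until no RCU reader can still
dereference it. A generic sketch of that pattern (hypothetical struct obj,
not the actual block-layer types):

	struct obj {
		struct percpu_ref ref;
		struct rcu_head rcu;
	};

	static void obj_free_rcu(struct rcu_head *rcu)
	{
		struct obj *o = container_of(rcu, struct obj, rcu);

		/* safe here: a grace period has elapsed, no reader holds o->ref */
		percpu_ref_exit(&o->ref);
		kfree(o);
	}

	static void obj_release(struct obj *o)
	{
		call_rcu(&o->rcu, obj_free_rcu);	/* defer teardown past readers */
	}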
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5022,11 +5022,27 @@ static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
 
 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
 {
+	/*
+	 * We are about to suspend the port, so we do not care about
+	 * scsi_rescan_device() calls scheduled by previous resume operations.
+	 * The next resume will schedule the rescan again. So cancel any rescan
+	 * that is not done yet.
+	 */
+	cancel_delayed_work_sync(&ap->scsi_rescan_task);
+
 	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
 }
 
 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
 {
+	/*
+	 * We are about to suspend the port, so we do not care about
+	 * scsi_rescan_device() calls scheduled by previous resume operations.
+	 * The next resume will schedule the rescan again. So cancel any rescan
+	 * that is not done yet.
+	 */
+	cancel_delayed_work_sync(&ap->scsi_rescan_task);
+
 	ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
 }
 
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1086,7 +1086,15 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
 		}
 	} else {
 		sdev->sector_size = ata_id_logical_sector_size(dev->id);
-		sdev->manage_start_stop = 1;
+		/*
+		 * Stop the drive on suspend but do not issue START STOP UNIT
+		 * on resume as this is not necessary and may fail: the device
+		 * will be woken up by ata_port_pm_resume() with a port reset
+		 * and device revalidation.
+		 */
+		sdev->manage_system_start_stop = true;
+		sdev->manage_runtime_start_stop = true;
+		sdev->no_start_on_resume = 1;
 	}
 
 	/*
@@ -4645,7 +4653,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
 	struct ata_link *link;
 	struct ata_device *dev;
 	unsigned long flags;
-	bool delay_rescan = false;
+	int ret = 0;
 
 	mutex_lock(&ap->scsi_scan_mutex);
 	spin_lock_irqsave(ap->lock, flags);
@@ -4654,37 +4662,34 @@ void ata_scsi_dev_rescan(struct work_struct *work)
 		ata_for_each_dev(dev, link, ENABLED) {
 			struct scsi_device *sdev = dev->sdev;
 
+			/*
+			 * If the port was suspended before this was scheduled,
+			 * bail out.
+			 */
+			if (ap->pflags & ATA_PFLAG_SUSPENDED)
+				goto unlock;
+
 			if (!sdev)
 				continue;
 			if (scsi_device_get(sdev))
 				continue;
 
-			/*
-			 * If the rescan work was scheduled because of a resume
-			 * event, the port is already fully resumed, but the
-			 * SCSI device may not yet be fully resumed. In such
-			 * case, executing scsi_rescan_device() may cause a
-			 * deadlock with the PM code on device_lock(). Prevent
-			 * this by giving up and retrying rescan after a short
-			 * delay.
-			 */
-			delay_rescan = sdev->sdev_gendev.power.is_suspended;
-			if (delay_rescan) {
-				scsi_device_put(sdev);
-				break;
-			}
-
 			spin_unlock_irqrestore(ap->lock, flags);
-			scsi_rescan_device(&(sdev->sdev_gendev));
+			ret = scsi_rescan_device(sdev);
 			scsi_device_put(sdev);
 			spin_lock_irqsave(ap->lock, flags);
+
+			if (ret)
+				goto unlock;
 		}
 	}
 
+unlock:
 	spin_unlock_irqrestore(ap->lock, flags);
 	mutex_unlock(&ap->scsi_scan_mutex);
 
-	if (delay_rescan)
+	/* Reschedule with a delay if scsi_rescan_device() returned an error */
+	if (ret)
 		schedule_delayed_work(&ap->scsi_rescan_task,
 				      msecs_to_jiffies(5));
 }
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -453,7 +453,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
 		if (!rbnode)
 			return -ENOMEM;
 		regcache_rbtree_set_register(map, rbnode,
-					     reg - rbnode->base_reg, value);
+					     (reg - rbnode->base_reg) / map->reg_stride,
+					     value);
 		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
 		rbtree_ctx->cached_rbnode = rbnode;
 	}
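The index passed to regcache_rbtree_set_register() above is a slot number
within the node's value block, not a register-address delta, so the delta must
be scaled down by the map's register stride. A hedged illustration of the
corrected math (hypothetical helper, not part of the driver):

	/*
	 * With reg_stride == 4 and base_reg == 0x10, registers 0x10, 0x14
	 * and 0x18 occupy cache slots 0, 1 and 2. Using the raw
	 * (reg - base_reg) delta as the index would land on the wrong slot
	 * for every register past the base.
	 */
	static inline unsigned int rbnode_slot(unsigned int reg,
					       unsigned int base_reg,
					       unsigned int reg_stride)
	{
		return (reg - base_reg) / reg_stride;
	}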
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -632,9 +632,8 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
 
 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
-static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
-static int rbd_dev_header_info(struct rbd_device *rbd_dev);
-static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
+				     struct rbd_image_header *header);
 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
 					u64 snap_id);
 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
@@ -995,15 +994,24 @@ static void rbd_init_layout(struct rbd_device *rbd_dev)
 	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
 }
 
+static void rbd_image_header_cleanup(struct rbd_image_header *header)
+{
+	kfree(header->object_prefix);
+	ceph_put_snap_context(header->snapc);
+	kfree(header->snap_sizes);
+	kfree(header->snap_names);
+
+	memset(header, 0, sizeof(*header));
+}
+
 /*
  * Fill an rbd image header with information from the given format 1
  * on-disk header.
  */
-static int rbd_header_from_disk(struct rbd_device *rbd_dev,
-				struct rbd_image_header_ondisk *ondisk)
+static int rbd_header_from_disk(struct rbd_image_header *header,
+				struct rbd_image_header_ondisk *ondisk,
+				bool first_time)
 {
-	struct rbd_image_header *header = &rbd_dev->header;
-	bool first_time = header->object_prefix == NULL;
 	struct ceph_snap_context *snapc;
 	char *object_prefix = NULL;
 	char *snap_names = NULL;
@@ -1070,11 +1078,6 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev,
 	if (first_time) {
 		header->object_prefix = object_prefix;
 		header->obj_order = ondisk->options.order;
-		rbd_init_layout(rbd_dev);
-	} else {
-		ceph_put_snap_context(header->snapc);
-		kfree(header->snap_names);
-		kfree(header->snap_sizes);
 	}
 
 	/* The remaining fields always get updated (when we refresh) */
@@ -4860,7 +4863,9 @@ out_req:
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
-static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
+static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev,
+				  struct rbd_image_header *header,
+				  bool first_time)
 {
 	struct rbd_image_header_ondisk *ondisk = NULL;
 	u32 snap_count = 0;
@@ -4908,7 +4913,7 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
 		snap_count = le32_to_cpu(ondisk->snap_count);
 	} while (snap_count != want_count);
 
-	ret = rbd_header_from_disk(rbd_dev, ondisk);
+	ret = rbd_header_from_disk(header, ondisk, first_time);
 out:
 	kfree(ondisk);
 
@@ -4932,39 +4937,6 @@ static void rbd_dev_update_size(struct rbd_device *rbd_dev)
 	}
 }
 
-static int rbd_dev_refresh(struct rbd_device *rbd_dev)
-{
-	u64 mapping_size;
-	int ret;
-
-	down_write(&rbd_dev->header_rwsem);
-	mapping_size = rbd_dev->mapping.size;
-
-	ret = rbd_dev_header_info(rbd_dev);
-	if (ret)
-		goto out;
-
-	/*
-	 * If there is a parent, see if it has disappeared due to the
-	 * mapped image getting flattened.
-	 */
-	if (rbd_dev->parent) {
-		ret = rbd_dev_v2_parent_info(rbd_dev);
-		if (ret)
-			goto out;
-	}
-
-	rbd_assert(!rbd_is_snap(rbd_dev));
-	rbd_dev->mapping.size = rbd_dev->header.image_size;
-
-out:
-	up_write(&rbd_dev->header_rwsem);
-	if (!ret && mapping_size != rbd_dev->mapping.size)
-		rbd_dev_update_size(rbd_dev);
-
-	return ret;
-}
-
 static const struct blk_mq_ops rbd_mq_ops = {
 	.queue_rq	= rbd_queue_rq,
 };
@@ -5504,17 +5476,12 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
 	return 0;
 }
 
-static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
-{
-	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
-					&rbd_dev->header.obj_order,
-					&rbd_dev->header.image_size);
-}
-
-static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev,
+				    char **pobject_prefix)
 {
 	size_t size;
 	void *reply_buf;
+	char *object_prefix;
 	int ret;
 	void *p;
 
@@ -5532,16 +5499,16 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
 		goto out;
 
 	p = reply_buf;
-	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
-						p + ret, NULL, GFP_NOIO);
+	object_prefix = ceph_extract_encoded_string(&p, p + ret, NULL,
+						    GFP_NOIO);
+	if (IS_ERR(object_prefix)) {
+		ret = PTR_ERR(object_prefix);
+		goto out;
+	}
 	ret = 0;
 
-	if (IS_ERR(rbd_dev->header.object_prefix)) {
-		ret = PTR_ERR(rbd_dev->header.object_prefix);
-		rbd_dev->header.object_prefix = NULL;
-	} else {
-		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
-	}
+	*pobject_prefix = object_prefix;
+	dout("  object_prefix = %s\n", object_prefix);
 out:
 	kfree(reply_buf);
 
@@ -5592,13 +5559,6 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
 	return 0;
 }
 
-static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
-{
-	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
-					 rbd_is_ro(rbd_dev),
-					 &rbd_dev->header.features);
-}
-
 /*
  * These are generic image flags, but since they are used only for
  * object map, store them in rbd_dev->object_map_flags.
@@ -5635,6 +5595,14 @@ struct parent_image_info {
 	u64		overlap;
 };
 
+static void rbd_parent_info_cleanup(struct parent_image_info *pii)
+{
+	kfree(pii->pool_ns);
+	kfree(pii->image_id);
+
+	memset(pii, 0, sizeof(*pii));
+}
+
 /*
  * The caller is responsible for @pii.
  */
@@ -5704,6 +5672,9 @@ static int __get_parent_info(struct rbd_device *rbd_dev,
 	if (pii->has_overlap)
 		ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
 
+	dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
+	     __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
+	     pii->has_overlap, pii->overlap);
 	return 0;
 
 e_inval:
@@ -5742,14 +5713,17 @@ static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
 	pii->has_overlap = true;
 	ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
 
+	dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
+	     __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
+	     pii->has_overlap, pii->overlap);
 	return 0;
 
 e_inval:
 	return -EINVAL;
 }
 
-static int get_parent_info(struct rbd_device *rbd_dev,
-			   struct parent_image_info *pii)
+static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev,
+				  struct parent_image_info *pii)
 {
 	struct page *req_page, *reply_page;
 	void *p;
@@ -5777,7 +5751,7 @@ static int get_parent_info(struct rbd_device *rbd_dev,
 	return ret;
 }
 
-static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
+static int rbd_dev_setup_parent(struct rbd_device *rbd_dev)
 {
 	struct rbd_spec *parent_spec;
 	struct parent_image_info pii = { 0 };
@@ -5787,37 +5761,12 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 	if (!parent_spec)
 		return -ENOMEM;
 
-	ret = get_parent_info(rbd_dev, &pii);
+	ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
 	if (ret)
 		goto out_err;
 
-	dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
-	     __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
-	     pii.has_overlap, pii.overlap);
-
-	if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
-		/*
-		 * Either the parent never existed, or we have
-		 * record of it but the image got flattened so it no
-		 * longer has a parent.  When the parent of a
-		 * layered image disappears we immediately set the
-		 * overlap to 0.  The effect of this is that all new
-		 * requests will be treated as if the image had no
-		 * parent.
-		 *
-		 * If !pii.has_overlap, the parent image spec is not
-		 * applicable.  It's there to avoid duplication in each
-		 * snapshot record.
-		 */
-		if (rbd_dev->parent_overlap) {
-			rbd_dev->parent_overlap = 0;
-			rbd_dev_parent_put(rbd_dev);
-			pr_info("%s: clone image has been flattened\n",
-				rbd_dev->disk->disk_name);
-		}
-
+	if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap)
 		goto out;	/* No parent? No problem. */
-	}
 
 	/* The ceph file layout needs to fit pool id in 32 bits */
 
@@ -5829,58 +5778,46 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 	}
 
 	/*
-	 * The parent won't change (except when the clone is
-	 * flattened, already handled that).  So we only need to
-	 * record the parent spec we have not already done so.
+	 * The parent won't change except when the clone is flattened,
+	 * so we only need to record the parent image spec once.
 	 */
-	if (!rbd_dev->parent_spec) {
-		parent_spec->pool_id = pii.pool_id;
-		if (pii.pool_ns && *pii.pool_ns) {
-			parent_spec->pool_ns = pii.pool_ns;
-			pii.pool_ns = NULL;
-		}
-		parent_spec->image_id = pii.image_id;
-		pii.image_id = NULL;
-		parent_spec->snap_id = pii.snap_id;
-
-		rbd_dev->parent_spec = parent_spec;
-		parent_spec = NULL;	/* rbd_dev now owns this */
+	parent_spec->pool_id = pii.pool_id;
+	if (pii.pool_ns && *pii.pool_ns) {
+		parent_spec->pool_ns = pii.pool_ns;
+		pii.pool_ns = NULL;
 	}
+	parent_spec->image_id = pii.image_id;
+	pii.image_id = NULL;
+	parent_spec->snap_id = pii.snap_id;
+
+	rbd_assert(!rbd_dev->parent_spec);
+	rbd_dev->parent_spec = parent_spec;
+	parent_spec = NULL;	/* rbd_dev now owns this */
 
 	/*
-	 * We always update the parent overlap.  If it's zero we issue
-	 * a warning, as we will proceed as if there was no parent.
+	 * Record the parent overlap.  If it's zero, issue a warning as
+	 * we will proceed as if there is no parent.
 	 */
-	if (!pii.overlap) {
-		if (parent_spec) {
-			/* refresh, careful to warn just once */
-			if (rbd_dev->parent_overlap)
-				rbd_warn(rbd_dev,
-				    "clone now standalone (overlap became 0)");
-		} else {
-			/* initial probe */
-			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
-		}
-	}
+	if (!pii.overlap)
+		rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
 	rbd_dev->parent_overlap = pii.overlap;
 
 out:
 	ret = 0;
 out_err:
-	kfree(pii.pool_ns);
-	kfree(pii.image_id);
+	rbd_parent_info_cleanup(&pii);
 	rbd_spec_put(parent_spec);
 	return ret;
 }
 
-static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev,
+				    u64 *stripe_unit, u64 *stripe_count)
 {
 	struct {
 		__le64 stripe_unit;
 		__le64 stripe_count;
 	} __attribute__ ((packed)) striping_info_buf = { 0 };
 	size_t size = sizeof (striping_info_buf);
-	void *p;
 	int ret;
 
 	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
@@ -5892,27 +5829,33 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
 	if (ret < size)
 		return -ERANGE;
 
-	p = &striping_info_buf;
-	rbd_dev->header.stripe_unit = ceph_decode_64(&p);
-	rbd_dev->header.stripe_count = ceph_decode_64(&p);
+	*stripe_unit = le64_to_cpu(striping_info_buf.stripe_unit);
+	*stripe_count = le64_to_cpu(striping_info_buf.stripe_count);
+	dout("  stripe_unit = %llu stripe_count = %llu\n", *stripe_unit,
+	     *stripe_count);
 
 	return 0;
 }
 
-static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev, s64 *data_pool_id)
 {
-	__le64 data_pool_id;
+	__le64 data_pool_buf;
 	int ret;
 
 	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
 				  &rbd_dev->header_oloc, "get_data_pool",
-				  NULL, 0, &data_pool_id, sizeof(data_pool_id));
+				  NULL, 0, &data_pool_buf,
+				  sizeof(data_pool_buf));
 	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
 	if (ret < 0)
 		return ret;
-	if (ret < sizeof(data_pool_id))
+	if (ret < sizeof(data_pool_buf))
 		return -EBADMSG;
 
-	rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
-	WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
+	*data_pool_id = le64_to_cpu(data_pool_buf);
+	dout("  data_pool_id = %lld\n", *data_pool_id);
+	WARN_ON(*data_pool_id == CEPH_NOPOOL);
 
 	return 0;
 }
@@ -6104,7 +6047,8 @@ out_err:
 	return ret;
 }
 
-static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev,
+				   struct ceph_snap_context **psnapc)
 {
 	size_t size;
 	int ret;
@@ -6165,9 +6109,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
 	for (i = 0; i < snap_count; i++)
 		snapc->snaps[i] = ceph_decode_64(&p);
 
-	ceph_put_snap_context(rbd_dev->header.snapc);
-	rbd_dev->header.snapc = snapc;
-
+	*psnapc = snapc;
 	dout("  snap context seq = %llu, snap_count = %u\n",
 	     (unsigned long long)seq, (unsigned int)snap_count);
 out:
@@ -6216,38 +6158,42 @@ out:
 	return snap_name;
 }
 
-static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev,
+				  struct rbd_image_header *header,
+				  bool first_time)
 {
-	bool first_time = rbd_dev->header.object_prefix == NULL;
 	int ret;
 
-	ret = rbd_dev_v2_image_size(rbd_dev);
+	ret = _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
+				    first_time ? &header->obj_order : NULL,
+				    &header->image_size);
 	if (ret)
 		return ret;
 
 	if (first_time) {
-		ret = rbd_dev_v2_header_onetime(rbd_dev);
+		ret = rbd_dev_v2_header_onetime(rbd_dev, header);
 		if (ret)
 			return ret;
 	}
 
-	ret = rbd_dev_v2_snap_context(rbd_dev);
-	if (ret && first_time) {
-		kfree(rbd_dev->header.object_prefix);
-		rbd_dev->header.object_prefix = NULL;
-	}
+	ret = rbd_dev_v2_snap_context(rbd_dev, &header->snapc);
+	if (ret)
+		return ret;
 
-	return ret;
+	return 0;
 }
 
-static int rbd_dev_header_info(struct rbd_device *rbd_dev)
+static int rbd_dev_header_info(struct rbd_device *rbd_dev,
+			       struct rbd_image_header *header,
+			       bool first_time)
 {
 	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+	rbd_assert(!header->object_prefix && !header->snapc);
 
 	if (rbd_dev->image_format == 1)
-		return rbd_dev_v1_header_info(rbd_dev);
+		return rbd_dev_v1_header_info(rbd_dev, header, first_time);
 
-	return rbd_dev_v2_header_info(rbd_dev);
+	return rbd_dev_v2_header_info(rbd_dev, header, first_time);
 }
 
 /*
@@ -6735,60 +6681,49 @@ out:
 */
 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
 {
-	struct rbd_image_header	*header;
-
 	rbd_dev_parent_put(rbd_dev);
 	rbd_object_map_free(rbd_dev);
 	rbd_dev_mapping_clear(rbd_dev);
 
 	/* Free dynamic fields from the header, then zero it out */
-
-	header = &rbd_dev->header;
-	ceph_put_snap_context(header->snapc);
-	kfree(header->snap_sizes);
-	kfree(header->snap_names);
-	kfree(header->object_prefix);
-	memset(header, 0, sizeof (*header));
+	rbd_image_header_cleanup(&rbd_dev->header);
}
 
-static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
+				     struct rbd_image_header *header)
 {
 	int ret;
 
-	ret = rbd_dev_v2_object_prefix(rbd_dev);
+	ret = rbd_dev_v2_object_prefix(rbd_dev, &header->object_prefix);
 	if (ret)
-		goto out_err;
+		return ret;
 
 	/*
 	 * Get the and check features for the image.  Currently the
 	 * features are assumed to never change.
 	 */
-	ret = rbd_dev_v2_features(rbd_dev);
+	ret = _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
+					rbd_is_ro(rbd_dev), &header->features);
 	if (ret)
-		goto out_err;
+		return ret;
 
 	/* If the image supports fancy striping, get its parameters */
-	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
-		ret = rbd_dev_v2_striping_info(rbd_dev);
-		if (ret < 0)
-			goto out_err;
-	}
-
-	if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
-		ret = rbd_dev_v2_data_pool(rbd_dev);
+	if (header->features & RBD_FEATURE_STRIPINGV2) {
+		ret = rbd_dev_v2_striping_info(rbd_dev, &header->stripe_unit,
+					       &header->stripe_count);
 		if (ret)
-			goto out_err;
+			return ret;
 	}
 
-	rbd_init_layout(rbd_dev);
-	return 0;
+	if (header->features & RBD_FEATURE_DATA_POOL) {
+		ret = rbd_dev_v2_data_pool(rbd_dev, &header->data_pool_id);
+		if (ret)
+			return ret;
+	}
 
-out_err:
-	rbd_dev->header.features = 0;
-	kfree(rbd_dev->header.object_prefix);
-	rbd_dev->header.object_prefix = NULL;
-	return ret;
+	return 0;
 }
 
 /*
@@ -6983,13 +6918,15 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
 	if (!depth)
 		down_write(&rbd_dev->header_rwsem);
 
-	ret = rbd_dev_header_info(rbd_dev);
+	ret = rbd_dev_header_info(rbd_dev, &rbd_dev->header, true);
 	if (ret) {
 		if (ret == -ENOENT && !need_watch)
 			rbd_print_dne(rbd_dev, false);
 		goto err_out_probe;
 	}
 
+	rbd_init_layout(rbd_dev);
+
 	/*
 	 * If this image is the one being mapped, we have pool name and
 	 * id, image name and id, and snap name - need to fill snap id.
@@ -7018,7 +6955,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
 	}
 
 	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
-		ret = rbd_dev_v2_parent_info(rbd_dev);
+		ret = rbd_dev_setup_parent(rbd_dev);
 		if (ret)
 			goto err_out_probe;
 	}
@@ -7044,6 +6981,107 @@ err_out_format:
 	return ret;
 }
 
+static void rbd_dev_update_header(struct rbd_device *rbd_dev,
+				  struct rbd_image_header *header)
+{
+	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+	rbd_assert(rbd_dev->header.object_prefix); /* !first_time */
+
+	if (rbd_dev->header.image_size != header->image_size) {
+		rbd_dev->header.image_size = header->image_size;
+
+		if (!rbd_is_snap(rbd_dev)) {
+			rbd_dev->mapping.size = header->image_size;
+			rbd_dev_update_size(rbd_dev);
+		}
+	}
+
+	ceph_put_snap_context(rbd_dev->header.snapc);
+	rbd_dev->header.snapc = header->snapc;
+	header->snapc = NULL;
+
+	if (rbd_dev->image_format == 1) {
+		kfree(rbd_dev->header.snap_names);
+		rbd_dev->header.snap_names = header->snap_names;
+		header->snap_names = NULL;
+
+		kfree(rbd_dev->header.snap_sizes);
+		rbd_dev->header.snap_sizes = header->snap_sizes;
+		header->snap_sizes = NULL;
+	}
+}
+
+static void rbd_dev_update_parent(struct rbd_device *rbd_dev,
+				  struct parent_image_info *pii)
+{
+	if (pii->pool_id == CEPH_NOPOOL || !pii->has_overlap) {
+		/*
+		 * Either the parent never existed, or we have
+		 * record of it but the image got flattened so it no
+		 * longer has a parent.  When the parent of a
+		 * layered image disappears we immediately set the
+		 * overlap to 0.  The effect of this is that all new
+		 * requests will be treated as if the image had no
+		 * parent.
+		 *
+		 * If !pii.has_overlap, the parent image spec is not
+		 * applicable.  It's there to avoid duplication in each
+		 * snapshot record.
+		 */
+		if (rbd_dev->parent_overlap) {
+			rbd_dev->parent_overlap = 0;
+			rbd_dev_parent_put(rbd_dev);
+			pr_info("%s: clone has been flattened\n",
+				rbd_dev->disk->disk_name);
+		}
+	} else {
+		rbd_assert(rbd_dev->parent_spec);
+
+		/*
+		 * Update the parent overlap.  If it became zero, issue
+		 * a warning as we will proceed as if there is no parent.
+		 */
+		if (!pii->overlap && rbd_dev->parent_overlap)
+			rbd_warn(rbd_dev,
+				 "clone has become standalone (overlap 0)");
+		rbd_dev->parent_overlap = pii->overlap;
+	}
+}
+
+static int rbd_dev_refresh(struct rbd_device *rbd_dev)
+{
+	struct rbd_image_header	header = { 0 };
+	struct parent_image_info pii = { 0 };
+	int ret;
+
+	dout("%s rbd_dev %p\n", __func__, rbd_dev);
+
+	ret = rbd_dev_header_info(rbd_dev, &header, false);
+	if (ret)
+		goto out;
+
+	/*
+	 * If there is a parent, see if it has disappeared due to the
+	 * mapped image getting flattened.
+	 */
+	if (rbd_dev->parent) {
+		ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
+		if (ret)
+			goto out;
+	}
+
+	down_write(&rbd_dev->header_rwsem);
+	rbd_dev_update_header(rbd_dev, &header);
+	if (rbd_dev->parent)
+		rbd_dev_update_parent(rbd_dev, &pii);
+	up_write(&rbd_dev->header_rwsem);
+
+out:
+	rbd_parent_info_cleanup(&pii);
+	rbd_image_header_cleanup(&header);
+	return ret;
+}
+
 static ssize_t do_rbd_add(struct bus_type *bus,
 			  const char *buf,
 			  size_t count)
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -81,7 +81,8 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
 *
 * - power condition
 *   Set the power condition field in the START STOP UNIT commands sent by
- *   sd_mod on suspend, resume, and shutdown (if manage_start_stop is on).
+ *   sd_mod on suspend, resume, and shutdown (if manage_system_start_stop or
+ *   manage_runtime_start_stop is on).
 *   Some disks need this to spin down or to resume properly.
 *
 * - override internal blacklist
@@ -1517,8 +1518,10 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
 
 	sdev->use_10_for_rw = 1;
 
-	if (sbp2_param_exclusive_login)
-		sdev->manage_start_stop = 1;
+	if (sbp2_param_exclusive_login) {
+		sdev->manage_system_start_stop = true;
+		sdev->manage_runtime_start_stop = true;
+	}
 
 	if (sdev->type == TYPE_ROM)
 		sdev->use_10_for_ms = 1;
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -963,7 +963,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
 	else if (param == PIN_CONFIG_BIAS_DISABLE ||
 			param == PIN_CONFIG_BIAS_PULL_DOWN ||
 			param == PIN_CONFIG_DRIVE_STRENGTH)
-		return pinctrl_gpio_set_config(offset, config);
+		return pinctrl_gpio_set_config(chip->base + offset, config);
 	else if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN ||
 			param == PIN_CONFIG_DRIVE_OPEN_SOURCE)
 		/* Return -ENOTSUPP to trigger emulation, as per datasheet */
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -243,6 +243,7 @@ static bool pxa_gpio_has_pinctrl(void)
 	switch (gpio_type) {
 	case PXA3XX_GPIO:
 	case MMP2_GPIO:
+	case MMP_GPIO:
 		return false;
 
 	default:
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2179,7 +2179,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 		adev->flags |= AMD_IS_PX;
 
 		if (!(adev->flags & AMD_IS_APU)) {
-			parent = pci_upstream_bridge(adev->pdev);
+			parent = pcie_find_root_port(adev->pdev);
 			adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
 		}
 
@@ -2344,14 +2344,62 @@ static int dm_late_init(void *handle)
 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
 }
 
+static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
+{
+	int ret;
+	u8 guid[16];
+	u64 tmp64;
+
+	mutex_lock(&mgr->lock);
+	if (!mgr->mst_primary)
+		goto out_fail;
+
+	if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
+		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+		goto out_fail;
+	}
+
+	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+				 DP_MST_EN |
+				 DP_UP_REQ_EN |
+				 DP_UPSTREAM_IS_SRC);
+	if (ret < 0) {
+		drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
+		goto out_fail;
+	}
+
+	/* Some hubs forget their guids after they resume */
+	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+	if (ret != 16) {
+		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+		goto out_fail;
+	}
+
+	if (memchr_inv(guid, 0, 16) == NULL) {
+		tmp64 = get_jiffies_64();
+		memcpy(&guid[0], &tmp64, sizeof(u64));
+		memcpy(&guid[8], &tmp64, sizeof(u64));
+
+		ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
+
+		if (ret != 16) {
+			drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
+			goto out_fail;
+		}
+	}
+
+	memcpy(mgr->mst_primary->guid, guid, 16);
+
+out_fail:
+	mutex_unlock(&mgr->lock);
+}
+
 static void s3_handle_mst(struct drm_device *dev, bool suspend)
 {
 	struct amdgpu_dm_connector *aconnector;
 	struct drm_connector *connector;
 	struct drm_connector_list_iter iter;
 	struct drm_dp_mst_topology_mgr *mgr;
 	int ret;
 	bool need_hotplug = false;
 
 	drm_connector_list_iter_begin(dev, &iter);
 	drm_for_each_connector_iter(connector, &iter) {

@@ -2373,18 +2421,15 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
 			if (!dp_is_lttpr_present(aconnector->dc_link))
 				dc_link_aux_try_to_configure_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
 
-			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
-			if (ret < 0) {
-				dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
-					aconnector->dc_link);
-				need_hotplug = true;
-			}
+			/* TODO: move resume_mst_branch_status() into drm mst resume again
+			 * once topology probing work is pulled out from mst resume into mst
+			 * resume 2nd step. mst resume 2nd step should be called after old
+			 * state getting restored (i.e. drm_atomic_helper_resume()).
+			 */
+			resume_mst_branch_status(mgr);
 		}
 	}
 	drm_connector_list_iter_end(&iter);
-
-	if (need_hotplug)
-		drm_kms_helper_hotplug_event(dev);
 }
 
 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)

@@ -2773,7 +2818,8 @@ static int dm_resume(void *handle)
 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
 	enum dc_connection_type new_connection_type = dc_connection_none;
 	struct dc_state *dc_state;
-	int i, r, j;
+	int i, r, j, ret;
+	bool need_hotplug = false;
 
 	if (amdgpu_in_reset(adev)) {
 		dc_state = dm->cached_dc_state;

@@ -2871,7 +2917,7 @@ static int dm_resume(void *handle)
 			continue;
 
 		/*
-		 * this is the case when traversing through already created
+		 * this is the case when traversing through already created end sink
 		 * MST connectors, should be skipped
 		 */
 		if (aconnector && aconnector->mst_port)

@@ -2931,6 +2977,27 @@ static int dm_resume(void *handle)
 
 	dm->cached_state = NULL;
 
+	/* Do mst topology probing after resuming cached state*/
+	drm_connector_list_iter_begin(ddev, &iter);
+	drm_for_each_connector_iter(connector, &iter) {
+		aconnector = to_amdgpu_dm_connector(connector);
+		if (aconnector->dc_link->type != dc_connection_mst_branch ||
+		    aconnector->mst_port)
+			continue;
+
+		ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
+
+		if (ret < 0) {
+			dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+					aconnector->dc_link);
+			need_hotplug = true;
+		}
+	}
+	drm_connector_list_iter_end(&iter);
+
+	if (need_hotplug)
+		drm_kms_helper_hotplug_event(ddev);
+
 	amdgpu_dm_irq_resume_late(adev);
 
 	amdgpu_dm_smu_write_watermarks_table(adev);
@@ -2081,36 +2081,41 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
 	return ret;
 }
 
+#define MAX(a, b)	((a) > (b) ? (a) : (b))
+
 static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
 					 uint32_t pcie_gen_cap,
 					 uint32_t pcie_width_cap)
 {
 	struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
 	struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
-	u32 smu_pcie_arg;
+	uint8_t *table_member1, *table_member2;
+	uint32_t min_gen_speed, max_gen_speed;
+	uint32_t min_lane_width, max_lane_width;
+	uint32_t smu_pcie_arg;
 	int ret, i;
 
 	/* PCIE gen speed and lane width override */
+	GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
+	GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
+
+	min_gen_speed = MAX(0, table_member1[0]);
+	max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
+	min_gen_speed = min_gen_speed > max_gen_speed ?
+			max_gen_speed : min_gen_speed;
+	min_lane_width = MAX(1, table_member2[0]);
+	max_lane_width = MIN(pcie_width_cap, table_member2[1]);
+	min_lane_width = min_lane_width > max_lane_width ?
+			max_lane_width : min_lane_width;
+
 	if (!amdgpu_device_pcie_dynamic_switching_supported()) {
-		if (pcie_table->pcie_gen[NUM_LINK_LEVELS - 1] < pcie_gen_cap)
-			pcie_gen_cap = pcie_table->pcie_gen[NUM_LINK_LEVELS - 1];
-
-		if (pcie_table->pcie_lane[NUM_LINK_LEVELS - 1] < pcie_width_cap)
-			pcie_width_cap = pcie_table->pcie_lane[NUM_LINK_LEVELS - 1];
-
-		/* Force all levels to use the same settings */
-		for (i = 0; i < NUM_LINK_LEVELS; i++) {
-			pcie_table->pcie_gen[i] = pcie_gen_cap;
-			pcie_table->pcie_lane[i] = pcie_width_cap;
-		}
+		pcie_table->pcie_gen[0] = max_gen_speed;
+		pcie_table->pcie_lane[0] = max_lane_width;
 	} else {
-		for (i = 0; i < NUM_LINK_LEVELS; i++) {
-			if (pcie_table->pcie_gen[i] > pcie_gen_cap)
-				pcie_table->pcie_gen[i] = pcie_gen_cap;
-			if (pcie_table->pcie_lane[i] > pcie_width_cap)
-				pcie_table->pcie_lane[i] = pcie_width_cap;
-		}
+		pcie_table->pcie_gen[0] = min_gen_speed;
+		pcie_table->pcie_lane[0] = min_lane_width;
 	}
+	pcie_table->pcie_gen[1] = max_gen_speed;
+	pcie_table->pcie_lane[1] = max_lane_width;
 
 	for (i = 0; i < NUM_LINK_LEVELS; i++) {
 		smu_pcie_arg = (i << 16 |
@@ -3074,6 +3074,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
 	return ret;
 
 err:
+	usb_free_urb(sc->ghl_urb);
+
 	hid_hw_stop(hdev);
 	return ret;
 }

@@ -133,6 +133,14 @@ static int enable_gpe(struct device *dev)
 	}
 	wakeup = &adev->wakeup;
 
+	/*
+	 * Call acpi_disable_gpe(), so that reference count
+	 * gpe_event_info->runtime_count doesn't overflow.
+	 * When gpe_event_info->runtime_count = 0, the call
+	 * to acpi_disable_gpe() simply return.
+	 */
+	acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+
 	acpi_sts = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
 	if (ACPI_FAILURE(acpi_sts)) {
 		dev_err(dev, "enable ose_gpe failed\n");
@@ -791,6 +791,8 @@ static const struct hid_device_id nzxt_smart2_hid_id_table[] = {
 	{ HID_USB_DEVICE(0x1e71, 0x2009) }, /* NZXT RGB & Fan Controller */
 	{ HID_USB_DEVICE(0x1e71, 0x200e) }, /* NZXT RGB & Fan Controller */
 	{ HID_USB_DEVICE(0x1e71, 0x2010) }, /* NZXT RGB & Fan Controller */
+	{ HID_USB_DEVICE(0x1e71, 0x2011) }, /* NZXT RGB & Fan Controller (6 RGB) */
+	{ HID_USB_DEVICE(0x1e71, 0x2019) }, /* NZXT RGB & Fan Controller (6 RGB) */
 	{},
 };
 

@@ -1430,6 +1430,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,		&idle_cpu_adl_l),
 	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N,		&idle_cpu_adl_n),
 	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	&idle_cpu_spr),
+	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X,	&idle_cpu_spr),
 	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,	&idle_cpu_knl),
 	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,	&idle_cpu_knl),
 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&idle_cpu_bxt),

@@ -1862,6 +1863,7 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
 		skx_idle_state_table_update();
 		break;
 	case INTEL_FAM6_SAPPHIRERAPIDS_X:
+	case INTEL_FAM6_EMERALDRAPIDS_X:
 		spr_idle_state_table_update();
 		break;
 	case INTEL_FAM6_ALDERLAKE:
@@ -4936,7 +4936,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
 	int err = 0;
 	struct sockaddr *addr = (struct sockaddr *)&mc->addr;
 	struct net_device *ndev = NULL;
-	struct ib_sa_multicast ib;
+	struct ib_sa_multicast ib = {};
 	enum ib_gid_type gid_type;
 	bool send_only;
 

@@ -217,7 +217,7 @@ static int make_cma_ports(struct cma_dev_group *cma_dev_group,
 		return -ENOMEM;
 
 	for (i = 0; i < ports_num; i++) {
-		char port_str[10];
+		char port_str[11];
 
 		ports[i].port_num = i + 1;
 		snprintf(port_str, sizeof(port_str), "%u", i + 1);

@@ -2501,6 +2501,7 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
 	},
 	[RDMA_NLDEV_CMD_SYS_SET] = {
 		.doit = nldev_set_sys_set_doit,
+		.flags = RDMA_NL_ADMIN_PERM,
 	},
 	[RDMA_NLDEV_CMD_STAT_SET] = {
 		.doit = nldev_stat_set_doit,

@@ -535,7 +535,7 @@ static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
 	if (hdr->in_words * 4 != count)
 		return -EINVAL;
 
-	if (count < method_elm->req_size + sizeof(hdr)) {
+	if (count < method_elm->req_size + sizeof(*hdr)) {
 		/*
 		 * rdma-core v18 and v19 have a bug where they send DESTROY_CQ
 		 * with a 16 byte write instead of 24. Old kernels didn't
@@ -223,7 +223,7 @@ void del_sysfs_port_mcg_attr(struct mlx4_ib_dev *device, int port_num,
 static int add_port_entries(struct mlx4_ib_dev *device, int port_num)
 {
 	int i;
-	char buff[11];
+	char buff[12];
 	struct mlx4_ib_iov_port *port = NULL;
 	int ret = 0 ;
 	struct ib_port_attr attr;

@@ -2471,8 +2471,8 @@ destroy_res:
 	mlx5_steering_anchor_destroy_res(ft_prio);
 put_flow_table:
 	put_flow_table(dev, ft_prio, true);
+	mutex_unlock(&dev->flow_db->lock);
 free_obj:
-	mutex_unlock(&dev->flow_db->lock);
 	kfree(obj);
 
 	return err;

@@ -2074,7 +2074,7 @@ static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
 	case MLX5_IB_MMAP_DEVICE_MEM:
 		return "Device Memory";
 	default:
-		return NULL;
+		return "Unknown";
 	}
 }
 
@@ -973,6 +973,7 @@ static void siw_accept_newconn(struct siw_cep *cep)
 		siw_cep_put(cep);
 		new_cep->listen_cep = NULL;
 		if (rv) {
+			siw_cancel_mpatimer(new_cep);
 			siw_cep_set_free(new_cep);
 			goto error;
 		}

@@ -1097,9 +1098,12 @@ static void siw_cm_work_handler(struct work_struct *w)
 			/*
 			 * Socket close before MPA request received.
 			 */
-			siw_dbg_cep(cep, "no mpareq: drop listener\n");
-			siw_cep_put(cep->listen_cep);
-			cep->listen_cep = NULL;
+			if (cep->listen_cep) {
+				siw_dbg_cep(cep,
+					    "no mpareq: drop listener\n");
+				siw_cep_put(cep->listen_cep);
+				cep->listen_cep = NULL;
+			}
 		}
 	}
 	release_cep = 1;

@@ -1222,7 +1226,11 @@ static void siw_cm_llp_data_ready(struct sock *sk)
 	if (!cep)
 		goto out;
 
-	siw_dbg_cep(cep, "state: %d\n", cep->state);
+	siw_dbg_cep(cep, "cep state: %d, socket state %d\n",
+		    cep->state, sk->sk_state);
+
+	if (sk->sk_state != TCP_ESTABLISHED)
+		goto out;
 
 	switch (cep->state) {
 	case SIW_EPSTATE_RDMA_MODE:
@@ -2789,7 +2789,6 @@ static int srp_abort(struct scsi_cmnd *scmnd)
 	u32 tag;
 	u16 ch_idx;
 	struct srp_rdma_ch *ch;
-	int ret;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
 

@@ -2803,19 +2802,14 @@ static int srp_abort(struct scsi_cmnd *scmnd)
 	shost_printk(KERN_ERR, target->scsi_host,
 		     "Sending SRP abort for tag %#x\n", tag);
 	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
-			      SRP_TSK_ABORT_TASK, NULL) == 0)
-		ret = SUCCESS;
-	else if (target->rport->state == SRP_RPORT_LOST)
-		ret = FAST_IO_FAIL;
-	else
-		ret = FAILED;
-	if (ret == SUCCESS) {
+			      SRP_TSK_ABORT_TASK, NULL) == 0) {
 		srp_free_req(ch, req, scmnd, 0);
-		scmnd->result = DID_ABORT << 16;
-		scsi_done(scmnd);
+		return SUCCESS;
 	}
+	if (target->rport->state == SRP_RPORT_LOST)
+		return FAST_IO_FAIL;
 
-	return ret;
+	return FAILED;
 }
 
 static int srp_reset_device(struct scsi_cmnd *scmnd)
@@ -1886,13 +1886,23 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
 		/* Get the leaf page size */
 		tg = __ffs(smmu_domain->domain.pgsize_bitmap);
 
+		num_pages = size >> tg;
+
 		/* Convert page size of 12,14,16 (log2) to 1,2,3 */
 		cmd->tlbi.tg = (tg - 10) / 2;
 
-		/* Determine what level the granule is at */
-		cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
-
-		num_pages = size >> tg;
+		/*
+		 * Determine what level the granule is at. For non-leaf, both
+		 * io-pgtable and SVA pass a nominal last-level granule because
+		 * they don't know what level(s) actually apply, so ignore that
+		 * and leave TTL=0. However for various errata reasons we still
+		 * want to use a range command, so avoid the SVA corner case
+		 * where both scale and num could be 0 as well.
+		 */
+		if (cmd->tlbi.leaf)
+			cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
+		else if ((num_pages & CMDQ_TLBI_RANGE_NUM_MAX) == 1)
+			num_pages++;
 	}
 
 	cmds.num = 0;
@@ -3163,13 +3163,6 @@ static int iommu_suspend(void)
 	struct intel_iommu *iommu = NULL;
 	unsigned long flag;
 
-	for_each_active_iommu(iommu, drhd) {
-		iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
-					     GFP_KERNEL);
-		if (!iommu->iommu_state)
-			goto nomem;
-	}
-
 	iommu_flush_all();
 
 	for_each_active_iommu(iommu, drhd) {

@@ -3189,12 +3182,6 @@ static int iommu_suspend(void)
 		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 	}
 	return 0;
-
-nomem:
-	for_each_active_iommu(iommu, drhd)
-		kfree(iommu->iommu_state);
-
-	return -ENOMEM;
 }
 
 static void iommu_resume(void)

@@ -3226,9 +3213,6 @@ static void iommu_resume(void)
 
 		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 	}
-
-	for_each_active_iommu(iommu, drhd)
-		kfree(iommu->iommu_state);
 }
 
 static struct syscore_ops iommu_syscore_ops = {

@@ -593,7 +593,7 @@ struct intel_iommu {
 	struct iopf_queue *iopf_queue;
 	unsigned char iopfq_name[16];
 	struct q_inval *qi;	/* Queued invalidation info */
-	u32 *iommu_state; /* Store iommu states between suspend and resume.*/
+	u32 iommu_state[MAX_SR_DMAR_REGS]; /* Store iommu states between suspend and resume.*/
 
 #ifdef CONFIG_IRQ_REMAP
 	struct ir_table *ir_table;	/* Interrupt remapping info */
@@ -223,7 +223,7 @@ struct mtk_iommu_data {
 	struct device			*smicomm_dev;
 
 	struct mtk_iommu_bank_data	*bank;
-	struct mtk_iommu_domain		*share_dom; /* For 2 HWs share pgtable */
+	struct mtk_iommu_domain		*share_dom;
 
 	struct regmap			*pericfg;
 	struct mutex			mutex; /* Protect m4u_group/m4u_dom above */

@@ -579,8 +579,8 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
 	struct mtk_iommu_domain	*share_dom = data->share_dom;
 	const struct mtk_iommu_iova_region *region;
 
-	/* Always use share domain in sharing pgtable case */
-	if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE) && share_dom) {
+	/* Share pgtable when 2 MM IOMMU share the pgtable or one IOMMU use multiple iova ranges */
+	if (share_dom) {
 		dom->iop = share_dom->iop;
 		dom->cfg = share_dom->cfg;
 		dom->domain.pgsize_bitmap = share_dom->cfg.pgsize_bitmap;

@@ -613,8 +613,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
 	/* Update our support page sizes bitmap */
 	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
 
-	if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE))
-		data->share_dom = dom;
+	data->share_dom = dom;
 
 update_iova_region:
 	/* Update the iova region for this domain */
@@ -424,10 +424,6 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data,
 
 	led_parse_fwnode_props(dev, fwnode, &props);
 
-	/* We want to label LEDs that can produce full range of colors
-	 * as RGB, not multicolor */
-	BUG_ON(props.color == LED_COLOR_ID_MULTI);
-
 	if (props.label) {
 		/*
 		 * If init_data.devicename is NULL, then it indicates that
@@ -748,17 +748,16 @@ err:
 /*
  * Cleanup zoned device information.
  */
-static void dmz_put_zoned_device(struct dm_target *ti)
+static void dmz_put_zoned_devices(struct dm_target *ti)
 {
 	struct dmz_target *dmz = ti->private;
 	int i;
 
-	for (i = 0; i < dmz->nr_ddevs; i++) {
-		if (dmz->ddev[i]) {
+	for (i = 0; i < dmz->nr_ddevs; i++)
+		if (dmz->ddev[i])
 			dm_put_device(ti, dmz->ddev[i]);
-			dmz->ddev[i] = NULL;
-		}
-	}
+
+	kfree(dmz->ddev);
 }
 
 static int dmz_fixup_devices(struct dm_target *ti)

@@ -948,7 +947,7 @@ err_bio:
 err_meta:
 	dmz_dtr_metadata(dmz->metadata);
 err_dev:
-	dmz_put_zoned_device(ti);
+	dmz_put_zoned_devices(ti);
 err:
 	kfree(dmz->dev);
 	kfree(dmz);

@@ -978,7 +977,7 @@ static void dmz_dtr(struct dm_target *ti)
 
 	bioset_exit(&dmz->bio_set);
 
-	dmz_put_zoned_device(ti);
+	dmz_put_zoned_devices(ti);
 
 	mutex_destroy(&dmz->chunk_lock);
 
@@ -854,6 +854,13 @@ struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
 
 			set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
 			r5l_wake_reclaim(conf->log, 0);
+
+			/* release batch_last before wait to avoid risk of deadlock */
+			if (ctx && ctx->batch_last) {
+				raid5_release_stripe(ctx->batch_last);
+				ctx->batch_last = NULL;
+			}
+
 			wait_event_lock_irq(conf->wait_for_stripe,
 					    is_inactive_blocked(conf, hash),
 					    *(conf->hash_locks + hash));

@@ -893,6 +893,13 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
 		return -EINVAL;
 	}
 
+	/* UBI cannot work on flashes with zero erasesize. */
+	if (!mtd->erasesize) {
+		pr_err("ubi: refuse attaching mtd%d - zero erasesize flash is not supported\n",
+		       mtd->index);
+		return -EINVAL;
+	}
+
 	if (ubi_num == UBI_DEV_NUM_AUTO) {
 		/* Search for an empty slot in the @ubi_devices array */
 		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
@@ -3012,14 +3012,16 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
 		 * from the wrong location resulting in the switch booting
 		 * to wrong mode and inoperable.
 		 */
-		mv88e6xxx_g1_wait_eeprom_done(chip);
+		if (chip->info->ops->get_eeprom)
+			mv88e6xxx_g2_eeprom_wait(chip);
 
 		gpiod_set_value_cansleep(gpiod, 1);
 		usleep_range(10000, 20000);
 		gpiod_set_value_cansleep(gpiod, 0);
 		usleep_range(10000, 20000);
 
-		mv88e6xxx_g1_wait_eeprom_done(chip);
+		if (chip->info->ops->get_eeprom)
+			mv88e6xxx_g2_eeprom_wait(chip);
 	}
 }
 

@@ -75,37 +75,6 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
 	return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
 }
 
-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
-{
-	const unsigned long timeout = jiffies + 1 * HZ;
-	u16 val;
-	int err;
-
-	/* Wait up to 1 second for the switch to finish reading the
-	 * EEPROM.
-	 */
-	while (time_before(jiffies, timeout)) {
-		err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
-		if (err) {
-			dev_err(chip->dev, "Error reading status");
-			return;
-		}
-
-		/* If the switch is still resetting, it may not
-		 * respond on the bus, and so MDIO read returns
-		 * 0xffff. Differentiate between that, and waiting for
-		 * the EEPROM to be done by bit 0 being set.
-		 */
-		if (val != 0xffff &&
-		    val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
-			return;
-
-		usleep_range(1000, 2000);
-	}
-
-	dev_err(chip->dev, "Timeout waiting for EEPROM done");
-}
-
 /* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
  * Offset 0x02: Switch MAC Address Register Bytes 2 & 3
  * Offset 0x03: Switch MAC Address Register Bytes 4 & 5

@@ -281,7 +281,6 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
 int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
 int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
 int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
 
 int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
 int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);

@@ -340,7 +340,7 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
 * Offset 0x15: EEPROM Addr (for 8-bit data access)
 */
 
-static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
+int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
 {
 	int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY);
 	int err;

@@ -359,6 +359,7 @@ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
 
 int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
 				      int port);
+int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip);
 
 extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
 extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;
@@ -1308,24 +1308,23 @@ static void ibmveth_rx_csum_helper(struct sk_buff *skb,
 	 * the user space for finding a flow. During this process, OVS computes
 	 * checksum on the first packet when CHECKSUM_PARTIAL flag is set.
 	 *
-	 * So, re-compute TCP pseudo header checksum when configured for
-	 * trunk mode.
+	 * So, re-compute TCP pseudo header checksum.
 	 */
+
 	if (iph_proto == IPPROTO_TCP) {
 		struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
+
 		if (tcph->check == 0x0000) {
 			/* Recompute TCP pseudo header checksum */
-			if (adapter->is_active_trunk) {
-				tcphdrlen = skb->len - iphlen;
-				if (skb_proto == ETH_P_IP)
-					tcph->check =
-					 ~csum_tcpudp_magic(iph->saddr,
-					iph->daddr, tcphdrlen, iph_proto, 0);
-				else if (skb_proto == ETH_P_IPV6)
-					tcph->check =
-					 ~csum_ipv6_magic(&iph6->saddr,
-					&iph6->daddr, tcphdrlen, iph_proto, 0);
-			}
+			tcphdrlen = skb->len - iphlen;
+			if (skb_proto == ETH_P_IP)
+				tcph->check =
+				 ~csum_tcpudp_magic(iph->saddr,
+				iph->daddr, tcphdrlen, iph_proto, 0);
+			else if (skb_proto == ETH_P_IPV6)
+				tcph->check =
+				 ~csum_ipv6_magic(&iph6->saddr,
+				&iph6->daddr, tcphdrlen, iph_proto, 0);
 			/* Setup SKB fields for checksum offload */
 			skb_partial_csum_set(skb, iphlen,
 					     offsetof(struct tcphdr, check));

@@ -361,9 +361,9 @@ static int i40e_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
 			  1000000ULL << 16);
 
 	if (neg_adj)
-		adj = I40E_PTP_40GB_INCVAL - diff;
+		adj = freq - diff;
 	else
-		adj = I40E_PTP_40GB_INCVAL + diff;
+		adj = freq + diff;
 
 	wr32(hw, I40E_PRTTSYN_INC_L, adj & 0xFFFFFFFF);
 	wr32(hw, I40E_PRTTSYN_INC_H, adj >> 32);
@@ -2862,8 +2862,8 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
 
 	eth->rx_events++;
 	if (likely(napi_schedule_prep(&eth->rx_napi))) {
-		__napi_schedule(&eth->rx_napi);
 		mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+		__napi_schedule(&eth->rx_napi);
 	}
 
 	return IRQ_HANDLED;

@@ -2875,8 +2875,8 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
 
 	eth->tx_events++;
 	if (likely(napi_schedule_prep(&eth->tx_napi))) {
-		__napi_schedule(&eth->tx_napi);
 		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+		__napi_schedule(&eth->tx_napi);
 	}
 
 	return IRQ_HANDLED;

@@ -110,9 +110,9 @@ struct qed_ll2_info {
 	enum core_tx_dest tx_dest;
 	u8 tx_stats_en;
 	bool main_func_queue;
+	struct qed_ll2_cbs cbs;
 	struct qed_ll2_rx_queue rx_queue;
 	struct qed_ll2_tx_queue tx_queue;
-	struct qed_ll2_cbs cbs;
 };
 
 extern const struct qed_ll2_ops qed_ll2_ops_pass;
@@ -105,6 +105,7 @@ struct stm32_ops {
 	int (*parse_data)(struct stm32_dwmac *dwmac,
 			  struct device *dev);
 	u32 syscfg_eth_mask;
+	bool clk_rx_enable_in_suspend;
 };
 
 static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)

@@ -122,7 +123,8 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
 	if (ret)
 		return ret;
 
-	if (!dwmac->dev->power.is_suspended) {
+	if (!dwmac->ops->clk_rx_enable_in_suspend ||
+	    !dwmac->dev->power.is_suspended) {
 		ret = clk_prepare_enable(dwmac->clk_rx);
 		if (ret) {
 			clk_disable_unprepare(dwmac->clk_tx);

@@ -515,7 +517,8 @@ static struct stm32_ops stm32mp1_dwmac_data = {
 	.suspend = stm32mp1_suspend,
 	.resume = stm32mp1_resume,
 	.parse_data = stm32mp1_parse_data,
-	.syscfg_eth_mask = SYSCFG_MP1_ETH_MASK
+	.syscfg_eth_mask = SYSCFG_MP1_ETH_MASK,
+	.clk_rx_enable_in_suspend = true
 };
 
 static const struct of_device_id stm32_dwmac_match[] = {

@@ -1614,6 +1614,7 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
 		if (tx_chn->irq <= 0) {
 			dev_err(dev, "Failed to get tx dma irq %d\n",
 				tx_chn->irq);
+			ret = tx_chn->irq ?: -ENXIO;
 			goto err;
 		}
 
@@ -90,7 +90,9 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
 	ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
 		 | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 		 0, index, &buf, 4);
-	if (unlikely(ret < 0)) {
+	if (unlikely(ret < 4)) {
+		ret = ret < 0 ? ret : -ENODATA;
+
 		netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
 			    index, ret);
 		return ret;
@@ -664,7 +664,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
 	skb->protocol = htons(ETH_P_IPV6);
 	skb->dev = dev;
 
-	rcu_read_lock_bh();
+	rcu_read_lock();
 	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
 	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
 	if (unlikely(!neigh))

@@ -672,10 +672,10 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
 	if (!IS_ERR(neigh)) {
 		sock_confirm_neigh(skb, neigh);
 		ret = neigh_output(neigh, skb, false);
-		rcu_read_unlock_bh();
+		rcu_read_unlock();
 		return ret;
 	}
-	rcu_read_unlock_bh();
+	rcu_read_unlock();
 
 	IP6_INC_STATS(dev_net(dst->dev),
 		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);

@@ -889,7 +889,7 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
 		}
 	}
 
-	rcu_read_lock_bh();
+	rcu_read_lock();
 
 	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
 	if (!IS_ERR(neigh)) {

@@ -898,11 +898,11 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
 		sock_confirm_neigh(skb, neigh);
 		/* if crossing protocols, can not use the cached header */
 		ret = neigh_output(neigh, skb, is_v6gw);
-		rcu_read_unlock_bh();
+		rcu_read_unlock();
 		return ret;
 	}
 
-	rcu_read_unlock_bh();
+	rcu_read_unlock();
 	vrf_tx_error(skb->dev, skb);
 	return -EINVAL;
 }

@@ -1910,7 +1910,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
 		struct vxlan_fdb *f;
 		struct sk_buff *reply;
 
-		if (!(n->nud_state & NUD_CONNECTED)) {
+		if (!(READ_ONCE(n->nud_state) & NUD_CONNECTED)) {
 			neigh_release(n);
 			goto out;
 		}

@@ -2074,7 +2074,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
 		struct vxlan_fdb *f;
 		struct sk_buff *reply;
 
-		if (!(n->nud_state & NUD_CONNECTED)) {
+		if (!(READ_ONCE(n->nud_state) & NUD_CONNECTED)) {
 			neigh_release(n);
 			goto out;
 		}
@@ -34,6 +34,8 @@
 #define TDM_PPPOHT_SLIC_MAXIN
 #define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
 
+static int uhdlc_close(struct net_device *dev);
+
 static struct ucc_tdm_info utdm_primary_info = {
 	.uf_info = {
 		.tsa = 0,

@@ -708,6 +710,7 @@ static int uhdlc_open(struct net_device *dev)
 	hdlc_device *hdlc = dev_to_hdlc(dev);
 	struct ucc_hdlc_private *priv = hdlc->priv;
 	struct ucc_tdm *utdm = priv->utdm;
+	int rc = 0;
 
 	if (priv->hdlc_busy != 1) {
 		if (request_irq(priv->ut_info->uf_info.irq,

@@ -731,10 +734,13 @@ static int uhdlc_open(struct net_device *dev)
 		napi_enable(&priv->napi);
 		netdev_reset_queue(dev);
 		netif_start_queue(dev);
-		hdlc_open(dev);
+
+		rc = hdlc_open(dev);
+		if (rc)
+			uhdlc_close(dev);
 	}
 
-	return 0;
+	return rc;
 }
 
 static void uhdlc_memclean(struct ucc_hdlc_private *priv)

@@ -824,6 +830,8 @@ static int uhdlc_close(struct net_device *dev)
 	netdev_reset_queue(dev);
 	priv->hdlc_busy = 0;
 
+	hdlc_close(dev);
+
 	return 0;
 }
 
@@ -295,9 +295,9 @@ struct iwl_fw_ini_fifo_hdr {
 struct iwl_fw_ini_error_dump_range {
 	__le32 range_data_size;
 	union {
-		__le32 internal_base_addr;
-		__le64 dram_base_addr;
-		__le32 page_num;
+		__le32 internal_base_addr __packed;
+		__le64 dram_base_addr __packed;
+		__le32 page_num __packed;
 		struct iwl_fw_ini_fifo_hdr fifo_hdr;
 		struct iwl_cmd_header fw_pkt_hdr;
 	};

@@ -796,7 +796,7 @@ out:
 	mvm->nvm_data->bands[0].n_channels = 1;
 	mvm->nvm_data->bands[0].n_bitrates = 1;
 	mvm->nvm_data->bands[0].bitrates =
-		(void *)((u8 *)mvm->nvm_data->channels + 1);
+		(void *)(mvm->nvm_data->channels + 1);
 	mvm->nvm_data->bands[0].bitrates->hw_value = 10;
 }
 
@@ -965,8 +965,8 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
 			}
 		}
 
-		tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len);
-		tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba);
+		tlv_buf_left -= (sizeof(tlv_rxba->header) + tlv_len);
+		tmp = (u8 *)tlv_rxba + sizeof(tlv_rxba->header) + tlv_len;
 		tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp;
 	}
 }

@@ -86,7 +86,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
 	rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
 	rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
 
-	if (sizeof(*rx_pkt_hdr) + rx_pkt_off > skb->len) {
+	if (sizeof(rx_pkt_hdr->eth803_hdr) + sizeof(rfc1042_header) +
+	    rx_pkt_off > skb->len) {
 		mwifiex_dbg(priv->adapter, ERROR,
 			    "wrong rx packet offset: len=%d, rx_pkt_off=%d\n",
 			    skb->len, rx_pkt_off);

@@ -95,12 +96,13 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
 		return -1;
 	}
 
-	if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
-		     sizeof(bridge_tunnel_header))) ||
-	    (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
-		     sizeof(rfc1042_header)) &&
-	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
-	     ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
+	if (sizeof(*rx_pkt_hdr) + rx_pkt_off <= skb->len &&
+	    ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+		      sizeof(bridge_tunnel_header))) ||
+	     (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+		      sizeof(rfc1042_header)) &&
+	      ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
+	      ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX))) {
 		/*
 		 * Replace the 803 header and rfc1042 header (llc/snap) with an
 		 * EthernetII header, keep the src/dst and snap_type
@@ -131,15 +131,8 @@ u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
 			s8 *lna_2g, s8 *lna_5g,
 			struct ieee80211_channel *chan)
 {
-	u16 val;
 	u8 lna;
 
-	val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
-	if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
-		*lna_2g = 0;
-	if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
-		memset(lna_5g, 0, sizeof(s8) * 3);
-
 	if (chan->band == NL80211_BAND_2GHZ)
 		lna = *lna_2g;
 	else if (chan->hw_value <= 64)

@@ -256,7 +256,8 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
 	struct ieee80211_channel *chan = dev->mphy.chandef.chan;
 	int channel = chan->hw_value;
 	s8 lna_5g[3], lna_2g;
-	u8 lna;
+	bool use_lna;
+	u8 lna = 0;
 	u16 val;
 
 	if (chan->band == NL80211_BAND_2GHZ)

@@ -275,7 +276,15 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
 	dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
 	dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
 
-	lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+	val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+	if (chan->band == NL80211_BAND_2GHZ)
+		use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_2G);
+	else
+		use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_5G);
+
+	if (use_lna)
+		lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+
 	dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
 }
 EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
@@ -902,13 +902,13 @@ int of_changeset_action(struct of_changeset *ocs, unsigned long action,
 {
 	struct of_changeset_entry *ce;
 
+	if (WARN_ON(action >= ARRAY_SIZE(action_names)))
+		return -EINVAL;
+
 	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
 	if (!ce)
 		return -ENOMEM;
 
-	if (WARN_ON(action >= ARRAY_SIZE(action_names)))
-		return -EINVAL;
-
 	/* get a reference to the node */
 	ce->action = action;
 	ce->np = of_node_get(np);
@@ -40,7 +40,6 @@
 #define PARF_PHY_REFCLK				0x4c
 #define PARF_CONFIG_BITS			0x50
 #define PARF_DBI_BASE_ADDR			0x168
-#define PARF_SLV_ADDR_SPACE_SIZE_2_3_3		0x16c /* Register offset specific to IP ver 2.3.3 */
 #define PARF_MHI_CLOCK_RESET_CTRL		0x174
 #define PARF_AXI_MSTR_WR_ADDR_HALT		0x178
 #define PARF_AXI_MSTR_WR_ADDR_HALT_V2		0x1a8

@@ -1148,8 +1147,7 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
 	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
 	u32 val;
 
-	writel(SLV_ADDR_SPACE_SZ,
-	       pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_2_3_3);
+	writel(SLV_ADDR_SPACE_SZ, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE);
 
 	val = readl(pcie->parf + PARF_PHY_CTRL);
 	val &= ~BIT(0);

@@ -3532,7 +3532,6 @@ ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev)
 	return 0;
 
 out:
-	ptp_ocp_dev_release(&bp->dev);
 	put_device(&bp->dev);
 	return err;
 }
@@ -5543,6 +5543,8 @@ regulator_register(
 		goto rinse;
 	}
 	device_initialize(&rdev->dev);
+	dev_set_drvdata(&rdev->dev, rdev);
+	rdev->dev.class = &regulator_class;
 	spin_lock_init(&rdev->err_lock);
 
 	/*

@@ -5604,11 +5606,9 @@ regulator_register(
 	rdev->supply_name = regulator_desc->supply_name;
 
 	/* register with sysfs */
-	rdev->dev.class = &regulator_class;
 	rdev->dev.parent = config->dev;
 	dev_set_name(&rdev->dev, "regulator.%lu",
 		   (unsigned long) atomic_inc_return(&regulator_no));
-	dev_set_drvdata(&rdev->dev, rdev);
 
 	/* set regulator constraints */
 	if (init_data)
@@ -35,19 +35,19 @@ struct mt6358_regulator_info {
 };
 
 #define MT6358_BUCK(match, vreg, min, max, step, \
-	volt_ranges, vosel_mask, _da_vsel_reg, _da_vsel_mask, \
+	vosel_mask, _da_vsel_reg, _da_vsel_mask, \
 	_modeset_reg, _modeset_shift) \
 [MT6358_ID_##vreg] = {	\
 	.desc = {	\
 		.name = #vreg,	\
 		.of_match = of_match_ptr(match),	\
-		.ops = &mt6358_volt_range_ops,	\
+		.ops = &mt6358_buck_ops,	\
 		.type = REGULATOR_VOLTAGE,	\
 		.id = MT6358_ID_##vreg,	\
 		.owner = THIS_MODULE,	\
 		.n_voltages = ((max) - (min)) / (step) + 1,	\
-		.linear_ranges = volt_ranges,	\
-		.n_linear_ranges = ARRAY_SIZE(volt_ranges),	\
+		.min_uV = (min),	\
+		.uV_step = (step),	\
 		.vsel_reg = MT6358_BUCK_##vreg##_ELR0,	\
 		.vsel_mask = vosel_mask,	\
 		.enable_reg = MT6358_BUCK_##vreg##_CON0,	\

@@ -87,7 +87,7 @@ struct mt6358_regulator_info {
 }
 
 #define MT6358_LDO1(match, vreg, min, max, step, \
-	volt_ranges, _da_vsel_reg, _da_vsel_mask, \
+	_da_vsel_reg, _da_vsel_mask, \
 	vosel, vosel_mask) \
 [MT6358_ID_##vreg] = {	\
 	.desc = {	\

@@ -98,8 +98,8 @@ struct mt6358_regulator_info {
 		.id = MT6358_ID_##vreg,	\
 		.owner = THIS_MODULE,	\
 		.n_voltages = ((max) - (min)) / (step) + 1,	\
-		.linear_ranges = volt_ranges,	\
-		.n_linear_ranges = ARRAY_SIZE(volt_ranges),	\
+		.min_uV = (min),	\
+		.uV_step = (step),	\
 		.vsel_reg = vosel,	\
 		.vsel_mask = vosel_mask,	\
 		.enable_reg = MT6358_LDO_##vreg##_CON0,	\

@@ -131,19 +131,19 @@ struct mt6358_regulator_info {
 }
 
 #define MT6366_BUCK(match, vreg, min, max, step, \
-	volt_ranges, vosel_mask, _da_vsel_reg, _da_vsel_mask, \
+	vosel_mask, _da_vsel_reg, _da_vsel_mask, \
 	_modeset_reg, _modeset_shift) \
 [MT6366_ID_##vreg] = {	\
 	.desc = {	\
 		.name = #vreg,	\
 		.of_match = of_match_ptr(match),	\
-		.ops = &mt6358_volt_range_ops,	\
+		.ops = &mt6358_buck_ops,	\
 		.type = REGULATOR_VOLTAGE,	\
 		.id = MT6366_ID_##vreg,	\
 		.owner = THIS_MODULE,	\
 		.n_voltages = ((max) - (min)) / (step) + 1,	\
-		.linear_ranges = volt_ranges,	\
-		.n_linear_ranges = ARRAY_SIZE(volt_ranges),	\
+		.min_uV = (min),	\
+		.uV_step = (step),	\
 		.vsel_reg = MT6358_BUCK_##vreg##_ELR0,	\
 		.vsel_mask = vosel_mask,	\
 		.enable_reg = MT6358_BUCK_##vreg##_CON0,	\

@@ -183,7 +183,7 @@ struct mt6358_regulator_info {
 }
 
 #define MT6366_LDO1(match, vreg, min, max, step, \
-	volt_ranges, _da_vsel_reg, _da_vsel_mask, \
+	_da_vsel_reg, _da_vsel_mask, \
 	vosel, vosel_mask) \
 [MT6366_ID_##vreg] = {	\
 	.desc = {	\

@@ -194,8 +194,8 @@ struct mt6358_regulator_info {
 		.id = MT6366_ID_##vreg,	\
 		.owner = THIS_MODULE,	\
 		.n_voltages = ((max) - (min)) / (step) + 1,	\
-		.linear_ranges = volt_ranges,	\
-		.n_linear_ranges = ARRAY_SIZE(volt_ranges),	\
+		.min_uV = (min),	\
+		.uV_step = (step),	\
 		.vsel_reg = vosel,	\
 		.vsel_mask = vosel_mask,	\
 		.enable_reg = MT6358_LDO_##vreg##_CON0,	\

@@ -226,21 +226,6 @@ struct mt6358_regulator_info {
 		.qi = BIT(15),	\
 }
 
-static const struct linear_range buck_volt_range1[] = {
-	REGULATOR_LINEAR_RANGE(500000, 0, 0x7f, 6250),
-};
-
-static const struct linear_range buck_volt_range2[] = {
-	REGULATOR_LINEAR_RANGE(500000, 0, 0x7f, 12500),
-};
-
-static const struct linear_range buck_volt_range3[] = {
-	REGULATOR_LINEAR_RANGE(500000, 0, 0x3f, 50000),
-};
-
-static const struct linear_range buck_volt_range4[] = {
-	REGULATOR_LINEAR_RANGE(1000000, 0, 0x7f, 12500),
-};
 
 static const unsigned int vdram2_voltages[] = {
 	600000, 1800000,

@@ -463,9 +448,9 @@ static unsigned int mt6358_regulator_get_mode(struct regulator_dev *rdev)
 	}
 }
 
-static const struct regulator_ops mt6358_volt_range_ops = {
-	.list_voltage = regulator_list_voltage_linear_range,
-	.map_voltage = regulator_map_voltage_linear_range,
+static const struct regulator_ops mt6358_buck_ops = {
+	.list_voltage = regulator_list_voltage_linear,
+	.map_voltage = regulator_map_voltage_linear,
 	.set_voltage_sel = regulator_set_voltage_sel_regmap,
 	.get_voltage_sel = mt6358_get_buck_voltage_sel,
 	.set_voltage_time_sel = regulator_set_voltage_time_sel,

@@ -477,6 +462,18 @@ static const struct regulator_ops mt6358_volt_range_ops = {
 	.get_mode = mt6358_regulator_get_mode,
 };
 
+static const struct regulator_ops mt6358_volt_range_ops = {
+	.list_voltage = regulator_list_voltage_linear,
+	.map_voltage = regulator_map_voltage_linear,
+	.set_voltage_sel = regulator_set_voltage_sel_regmap,
+	.get_voltage_sel = mt6358_get_buck_voltage_sel,
+	.set_voltage_time_sel = regulator_set_voltage_time_sel,
+	.enable = regulator_enable_regmap,
+	.disable = regulator_disable_regmap,
+	.is_enabled = regulator_is_enabled_regmap,
+	.get_status = mt6358_get_status,
+};
+
 static const struct regulator_ops mt6358_volt_table_ops = {
 	.list_voltage = regulator_list_voltage_table,
 	.map_voltage = regulator_map_voltage_iterate,

@@ -500,35 +497,23 @@ static const struct regulator_ops mt6358_volt_fixed_ops = {
 /* The array is indexed by id(MT6358_ID_XXX) */
 static struct mt6358_regulator_info mt6358_regulators[] = {
 	MT6358_BUCK("buck_vdram1", VDRAM1, 500000, 2087500, 12500,
-		    buck_volt_range2, 0x7f, MT6358_BUCK_VDRAM1_DBG0, 0x7f,
-		    MT6358_VDRAM1_ANA_CON0, 8),
+		    0x7f, MT6358_BUCK_VDRAM1_DBG0, 0x7f, MT6358_VDRAM1_ANA_CON0, 8),
 	MT6358_BUCK("buck_vcore", VCORE, 500000, 1293750, 6250,
-		    buck_volt_range1, 0x7f, MT6358_BUCK_VCORE_DBG0, 0x7f,
-		    MT6358_VCORE_VGPU_ANA_CON0, 1),
-	MT6358_BUCK("buck_vcore_sshub", VCORE_SSHUB, 500000, 1293750, 6250,
-		    buck_volt_range1, 0x7f, MT6358_BUCK_VCORE_SSHUB_ELR0, 0x7f,
-		    MT6358_VCORE_VGPU_ANA_CON0, 1),
+		    0x7f, MT6358_BUCK_VCORE_DBG0, 0x7f, MT6358_VCORE_VGPU_ANA_CON0, 1),
 	MT6358_BUCK("buck_vpa", VPA, 500000, 3650000, 50000,
-		    buck_volt_range3, 0x3f, MT6358_BUCK_VPA_DBG0, 0x3f,
-		    MT6358_VPA_ANA_CON0, 3),
+		    0x3f, MT6358_BUCK_VPA_DBG0, 0x3f, MT6358_VPA_ANA_CON0, 3),
 	MT6358_BUCK("buck_vproc11", VPROC11, 500000, 1293750, 6250,
-		    buck_volt_range1, 0x7f, MT6358_BUCK_VPROC11_DBG0, 0x7f,
-		    MT6358_VPROC_ANA_CON0, 1),
+		    0x7f, MT6358_BUCK_VPROC11_DBG0, 0x7f, MT6358_VPROC_ANA_CON0, 1),
 	MT6358_BUCK("buck_vproc12", VPROC12, 500000, 1293750, 6250,
-		    buck_volt_range1, 0x7f, MT6358_BUCK_VPROC12_DBG0, 0x7f,
-		    MT6358_VPROC_ANA_CON0, 2),
+		    0x7f, MT6358_BUCK_VPROC12_DBG0, 0x7f, MT6358_VPROC_ANA_CON0, 2),
 	MT6358_BUCK("buck_vgpu", VGPU, 500000, 1293750, 6250,
-		    buck_volt_range1, 0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f,
-		    MT6358_VCORE_VGPU_ANA_CON0, 2),
+		    0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f, MT6358_VCORE_VGPU_ANA_CON0, 2),
 	MT6358_BUCK("buck_vs2", VS2, 500000, 2087500, 12500,
-		    buck_volt_range2, 0x7f, MT6358_BUCK_VS2_DBG0, 0x7f,
-		    MT6358_VS2_ANA_CON0, 8),
+		    0x7f, MT6358_BUCK_VS2_DBG0, 0x7f, MT6358_VS2_ANA_CON0, 8),
 	MT6358_BUCK("buck_vmodem", VMODEM, 500000, 1293750, 6250,
-		    buck_volt_range1, 0x7f, MT6358_BUCK_VMODEM_DBG0, 0x7f,
-		    MT6358_VMODEM_ANA_CON0, 8),
+		    0x7f, MT6358_BUCK_VMODEM_DBG0, 0x7f, MT6358_VMODEM_ANA_CON0, 8),
 	MT6358_BUCK("buck_vs1", VS1, 1000000, 2587500, 12500,
-		    buck_volt_range4, 0x7f, MT6358_BUCK_VS1_DBG0, 0x7f,
-		    MT6358_VS1_ANA_CON0, 8),
+		    0x7f, MT6358_BUCK_VS1_DBG0, 0x7f, MT6358_VS1_ANA_CON0, 8),
 	MT6358_REG_FIXED("ldo_vrf12", VRF12,
 			 MT6358_LDO_VRF12_CON0, 0, 1200000),
 	MT6358_REG_FIXED("ldo_vio18", VIO18,

@@ -582,55 +567,35 @@ static struct mt6358_regulator_info mt6358_regulators[] = {
 	MT6358_LDO("ldo_vsim2", VSIM2, vsim_voltages, vsim_idx,
 		   MT6358_LDO_VSIM2_CON0, 0, MT6358_VSIM2_ANA_CON0, 0xf00),
 	MT6358_LDO1("ldo_vsram_proc11", VSRAM_PROC11, 500000, 1293750, 6250,
-		    buck_volt_range1, MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f00,
-		    MT6358_LDO_VSRAM_CON0, 0x7f),
+		    MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON0, 0x7f),
 	MT6358_LDO1("ldo_vsram_others", VSRAM_OTHERS, 500000, 1293750, 6250,
-		    buck_volt_range1, MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f00,
-		    MT6358_LDO_VSRAM_CON2, 0x7f),
-	MT6358_LDO1("ldo_vsram_others_sshub", VSRAM_OTHERS_SSHUB, 500000,
-		    1293750, 6250, buck_volt_range1,
-		    MT6358_LDO_VSRAM_OTHERS_SSHUB_CON1, 0x7f,
-		    MT6358_LDO_VSRAM_OTHERS_SSHUB_CON1, 0x7f),
+		    MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON2, 0x7f),
 	MT6358_LDO1("ldo_vsram_gpu", VSRAM_GPU, 500000, 1293750, 6250,
-		    buck_volt_range1, MT6358_LDO_VSRAM_GPU_DBG0, 0x7f00,
-		    MT6358_LDO_VSRAM_CON3, 0x7f),
+		    MT6358_LDO_VSRAM_GPU_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON3, 0x7f),
 	MT6358_LDO1("ldo_vsram_proc12", VSRAM_PROC12, 500000, 1293750, 6250,
-		    buck_volt_range1, MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f00,
-		    MT6358_LDO_VSRAM_CON1, 0x7f),
+		    MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON1, 0x7f),
 };
 
 /* The array is indexed by id(MT6366_ID_XXX) */
 static struct mt6358_regulator_info mt6366_regulators[] = {
 	MT6366_BUCK("buck_vdram1", VDRAM1, 500000, 2087500, 12500,
-		    buck_volt_range2, 0x7f, MT6358_BUCK_VDRAM1_DBG0, 0x7f,
-		    MT6358_VDRAM1_ANA_CON0, 8),
+		    0x7f, MT6358_BUCK_VDRAM1_DBG0, 0x7f, MT6358_VDRAM1_ANA_CON0, 8),
 	MT6366_BUCK("buck_vcore", VCORE, 500000, 1293750, 6250,
-		    buck_volt_range1, 0x7f, MT6358_BUCK_VCORE_DBG0, 0x7f,
-		    MT6358_VCORE_VGPU_ANA_CON0, 1),
-	MT6366_BUCK("buck_vcore_sshub", VCORE_SSHUB, 500000, 1293750, 6250,
-		    buck_volt_range1, 0x7f, MT6358_BUCK_VCORE_SSHUB_ELR0, 0x7f,
-		    MT6358_VCORE_VGPU_ANA_CON0, 1),
+		    0x7f, MT6358_BUCK_VCORE_DBG0, 0x7f, MT6358_VCORE_VGPU_ANA_CON0, 1),
 	MT6366_BUCK("buck_vpa", VPA, 500000, 3650000, 50000,
-		    buck_volt_range3, 0x3f, MT6358_BUCK_VPA_DBG0, 0x3f,
-		    MT6358_VPA_ANA_CON0, 3),
+		    0x3f, MT6358_BUCK_VPA_DBG0, 0x3f, MT6358_VPA_ANA_CON0, 3),
 	MT6366_BUCK("buck_vproc11", VPROC11, 500000, 1293750, 6250,
-		    buck_volt_range1, 0x7f, MT6358_BUCK_VPROC11_DBG0, 0x7f,
-		    MT6358_VPROC_ANA_CON0, 1),
+		    0x7f, MT6358_BUCK_VPROC11_DBG0, 0x7f, MT6358_VPROC_ANA_CON0, 1),
 	MT6366_BUCK("buck_vproc12", VPROC12, 500000, 1293750, 6250,
-		    buck_volt_range1, 0x7f, MT6358_BUCK_VPROC12_DBG0, 0x7f,
-		    MT6358_VPROC_ANA_CON0, 2),
+		    0x7f, MT6358_BUCK_VPROC12_DBG0, 0x7f, MT6358_VPROC_ANA_CON0, 2),
 	MT6366_BUCK("buck_vgpu", VGPU, 500000, 1293750, 6250,
-		    buck_volt_range1, 0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f,
-		    MT6358_VCORE_VGPU_ANA_CON0, 2),
+		    0x7f, MT6358_BUCK_VGPU_ELR0, 0x7f, MT6358_VCORE_VGPU_ANA_CON0, 2),
 	MT6366_BUCK("buck_vs2", VS2, 500000, 2087500, 12500,
-		    buck_volt_range2, 0x7f, MT6358_BUCK_VS2_DBG0, 0x7f,
-		    MT6358_VS2_ANA_CON0, 8),
+		    0x7f, MT6358_BUCK_VS2_DBG0, 0x7f, MT6358_VS2_ANA_CON0, 8),
 	MT6366_BUCK("buck_vmodem", VMODEM, 500000, 1293750, 6250,
-		    buck_volt_range1, 0x7f, MT6358_BUCK_VMODEM_DBG0, 0x7f,
-		    MT6358_VMODEM_ANA_CON0, 8),
+		    0x7f, MT6358_BUCK_VMODEM_DBG0, 0x7f, MT6358_VMODEM_ANA_CON0, 8),
 	MT6366_BUCK("buck_vs1", VS1, 1000000, 2587500, 12500,
-		    buck_volt_range4, 0x7f, MT6358_BUCK_VS1_DBG0, 0x7f,
-		    MT6358_VS1_ANA_CON0, 8),
+		    0x7f, MT6358_BUCK_VS1_DBG0, 0x7f, MT6358_VS1_ANA_CON0, 8),
 	MT6366_REG_FIXED("ldo_vrf12", VRF12,
 			 MT6358_LDO_VRF12_CON0, 0, 1200000),
 	MT6366_REG_FIXED("ldo_vio18", VIO18,

@@ -673,21 +638,13 @@ static struct mt6358_regulator_info mt6366_regulators[] = {
 	MT6366_LDO("ldo_vsim2", VSIM2, vsim_voltages, vsim_idx,
 		   MT6358_LDO_VSIM2_CON0, 0, MT6358_VSIM2_ANA_CON0, 0xf00),
 	MT6366_LDO1("ldo_vsram_proc11", VSRAM_PROC11, 500000, 1293750, 6250,
-		    buck_volt_range1, MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f00,
-		    MT6358_LDO_VSRAM_CON0, 0x7f),
+		    MT6358_LDO_VSRAM_PROC11_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON0, 0x7f),
 	MT6366_LDO1("ldo_vsram_others", VSRAM_OTHERS, 500000, 1293750, 6250,
-		    buck_volt_range1, MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f00,
-		    MT6358_LDO_VSRAM_CON2, 0x7f),
-	MT6366_LDO1("ldo_vsram_others_sshub", VSRAM_OTHERS_SSHUB, 500000,
-		    1293750, 6250, buck_volt_range1,
-		    MT6358_LDO_VSRAM_OTHERS_SSHUB_CON1, 0x7f,
-		    MT6358_LDO_VSRAM_OTHERS_SSHUB_CON1, 0x7f),
+		    MT6358_LDO_VSRAM_OTHERS_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON2, 0x7f),
 	MT6366_LDO1("ldo_vsram_gpu", VSRAM_GPU, 500000, 1293750, 6250,
-		    buck_volt_range1, MT6358_LDO_VSRAM_GPU_DBG0, 0x7f00,
-		    MT6358_LDO_VSRAM_CON3, 0x7f),
+		    MT6358_LDO_VSRAM_GPU_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON3, 0x7f),
 	MT6366_LDO1("ldo_vsram_proc12", VSRAM_PROC12, 500000, 1293750, 6250,
-		    buck_volt_range1, MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f00,
-		    MT6358_LDO_VSRAM_CON1, 0x7f),
+		    MT6358_LDO_VSRAM_PROC12_DBG0, 0x7f00, MT6358_LDO_VSRAM_CON1, 0x7f),
 };
 
 static int mt6358_regulator_probe(struct platform_device *pdev)
@@ -518,12 +518,12 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 	if (port) {
 		put_device(&port->dev);
 		retval = -EEXIST;
-		goto err_out;
+		goto err_put;
 	}
 
 	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
 	if (!port)
-		goto err_out;
+		goto err_put;
 
 	rwlock_init(&port->unit_list_lock);
 	INIT_LIST_HEAD(&port->unit_list);

@@ -546,7 +546,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 
 	if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
 		kfree(port);
-		goto err_out;
+		goto err_put;
 	}
 	retval = -EINVAL;
 

@@ -563,7 +563,8 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 
 	return port;
 
-err_out:
+err_put:
 	zfcp_ccw_adapter_put(adapter);
+err_out:
 	return ERR_PTR(retval);
 }
@@ -1451,7 +1451,7 @@ retry_next:
 #endif
 			break;
 		}
-		scsi_rescan_device(&device->sdev_gendev);
+		scsi_rescan_device(device);
 		break;
 
 	default:

@@ -1500,7 +1500,7 @@ static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
 
 	sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
 	if (sdev) {
-		scsi_rescan_device(&sdev->sdev_gendev);
+		scsi_rescan_device(sdev);
 		scsi_device_put(sdev);
 	}
 }

@@ -2454,7 +2454,7 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
 		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
 		break;
 	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
-		scsi_rescan_device(&sdev->sdev_gendev);
+		scsi_rescan_device(sdev);
 		envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
 		break;
 	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:

@@ -132,7 +132,6 @@ extern int scsi_complete_async_scans(void);
 extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
 				   unsigned int, u64, enum scsi_scan_mode);
 extern void scsi_forget_host(struct Scsi_Host *);
-extern void scsi_rescan_device(struct device *);
 
 /* scsi_sysctl.c */
 #ifdef CONFIG_SYSCTL
@@ -1617,12 +1617,24 @@ int scsi_add_device(struct Scsi_Host *host, uint channel,
 }
 EXPORT_SYMBOL(scsi_add_device);
 
-void scsi_rescan_device(struct device *dev)
+int scsi_rescan_device(struct scsi_device *sdev)
 {
-	struct scsi_device *sdev = to_scsi_device(dev);
+	struct device *dev = &sdev->sdev_gendev;
+	int ret = 0;
 
 	device_lock(dev);
 
+	/*
+	 * Bail out if the device is not running. Otherwise, the rescan may
+	 * block waiting for commands to be executed, with us holding the
+	 * device lock. This can result in a potential deadlock in the power
+	 * management core code when system resume is on-going.
+	 */
+	if (sdev->sdev_state != SDEV_RUNNING) {
+		ret = -EWOULDBLOCK;
+		goto unlock;
+	}
+
 	scsi_attach_vpd(sdev);
 
 	if (sdev->handler && sdev->handler->rescan)

@@ -1635,7 +1647,11 @@ void scsi_rescan_device(struct device *dev)
 			drv->rescan(dev);
 		module_put(dev->driver->owner);
 	}
+
+unlock:
 	device_unlock(dev);
+
+	return ret;
 }
 EXPORT_SYMBOL(scsi_rescan_device);
 
@ -764,7 +764,7 @@ static ssize_t
|
|||
store_rescan_field (struct device *dev, struct device_attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
scsi_rescan_device(dev);
|
||||
scsi_rescan_device(to_scsi_device(dev));
|
||||
return count;
|
||||
}
|
||||
static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field);
|
||||
|
|
@ -857,7 +857,7 @@ store_state_field(struct device *dev, struct device_attribute *attr,
|
|||
* waiting for pending I/O to finish.
|
||||
*/
|
||||
blk_mq_run_hw_queues(sdev->request_queue, true);
|
||||
scsi_rescan_device(dev);
|
||||
scsi_rescan_device(sdev);
|
||||
}
|
||||
|
||||
return ret == 0 ? count : -EINVAL;
|
||||
|
@@ -213,18 +213,32 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
 }
 
 static ssize_t
-manage_start_stop_show(struct device *dev, struct device_attribute *attr,
-		       char *buf)
+manage_start_stop_show(struct device *dev,
+		       struct device_attribute *attr, char *buf)
 {
 	struct scsi_disk *sdkp = to_scsi_disk(dev);
 	struct scsi_device *sdp = sdkp->device;
 
-	return sprintf(buf, "%u\n", sdp->manage_start_stop);
+	return sysfs_emit(buf, "%u\n",
+			  sdp->manage_system_start_stop &&
+			  sdp->manage_runtime_start_stop);
 }
+static DEVICE_ATTR_RO(manage_start_stop);
+
+static ssize_t
+manage_system_start_stop_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct scsi_disk *sdkp = to_scsi_disk(dev);
+	struct scsi_device *sdp = sdkp->device;
+
+	return sysfs_emit(buf, "%u\n", sdp->manage_system_start_stop);
+}
 
 static ssize_t
-manage_start_stop_store(struct device *dev, struct device_attribute *attr,
-			const char *buf, size_t count)
+manage_system_start_stop_store(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
 {
 	struct scsi_disk *sdkp = to_scsi_disk(dev);
 	struct scsi_device *sdp = sdkp->device;

@@ -236,11 +250,42 @@ manage_start_stop_store(struct device *dev, struct device_attribute *attr,
 	if (kstrtobool(buf, &v))
 		return -EINVAL;
 
-	sdp->manage_start_stop = v;
+	sdp->manage_system_start_stop = v;
 
 	return count;
 }
-static DEVICE_ATTR_RW(manage_start_stop);
+static DEVICE_ATTR_RW(manage_system_start_stop);
+
+static ssize_t
+manage_runtime_start_stop_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct scsi_disk *sdkp = to_scsi_disk(dev);
+	struct scsi_device *sdp = sdkp->device;
+
+	return sysfs_emit(buf, "%u\n", sdp->manage_runtime_start_stop);
+}
+
+static ssize_t
+manage_runtime_start_stop_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct scsi_disk *sdkp = to_scsi_disk(dev);
+	struct scsi_device *sdp = sdkp->device;
+	bool v;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	if (kstrtobool(buf, &v))
+		return -EINVAL;
+
+	sdp->manage_runtime_start_stop = v;
+
+	return count;
+}
+static DEVICE_ATTR_RW(manage_runtime_start_stop);
 
 static ssize_t
 allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)

@@ -572,6 +617,8 @@ static struct attribute *sd_disk_attrs[] = {
 	&dev_attr_FUA.attr,
 	&dev_attr_allow_restart.attr,
 	&dev_attr_manage_start_stop.attr,
+	&dev_attr_manage_system_start_stop.attr,
+	&dev_attr_manage_runtime_start_stop.attr,
 	&dev_attr_protection_type.attr,
 	&dev_attr_protection_mode.attr,
 	&dev_attr_app_tag_own.attr,
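
The sd sysfs changes split manage_start_stop into manage_system_start_stop and manage_runtime_start_stop, with the legacy attribute reading back as the conjunction of the two new flags. A compilable model of that read-back rule (toy types, not the driver structs):

#include <stdio.h>
#include <stdbool.h>

struct disk_flags {
	bool manage_system_start_stop;
	bool manage_runtime_start_stop;
};

/* The legacy attribute now reports both fine-grained flags at once. */
static unsigned legacy_manage_start_stop_show(const struct disk_flags *f)
{
	return f->manage_system_start_stop && f->manage_runtime_start_stop;
}

int main(void)
{
	struct disk_flags f = { true, false };

	printf("legacy view: %u\n", legacy_manage_start_stop_show(&f)); /* 0 */
	f.manage_runtime_start_stop = true;
	printf("legacy view: %u\n", legacy_manage_start_stop_show(&f)); /* 1 */
	return 0;
}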
@@ -3599,7 +3646,8 @@ static int sd_remove(struct device *dev)
 
 	device_del(&sdkp->disk_dev);
 	del_gendisk(sdkp->disk);
-	sd_shutdown(dev);
+	if (!sdkp->suspended)
+		sd_shutdown(dev);
 
 	put_disk(sdkp->disk);
 	return 0;

@@ -3676,13 +3724,20 @@ static void sd_shutdown(struct device *dev)
 		sd_sync_cache(sdkp, NULL);
 	}
 
-	if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
+	if (system_state != SYSTEM_RESTART &&
+	    sdkp->device->manage_system_start_stop) {
 		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
 		sd_start_stop_device(sdkp, 0);
 	}
 }
 
-static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
+static inline bool sd_do_start_stop(struct scsi_device *sdev, bool runtime)
+{
+	return (sdev->manage_system_start_stop && !runtime) ||
+		(sdev->manage_runtime_start_stop && runtime);
+}
+
+static int sd_suspend_common(struct device *dev, bool runtime)
 {
 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
 	struct scsi_sense_hdr sshdr;

@@ -3714,15 +3769,18 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
 		}
 	}
 
-	if (sdkp->device->manage_start_stop) {
+	if (sd_do_start_stop(sdkp->device, runtime)) {
 		if (!sdkp->device->silence_suspend)
 			sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
 		/* an error is not worth aborting a system sleep */
 		ret = sd_start_stop_device(sdkp, 0);
-		if (ignore_stop_errors)
+		if (!runtime)
 			ret = 0;
 	}
 
+	if (!ret)
+		sdkp->suspended = true;
+
 	return ret;
 }
 
@@ -3731,29 +3789,37 @@ static int sd_suspend_system(struct device *dev)
 	if (pm_runtime_suspended(dev))
 		return 0;
 
-	return sd_suspend_common(dev, true);
+	return sd_suspend_common(dev, false);
 }
 
 static int sd_suspend_runtime(struct device *dev)
 {
-	return sd_suspend_common(dev, false);
+	return sd_suspend_common(dev, true);
 }
 
-static int sd_resume(struct device *dev)
+static int sd_resume(struct device *dev, bool runtime)
 {
 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
-	int ret;
+	int ret = 0;
 
 	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
 		return 0;
 
-	if (!sdkp->device->manage_start_stop)
+	if (!sd_do_start_stop(sdkp->device, runtime)) {
+		sdkp->suspended = false;
 		return 0;
+	}
 
-	sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
-	ret = sd_start_stop_device(sdkp, 1);
-	if (!ret)
+	if (!sdkp->device->no_start_on_resume) {
+		sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
+		ret = sd_start_stop_device(sdkp, 1);
+	}
+
+	if (!ret) {
 		opal_unlock_from_suspend(sdkp->opal_dev);
+		sdkp->suspended = false;
+	}
 
 	return ret;
 }

@@ -3762,7 +3828,7 @@ static int sd_resume_system(struct device *dev)
 	if (pm_runtime_suspended(dev))
 		return 0;
 
-	return sd_resume(dev);
+	return sd_resume(dev, false);
 }
 
 static int sd_resume_runtime(struct device *dev)

@@ -3789,7 +3855,7 @@ static int sd_resume_runtime(struct device *dev)
 				  "Failed to clear sense data\n");
 	}
 
-	return sd_resume(dev);
+	return sd_resume(dev, true);
 }
 
 /**
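
The new sd_do_start_stop() helper is the heart of the suspend/resume rework: system sleep and runtime PM now consult separate flags. The standalone program below reimplements just that predicate so the decision table can be compiled and run; everything around it in the driver is omitted.

#include <stdio.h>
#include <stdbool.h>

struct sdev_flags {
	bool manage_system_start_stop;
	bool manage_runtime_start_stop;
};

/* Mirrors sd_do_start_stop(): which PM path is allowed to issue
 * START STOP UNIT for this disk. */
static bool do_start_stop(const struct sdev_flags *s, bool runtime)
{
	return (s->manage_system_start_stop && !runtime) ||
	       (s->manage_runtime_start_stop && runtime);
}

int main(void)
{
	struct sdev_flags s = { .manage_system_start_stop = true };

	printf("system suspend stops disk:  %d\n", do_start_stop(&s, false)); /* 1 */
	printf("runtime suspend stops disk: %d\n", do_start_stop(&s, true));  /* 0 */
	return 0;
}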
@@ -131,6 +131,7 @@ struct scsi_disk {
 	u8		provisioning_mode;
 	u8		zeroing_mode;
 	u8		nr_actuators;		/* Number of actuators */
+	bool		suspended;	/* Disk is suspended (stopped) */
 	unsigned	ATO : 1;	/* state of disk ATO bit */
 	unsigned	cache_override : 1; /* temp override of WCE,RCD */
 	unsigned	WCE : 1;	/* state of disk WCE bit */
@@ -2278,7 +2278,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
 			device->advertised_queue_depth = device->queue_depth;
 			scsi_change_queue_depth(device->sdev, device->advertised_queue_depth);
 			if (device->rescan) {
-				scsi_rescan_device(&device->sdev->sdev_gendev);
+				scsi_rescan_device(device->sdev);
 				device->rescan = false;
 			}
 		}
@@ -471,7 +471,7 @@ static void storvsc_device_scan(struct work_struct *work)
 	sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
 	if (!sdev)
 		goto done;
-	scsi_rescan_device(&sdev->sdev_gendev);
+	scsi_rescan_device(sdev);
 	scsi_device_put(sdev);
 
 done:
@@ -325,7 +325,7 @@ static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
 	/* Handle "Parameters changed", "Mode parameters changed", and
 	   "Capacity data has changed".  */
 	if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
-		scsi_rescan_device(&sdev->sdev_gendev);
+		scsi_rescan_device(sdev);
 
 	scsi_device_put(sdev);
 }
@@ -1218,9 +1218,9 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
 	return 0;
 
 clk_dis_all:
-	pm_runtime_put_sync(&pdev->dev);
-	pm_runtime_set_suspended(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
 	clk_disable_unprepare(xqspi->refclk);
 clk_dis_pclk:
 	clk_disable_unprepare(xqspi->pclk);

@@ -1244,11 +1244,15 @@ static int zynqmp_qspi_remove(struct platform_device *pdev)
 {
 	struct zynqmp_qspi *xqspi = platform_get_drvdata(pdev);
 
+	pm_runtime_get_sync(&pdev->dev);
+
 	zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
+
+	pm_runtime_disable(&pdev->dev);
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
 	clk_disable_unprepare(xqspi->refclk);
 	clk_disable_unprepare(xqspi->pclk);
-	pm_runtime_set_suspended(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
 
 	return 0;
 }
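
The zynqmp-gqspi fix reorders the runtime-PM teardown so the usage count and the enable state end up balanced before the clocks go away. The toy program below models only the counter bookkeeping, with stub functions in place of the real pm_runtime_* API; it is a sketch of the invariant, not the driver.

#include <stdio.h>

static int usage;        /* runtime PM usage count (toy) */
static int rpm_enabled;

static void pm_get_sync(void)   { usage++; }
static void pm_put_noidle(void) { usage--; }
static void pm_disable(void)    { rpm_enabled = 0; }

/* Teardown order from the fixed paths: disable runtime PM first,
 * then drop the usage count taken earlier, then the device would be
 * marked suspended (pm_runtime_set_suspended() in the real code). */
static void teardown(void)
{
	pm_disable();
	pm_put_noidle();
}

int main(void)
{
	pm_get_sync();   /* matches the get taken before touching hardware */
	rpm_enabled = 1;
	teardown();
	printf("usage=%d enabled=%d\n", usage, rpm_enabled); /* 0 0: balanced */
	return 0;
}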
@@ -883,7 +883,6 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
 EXPORT_SYMBOL(target_to_linux_sector);
 
 struct devices_idr_iter {
-	struct config_item *prev_item;
 	int (*fn)(struct se_device *dev, void *data);
 	void *data;
 };

@@ -893,11 +892,9 @@ static int target_devices_idr_iter(int id, void *p, void *data)
 {
 	struct devices_idr_iter *iter = data;
 	struct se_device *dev = p;
+	struct config_item *item;
 	int ret;
 
-	config_item_put(iter->prev_item);
-	iter->prev_item = NULL;
-
 	/*
 	 * We add the device early to the idr, so it can be used
 	 * by backend modules during configuration. We do not want

@@ -907,12 +904,13 @@ static int target_devices_idr_iter(int id, void *p, void *data)
 	if (!target_dev_configured(dev))
 		return 0;
 
-	iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
-	if (!iter->prev_item)
+	item = config_item_get_unless_zero(&dev->dev_group.cg_item);
+	if (!item)
 		return 0;
 	mutex_unlock(&device_mutex);
 
 	ret = iter->fn(dev, iter->data);
+	config_item_put(item);
 
 	mutex_lock(&device_mutex);
 	return ret;

@@ -935,7 +933,6 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
 	mutex_lock(&device_mutex);
 	ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
 	mutex_unlock(&device_mutex);
-	config_item_put(iter.prev_item);
 	return ret;
 }
 
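
The target hunks drop the cached prev_item and instead hold the config_item reference only across the callback, dropping it before the next iteration. A compilable model of that per-iteration get/put pattern; the refcount is a plain int standing in for a config_item, not the target core API.

#include <stdio.h>

static int refs;

static int get_unless_zero(void) { if (refs <= 0) return 0; refs++; return 1; }
static void put(void)            { refs--; }

/* Per-iteration pattern from the fixed iterator: the reference is held
 * only across the callback, never cached across iterations. */
static int iterate_one(int (*fn)(void))
{
	int ret;

	if (!get_unless_zero())
		return 0;
	/* mutex_unlock(&device_mutex) here in the real code */
	ret = fn();
	put();
	/* mutex_lock(&device_mutex) again */
	return ret;
}

static int cb(void) { return 0; }

int main(void)
{
	refs = 1;
	iterate_one(cb);
	printf("refs=%d\n", refs); /* back to 1: balanced per iteration */
	return 0;
}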
@@ -123,8 +123,18 @@ static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
 		done += partlen;
 		len -= partlen;
 		ptr += partlen;
+		iov->consumed += partlen;
+		iov->iov[iov->i].iov_len -= partlen;
+		iov->iov[iov->i].iov_base += partlen;
 
-		vringh_kiov_advance(iov, partlen);
+		if (!iov->iov[iov->i].iov_len) {
+			/* Fix up old iov element then increment. */
+			iov->iov[iov->i].iov_len = iov->consumed;
+			iov->iov[iov->i].iov_base -= iov->consumed;
+
+			iov->consumed = 0;
+			iov->i++;
+		}
 	}
 	return done;
 }
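
The vringh fix stops calling vringh_kiov_advance() from vringh_iov_xfer() and open-codes the advance, so the 'consumed' counter is fixed up locally when an element is exhausted. The standalone model below reproduces just that bookkeeping and can be compiled to verify the element is restored; types and names are simplified stand-ins.

#include <stdio.h>

struct iovec_m { char *base; unsigned long len; };
struct viter { struct iovec_m iov[2]; unsigned long i, consumed; };

/* Models the fixed vringh_iov_xfer() inner loop: partial progress is
 * tracked in 'consumed', and the element is restored before moving on. */
static void consume(struct viter *it, unsigned long partlen)
{
	it->consumed += partlen;
	it->iov[it->i].len  -= partlen;
	it->iov[it->i].base += partlen;

	if (!it->iov[it->i].len) {
		/* Fix up old iov element then increment. */
		it->iov[it->i].len  = it->consumed;
		it->iov[it->i].base -= it->consumed;
		it->consumed = 0;
		it->i++;
	}
}

int main(void)
{
	char buf[8];
	struct viter it = { .iov = { { buf, 8 }, { 0, 0 } } };

	consume(&it, 3);
	consume(&it, 5); /* element exhausted: gets restored */
	printf("i=%lu len=%lu base-restored=%d\n",
	       it.i, it.iov[0].len, it.iov[0].base == buf); /* 1 8 1 */
	return 0;
}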
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/irqnr.h>
 #include <linux/pci.h>
+#include <linux/rcupdate.h>
 #include <linux/spinlock.h>
 #include <linux/cpuhotplug.h>
 #include <linux/atomic.h>

@@ -96,6 +97,7 @@ enum xen_irq_type {
 struct irq_info {
 	struct list_head list;
 	struct list_head eoi_list;
+	struct rcu_work rwork;
 	short refcnt;
 	u8 spurious_cnt;
 	u8 is_accounted;

@@ -145,23 +147,13 @@ const struct evtchn_ops *evtchn_ops;
  */
 static DEFINE_MUTEX(irq_mapping_update_lock);
 
-/*
- * Lock protecting event handling loop against removing event channels.
- * Adding of event channels is no issue as the associated IRQ becomes active
- * only after everything is setup (before request_[threaded_]irq() the handler
- * can't be entered for an event, as the event channel will be unmasked only
- * then).
- */
-static DEFINE_RWLOCK(evtchn_rwlock);
-
 /*
  * Lock hierarchy:
  *
 * irq_mapping_update_lock
- *   evtchn_rwlock
- *     IRQ-desc lock
- *       percpu eoi_list_lock
- *         irq_info->lock
+ *   IRQ-desc lock
+ *     percpu eoi_list_lock
+ *       irq_info->lock
 */
 
 static LIST_HEAD(xen_irq_list_head);

@@ -305,6 +297,22 @@ static void channels_on_cpu_inc(struct irq_info *info)
 	info->is_accounted = 1;
 }
 
+static void delayed_free_irq(struct work_struct *work)
+{
+	struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
+					     rwork);
+	unsigned int irq = info->irq;
+
+	/* Remove the info pointer only now, with no potential users left. */
+	set_info_for_irq(irq, NULL);
+
+	kfree(info);
+
+	/* Legacy IRQ descriptors are managed by the arch. */
+	if (irq >= nr_legacy_irqs())
+		irq_free_desc(irq);
+}
+
 /* Constructors for packed IRQ information. */
 static int xen_irq_info_common_setup(struct irq_info *info,
 				     unsigned irq,

@@ -667,33 +675,36 @@ static void xen_irq_lateeoi_worker(struct work_struct *work)
 
 	eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
 
-	read_lock_irqsave(&evtchn_rwlock, flags);
+	rcu_read_lock();
 
 	while (true) {
-		spin_lock(&eoi->eoi_list_lock);
+		spin_lock_irqsave(&eoi->eoi_list_lock, flags);
 
 		info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
 						eoi_list);
 
-		if (info == NULL || now < info->eoi_time) {
-			spin_unlock(&eoi->eoi_list_lock);
+		if (info == NULL)
+			break;
+
+		if (now < info->eoi_time) {
+			mod_delayed_work_on(info->eoi_cpu, system_wq,
+					    &eoi->delayed,
+					    info->eoi_time - now);
 			break;
 		}
 
 		list_del_init(&info->eoi_list);
 
-		spin_unlock(&eoi->eoi_list_lock);
+		spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
 
 		info->eoi_time = 0;
 
 		xen_irq_lateeoi_locked(info, false);
 	}
 
-	if (info)
-		mod_delayed_work_on(info->eoi_cpu, system_wq,
-				    &eoi->delayed, info->eoi_time - now);
+	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
 
-	read_unlock_irqrestore(&evtchn_rwlock, flags);
+	rcu_read_unlock();
 }
 
 static void xen_cpu_init_eoi(unsigned int cpu)

@@ -708,16 +719,15 @@ static void xen_cpu_init_eoi(unsigned int cpu)
 void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
 {
 	struct irq_info *info;
-	unsigned long flags;
 
-	read_lock_irqsave(&evtchn_rwlock, flags);
+	rcu_read_lock();
 
 	info = info_for_irq(irq);
 
 	if (info)
 		xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
 
-	read_unlock_irqrestore(&evtchn_rwlock, flags);
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
 
@@ -731,6 +741,7 @@ static void xen_irq_init(unsigned irq)
 
 	info->type = IRQT_UNBOUND;
 	info->refcnt = -1;
+	INIT_RCU_WORK(&info->rwork, delayed_free_irq);
 
 	set_info_for_irq(irq, info);
 	/*

@@ -788,31 +799,18 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
 static void xen_free_irq(unsigned irq)
 {
 	struct irq_info *info = info_for_irq(irq);
-	unsigned long flags;
 
 	if (WARN_ON(!info))
 		return;
 
-	write_lock_irqsave(&evtchn_rwlock, flags);
-
 	if (!list_empty(&info->eoi_list))
 		lateeoi_list_del(info);
 
 	list_del(&info->list);
 
-	set_info_for_irq(irq, NULL);
-
 	WARN_ON(info->refcnt > 0);
 
-	write_unlock_irqrestore(&evtchn_rwlock, flags);
-
-	kfree(info);
-
-	/* Legacy IRQ descriptors are managed by the arch. */
-	if (irq < nr_legacy_irqs())
-		return;
-
-	irq_free_desc(irq);
+	queue_rcu_work(system_wq, &info->rwork);
 }
 
 static void xen_evtchn_close(evtchn_port_t port)

@@ -1716,7 +1714,14 @@ static void __xen_evtchn_do_upcall(void)
 	int cpu = smp_processor_id();
 	struct evtchn_loop_ctrl ctrl = { 0 };
 
-	read_lock(&evtchn_rwlock);
+	/*
+	 * When closing an event channel the associated IRQ must not be freed
+	 * until all cpus have left the event handling loop. This is ensured
+	 * by taking the rcu_read_lock() while handling events, as freeing of
+	 * the IRQ is handled via queue_rcu_work() _after_ closing the event
+	 * channel.
+	 */
+	rcu_read_lock();
 
 	do {
 		vcpu_info->evtchn_upcall_pending = 0;

@@ -1729,7 +1734,7 @@ static void __xen_evtchn_do_upcall(void)
 
 	} while (vcpu_info->evtchn_upcall_pending);
 
-	read_unlock(&evtchn_rwlock);
+	rcu_read_unlock();
 
 	/*
	 * Increment irq_epoch only now to defer EOIs only for
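
The xen hunks replace evtchn_rwlock with RCU: the upcall loop runs under rcu_read_lock(), and the irq_info is freed via queue_rcu_work() only after all readers have left. The toy program below models that invariant with a plain reader counter standing in for a real RCU grace period; it is a sketch of the ordering, not the kernel mechanism.

#include <stdio.h>
#include <stdlib.h>

static int readers;
static int *info;
static int pending_free;

static void read_lock(void) { readers++; }

static void read_unlock(void)
{
	readers--;
	if (!readers && pending_free) { /* "grace period" ends */
		free(info);
		info = NULL;
		pending_free = 0;
	}
}

/* queue_rcu_work() analogue: defer the free while readers exist. */
static void free_irq_deferred(void)
{
	if (readers) {
		pending_free = 1;
	} else {
		free(info);
		info = NULL;
	}
}

int main(void)
{
	info = malloc(sizeof *info);
	read_lock();             /* CPU enters the event handling loop */
	free_irq_deferred();     /* channel closed while loop is running */
	printf("still valid inside loop: %d\n", info != NULL); /* 1 */
	read_unlock();           /* loop exits: deferred free runs */
	printf("freed after loop: %d\n", info == NULL);         /* 1 */
	return 0;
}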
@@ -28,6 +28,7 @@
 #include <linux/refcount.h>
 #include <linux/crc32c.h>
 #include <linux/iomap.h>
+#include <linux/fscrypt.h>
 #include "extent-io-tree.h"
 #include "extent_io.h"
 #include "extent_map.h"

@@ -3238,11 +3239,11 @@ static inline void btrfs_clear_sb_rdonly(struct super_block *sb)
 
 /* root-item.c */
 int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
-		       u64 ref_id, u64 dirid, u64 sequence, const char *name,
-		       int name_len);
+		       u64 ref_id, u64 dirid, u64 sequence,
+		       const struct fscrypt_str *name);
 int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
-		       u64 ref_id, u64 dirid, u64 *sequence, const char *name,
-		       int name_len);
+		       u64 ref_id, u64 dirid, u64 *sequence,
+		       const struct fscrypt_str *name);
 int btrfs_del_root(struct btrfs_trans_handle *trans,
 		   const struct btrfs_key *key);
 int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,

@@ -3271,25 +3272,23 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info);
 
 /* dir-item.c */
 int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
-			  const char *name, int name_len);
-int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
-			  int name_len, struct btrfs_inode *dir,
+			  const struct fscrypt_str *name);
+int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
+			  const struct fscrypt_str *name, struct btrfs_inode *dir,
 			  struct btrfs_key *location, u8 type, u64 index);
 struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
 					     struct btrfs_root *root,
 					     struct btrfs_path *path, u64 dir,
-					     const char *name, int name_len,
-					     int mod);
+					     const struct fscrypt_str *name, int mod);
 struct btrfs_dir_item *
 btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
 			    struct btrfs_root *root,
 			    struct btrfs_path *path, u64 dir,
-			    u64 index, const char *name, int name_len,
-			    int mod);
+			    u64 index, const struct fscrypt_str *name, int mod);
 struct btrfs_dir_item *
 btrfs_search_dir_index_item(struct btrfs_root *root,
 			    struct btrfs_path *path, u64 dirid,
-			    const char *name, int name_len);
+			    const struct fscrypt_str *name);
 int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
 			      struct btrfs_root *root,
 			      struct btrfs_path *path,

@@ -3370,10 +3369,10 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
 int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index);
 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
 		       struct btrfs_inode *dir, struct btrfs_inode *inode,
-		       const char *name, int name_len);
+		       const struct fscrypt_str *name);
 int btrfs_add_link(struct btrfs_trans_handle *trans,
 		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
-		   const char *name, int name_len, int add_backref, u64 index);
+		   const struct fscrypt_str *name, int add_backref, u64 index);
 int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry);
 int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
 		   int front);

@@ -3398,6 +3397,7 @@ struct btrfs_new_inode_args {
 	 */
 	struct posix_acl *default_acl;
 	struct posix_acl *acl;
+	struct fscrypt_name fname;
 };
 int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
 			    unsigned int *trans_num_items);
@@ -103,8 +103,8 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
  * to use for the second index (if one is created).
  * Will return 0 or -ENOMEM
  */
-int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
-			  int name_len, struct btrfs_inode *dir,
+int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
+			  const struct fscrypt_str *name, struct btrfs_inode *dir,
 			  struct btrfs_key *location, u8 type, u64 index)
 {
 	int ret = 0;

@@ -120,7 +120,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
 
 	key.objectid = btrfs_ino(dir);
 	key.type = BTRFS_DIR_ITEM_KEY;
-	key.offset = btrfs_name_hash(name, name_len);
+	key.offset = btrfs_name_hash(name->name, name->len);
 
 	path = btrfs_alloc_path();
 	if (!path)

@@ -128,9 +128,9 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
 
 	btrfs_cpu_key_to_disk(&disk_key, location);
 
-	data_size = sizeof(*dir_item) + name_len;
+	data_size = sizeof(*dir_item) + name->len;
 	dir_item = insert_with_overflow(trans, root, path, &key, data_size,
-					name, name_len);
+					name->name, name->len);
 	if (IS_ERR(dir_item)) {
 		ret = PTR_ERR(dir_item);
 		if (ret == -EEXIST)

@@ -142,11 +142,11 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, const char *name,
 	btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
 	btrfs_set_dir_type(leaf, dir_item, type);
 	btrfs_set_dir_data_len(leaf, dir_item, 0);
-	btrfs_set_dir_name_len(leaf, dir_item, name_len);
+	btrfs_set_dir_name_len(leaf, dir_item, name->len);
 	btrfs_set_dir_transid(leaf, dir_item, trans->transid);
 	name_ptr = (unsigned long)(dir_item + 1);
 
-	write_extent_buffer(leaf, name, name_ptr, name_len);
+	write_extent_buffer(leaf, name->name, name_ptr, name->len);
 	btrfs_mark_buffer_dirty(leaf);
 
 second_insert:

@@ -157,7 +157,7 @@ second_insert:
 	}
 	btrfs_release_path(path);
 
-	ret2 = btrfs_insert_delayed_dir_index(trans, name, name_len, dir,
+	ret2 = btrfs_insert_delayed_dir_index(trans, name->name, name->len, dir,
 					      &disk_key, type, index);
 out_free:
 	btrfs_free_path(path);

@@ -206,7 +206,7 @@ static struct btrfs_dir_item *btrfs_lookup_match_dir(
 struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
 					     struct btrfs_root *root,
 					     struct btrfs_path *path, u64 dir,
-					     const char *name, int name_len,
+					     const struct fscrypt_str *name,
 					     int mod)
 {
 	struct btrfs_key key;

@@ -214,9 +214,10 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
 
 	key.objectid = dir;
 	key.type = BTRFS_DIR_ITEM_KEY;
-	key.offset = btrfs_name_hash(name, name_len);
+	key.offset = btrfs_name_hash(name->name, name->len);
 
-	di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
+	di = btrfs_lookup_match_dir(trans, root, path, &key, name->name,
+				    name->len, mod);
 	if (IS_ERR(di) && PTR_ERR(di) == -ENOENT)
 		return NULL;
 
@@ -224,7 +225,7 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
 }
 
 int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
-				   const char *name, int name_len)
+				   const struct fscrypt_str *name)
 {
 	int ret;
 	struct btrfs_key key;

@@ -240,9 +241,10 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
 
 	key.objectid = dir;
 	key.type = BTRFS_DIR_ITEM_KEY;
-	key.offset = btrfs_name_hash(name, name_len);
+	key.offset = btrfs_name_hash(name->name, name->len);
 
-	di = btrfs_lookup_match_dir(NULL, root, path, &key, name, name_len, 0);
+	di = btrfs_lookup_match_dir(NULL, root, path, &key, name->name,
+				    name->len, 0);
 	if (IS_ERR(di)) {
 		ret = PTR_ERR(di);
 		/* Nothing found, we're safe */

@@ -262,11 +264,8 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
 		goto out;
 	}
 
-	/*
-	 * see if there is room in the item to insert this
-	 * name
-	 */
-	data_size = sizeof(*di) + name_len;
+	/* See if there is room in the item to insert this name. */
+	data_size = sizeof(*di) + name->len;
 	leaf = path->nodes[0];
 	slot = path->slots[0];
 	if (data_size + btrfs_item_size(leaf, slot) +

@@ -303,8 +302,7 @@ struct btrfs_dir_item *
 btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
 			    struct btrfs_root *root,
 			    struct btrfs_path *path, u64 dir,
-			    u64 index, const char *name, int name_len,
-			    int mod)
+			    u64 index, const struct fscrypt_str *name, int mod)
 {
 	struct btrfs_dir_item *di;
 	struct btrfs_key key;

@@ -313,7 +311,8 @@ btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
 	key.type = BTRFS_DIR_INDEX_KEY;
 	key.offset = index;
 
-	di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
+	di = btrfs_lookup_match_dir(trans, root, path, &key, name->name,
+				    name->len, mod);
 	if (di == ERR_PTR(-ENOENT))
 		return NULL;
 
@@ -321,9 +320,8 @@ btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
 }
 
 struct btrfs_dir_item *
-btrfs_search_dir_index_item(struct btrfs_root *root,
-			    struct btrfs_path *path, u64 dirid,
-			    const char *name, int name_len)
+btrfs_search_dir_index_item(struct btrfs_root *root, struct btrfs_path *path,
+			    u64 dirid, const struct fscrypt_str *name)
 {
 	struct btrfs_dir_item *di;
 	struct btrfs_key key;

@@ -338,7 +336,7 @@ btrfs_search_dir_index_item(struct btrfs_root *root,
 			break;
 
 		di = btrfs_match_dir_item_name(root->fs_info, path,
-					       name, name_len);
+					       name->name, name->len);
 		if (di)
 			return di;
 	}
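
All of the btrfs conversions above follow one pattern: the loose (name, name_len) argument pair becomes a single struct fscrypt_str. A small compilable model of the calling-convention change; the struct, macro, and hash below are stand-ins written for illustration, not the btrfs or fscrypt implementations.

#include <stdio.h>

/* Mirrors the shape of struct fscrypt_str: the (pointer, length) pair
 * travels as one object instead of two loose parameters. */
struct fstr { const unsigned char *name; unsigned int len; };

#define FSTR_INIT(n, l) { .name = (const unsigned char *)(n), .len = (l) }

static unsigned long name_hash(const unsigned char *name, unsigned int len)
{
	unsigned long h = 5381; /* djb2, a stand-in for the real hash */

	while (len--)
		h = h * 33 + *name++;
	return h;
}

int main(void)
{
	struct fstr name = FSTR_INIT("default", 7);

	/* old style: btrfs_name_hash(name, name_len)
	 * new style: btrfs_name_hash(name->name, name->len) */
	printf("%lu\n", name_hash(name.name, name.len));
	return 0;
}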
@@ -1458,8 +1458,13 @@ static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
 	if (iocb->ki_flags & IOCB_NOWAIT)
 		ilock_flags |= BTRFS_ILOCK_TRY;
 
-	/* If the write DIO is within EOF, use a shared lock */
-	if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode))
+	/*
+	 * If the write DIO is within EOF, use a shared lock and also only if
+	 * security bits will likely not be dropped by file_remove_privs() called
+	 * from btrfs_write_check(). Either will need to be rechecked after the
+	 * lock was acquired.
+	 */
+	if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode) && IS_NOSEC(inode))
 		ilock_flags |= BTRFS_ILOCK_SHARED;
 
 relock:

@@ -1467,6 +1472,13 @@ relock:
 	if (err < 0)
 		return err;
 
+	/* Shared lock cannot be used with security bits set. */
+	if ((ilock_flags & BTRFS_ILOCK_SHARED) && !IS_NOSEC(inode)) {
+		btrfs_inode_unlock(inode, ilock_flags);
+		ilock_flags &= ~BTRFS_ILOCK_SHARED;
+		goto relock;
+	}
+
 	err = generic_write_checks(iocb, from);
 	if (err <= 0) {
 		btrfs_inode_unlock(inode, ilock_flags);
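
The direct-write hunks decide on a shared lock optimistically and then recheck IS_NOSEC() once the lock is held, falling back to an exclusive lock if security bits appeared in between. A compilable model of just that relock dance; the flag value and names here are invented for the sketch.

#include <stdio.h>
#include <stdbool.h>

#define ILOCK_SHARED 0x1

/* Models the recheck: the shared-lock decision is made before locking,
 * so it must be validated again once the lock is held. */
static int lock_for_dio(bool within_eof, bool nosec_before, bool nosec_after)
{
	int flags = 0;

	if (within_eof && nosec_before)
		flags |= ILOCK_SHARED;
relock:
	/* btrfs_inode_lock(inode, flags) would happen here */
	if ((flags & ILOCK_SHARED) && !nosec_after) {
		/* btrfs_inode_unlock(inode, flags) */
		flags &= ~ILOCK_SHARED;
		goto relock;
	}
	return flags;
}

int main(void)
{
	/* security bits appeared between the check and the lock */
	printf("flags=%d\n", lock_for_dio(true, true, false)); /* 0: exclusive */
	printf("flags=%d\n", lock_for_dio(true, true, true));  /* 1: shared */
	return 0;
}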
@@ -10,8 +10,8 @@
 #include "print-tree.h"
 
 struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf,
-						   int slot, const char *name,
-						   int name_len)
+						   int slot,
+						   const struct fscrypt_str *name)
 {
 	struct btrfs_inode_ref *ref;
 	unsigned long ptr;

@@ -27,9 +27,10 @@ struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf,
 		len = btrfs_inode_ref_name_len(leaf, ref);
 		name_ptr = (unsigned long)(ref + 1);
 		cur_offset += len + sizeof(*ref);
-		if (len != name_len)
+		if (len != name->len)
 			continue;
-		if (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)
+		if (memcmp_extent_buffer(leaf, name->name, name_ptr,
+					 name->len) == 0)
 			return ref;
 	}
 	return NULL;

@@ -37,7 +38,7 @@ struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf,
 
 struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
 		struct extent_buffer *leaf, int slot, u64 ref_objectid,
-		const char *name, int name_len)
+		const struct fscrypt_str *name)
 {
 	struct btrfs_inode_extref *extref;
 	unsigned long ptr;

@@ -60,9 +61,10 @@ struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
 		name_ptr = (unsigned long)(&extref->name);
 		ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
 
-		if (ref_name_len == name_len &&
+		if (ref_name_len == name->len &&
 		    btrfs_inode_extref_parent(leaf, extref) == ref_objectid &&
-		    (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0))
+		    (memcmp_extent_buffer(leaf, name->name, name_ptr,
+					  name->len) == 0))
 			return extref;
 
 		cur_offset += ref_name_len + sizeof(*extref);

@@ -75,7 +77,7 @@ struct btrfs_inode_extref *
 btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *root,
 			  struct btrfs_path *path,
-			  const char *name, int name_len,
+			  const struct fscrypt_str *name,
 			  u64 inode_objectid, u64 ref_objectid, int ins_len,
 			  int cow)
 {

@@ -84,7 +86,7 @@ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
 
 	key.objectid = inode_objectid;
 	key.type = BTRFS_INODE_EXTREF_KEY;
-	key.offset = btrfs_extref_hash(ref_objectid, name, name_len);
+	key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);
 
 	ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
 	if (ret < 0)

@@ -92,13 +94,13 @@ btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
 	if (ret > 0)
 		return NULL;
 	return btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
-					      ref_objectid, name, name_len);
+					      ref_objectid, name);
 
 }
 
 static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
 				  struct btrfs_root *root,
-				  const char *name, int name_len,
+				  const struct fscrypt_str *name,
 				  u64 inode_objectid, u64 ref_objectid,
 				  u64 *index)
 {

@@ -107,14 +109,14 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
 	struct btrfs_inode_extref *extref;
 	struct extent_buffer *leaf;
 	int ret;
-	int del_len = name_len + sizeof(*extref);
+	int del_len = name->len + sizeof(*extref);
 	unsigned long ptr;
 	unsigned long item_start;
 	u32 item_size;
 
 	key.objectid = inode_objectid;
 	key.type = BTRFS_INODE_EXTREF_KEY;
-	key.offset = btrfs_extref_hash(ref_objectid, name, name_len);
+	key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);
 
 	path = btrfs_alloc_path();
 	if (!path)

@@ -132,7 +134,7 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
 	 * readonly.
 	 */
 	extref = btrfs_find_name_in_ext_backref(path->nodes[0], path->slots[0],
-						ref_objectid, name, name_len);
+						ref_objectid, name);
 	if (!extref) {
 		btrfs_handle_fs_error(root->fs_info, -ENOENT, NULL);
 		ret = -EROFS;

@@ -168,8 +170,7 @@ out:
 }
 
 int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
-			struct btrfs_root *root,
-			const char *name, int name_len,
+			struct btrfs_root *root, const struct fscrypt_str *name,
 			u64 inode_objectid, u64 ref_objectid, u64 *index)
 {
 	struct btrfs_path *path;

@@ -182,7 +183,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
 	u32 sub_item_len;
 	int ret;
 	int search_ext_refs = 0;
-	int del_len = name_len + sizeof(*ref);
+	int del_len = name->len + sizeof(*ref);
 
 	key.objectid = inode_objectid;
 	key.offset = ref_objectid;

@@ -201,8 +202,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 
-	ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0], name,
-					 name_len);
+	ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0], name);
 	if (!ref) {
 		ret = -ENOENT;
 		search_ext_refs = 1;

@@ -219,7 +219,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 	ptr = (unsigned long)ref;
-	sub_item_len = name_len + sizeof(*ref);
+	sub_item_len = name->len + sizeof(*ref);
 	item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
 	memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
 			      item_size - (ptr + sub_item_len - item_start));

@@ -233,7 +233,7 @@ out:
 	 * name in our ref array. Find and remove the extended
 	 * inode ref then.
 	 */
-	return btrfs_del_inode_extref(trans, root, name, name_len,
+	return btrfs_del_inode_extref(trans, root, name,
 				      inode_objectid, ref_objectid, index);
 }
 
@@ -247,12 +247,13 @@ out:
 */
 static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
 				     struct btrfs_root *root,
-				     const char *name, int name_len,
-				     u64 inode_objectid, u64 ref_objectid, u64 index)
+				     const struct fscrypt_str *name,
+				     u64 inode_objectid, u64 ref_objectid,
+				     u64 index)
 {
 	struct btrfs_inode_extref *extref;
 	int ret;
-	int ins_len = name_len + sizeof(*extref);
+	int ins_len = name->len + sizeof(*extref);
 	unsigned long ptr;
 	struct btrfs_path *path;
 	struct btrfs_key key;

@@ -260,7 +261,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
 
 	key.objectid = inode_objectid;
 	key.type = BTRFS_INODE_EXTREF_KEY;
-	key.offset = btrfs_extref_hash(ref_objectid, name, name_len);
+	key.offset = btrfs_extref_hash(ref_objectid, name->name, name->len);
 
 	path = btrfs_alloc_path();
 	if (!path)

@@ -272,7 +273,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
 		if (btrfs_find_name_in_ext_backref(path->nodes[0],
 						   path->slots[0],
 						   ref_objectid,
-						   name, name_len))
+						   name))
 			goto out;
 
 		btrfs_extend_item(path, ins_len);

@@ -286,12 +287,12 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
 	ptr += btrfs_item_size(leaf, path->slots[0]) - ins_len;
 	extref = (struct btrfs_inode_extref *)ptr;
 
-	btrfs_set_inode_extref_name_len(path->nodes[0], extref, name_len);
+	btrfs_set_inode_extref_name_len(path->nodes[0], extref, name->len);
 	btrfs_set_inode_extref_index(path->nodes[0], extref, index);
 	btrfs_set_inode_extref_parent(path->nodes[0], extref, ref_objectid);
 
 	ptr = (unsigned long)&extref->name;
-	write_extent_buffer(path->nodes[0], name, ptr, name_len);
+	write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
 	btrfs_mark_buffer_dirty(path->nodes[0]);
 
 out:

@@ -301,8 +302,7 @@ out:
 
 /* Will return 0, -ENOMEM, -EMLINK, or -EEXIST or anything from the CoW path */
 int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
-			   struct btrfs_root *root,
-			   const char *name, int name_len,
+			   struct btrfs_root *root, const struct fscrypt_str *name,
 			   u64 inode_objectid, u64 ref_objectid, u64 index)
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;

@@ -311,7 +311,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
 	struct btrfs_inode_ref *ref;
 	unsigned long ptr;
 	int ret;
-	int ins_len = name_len + sizeof(*ref);
+	int ins_len = name->len + sizeof(*ref);
 
 	key.objectid = inode_objectid;
 	key.offset = ref_objectid;

@@ -327,7 +327,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
 	if (ret == -EEXIST) {
 		u32 old_size;
 		ref = btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
-						 name, name_len);
+						 name);
 		if (ref)
 			goto out;
 
@@ -336,7 +336,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
 		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
 				     struct btrfs_inode_ref);
 		ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
-		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
+		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name->len);
 		btrfs_set_inode_ref_index(path->nodes[0], ref, index);
 		ptr = (unsigned long)(ref + 1);
 		ret = 0;

@@ -344,7 +344,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
 		if (ret == -EOVERFLOW) {
 			if (btrfs_find_name_in_backref(path->nodes[0],
 						       path->slots[0],
-						       name, name_len))
+						       name))
 				ret = -EEXIST;
 			else
 				ret = -EMLINK;

@@ -353,11 +353,11 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
 	} else {
 		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
 				     struct btrfs_inode_ref);
-		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
+		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name->len);
 		btrfs_set_inode_ref_index(path->nodes[0], ref, index);
 		ptr = (unsigned long)(ref + 1);
 	}
-	write_extent_buffer(path->nodes[0], name, ptr, name_len);
+	write_extent_buffer(path->nodes[0], name->name, ptr, name->len);
 	btrfs_mark_buffer_dirty(path->nodes[0]);
 
 out:

@@ -370,7 +370,6 @@ out:
 		if (btrfs_super_incompat_flags(disk_super)
 		    & BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
 			ret = btrfs_insert_inode_extref(trans, root, name,
-							name_len,
 							inode_objectid,
 							ref_objectid, index);
 	}
@@ -64,33 +64,31 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root,
 			       struct btrfs_truncate_control *control);
 int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
-			   struct btrfs_root *root,
-			   const char *name, int name_len,
+			   struct btrfs_root *root, const struct fscrypt_str *name,
 			   u64 inode_objectid, u64 ref_objectid, u64 index);
 int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
-			struct btrfs_root *root,
-			const char *name, int name_len,
-			u64 inode_objectid, u64 ref_objectid, u64 *index);
+			struct btrfs_root *root, const struct fscrypt_str *name,
+			u64 inode_objectid, u64 ref_objectid, u64 *index);
 int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root,
 			     struct btrfs_path *path, u64 objectid);
-int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root
-		       *root, struct btrfs_path *path,
+int btrfs_lookup_inode(struct btrfs_trans_handle *trans,
+		       struct btrfs_root *root, struct btrfs_path *path,
 		       struct btrfs_key *location, int mod);
 
 struct btrfs_inode_extref *btrfs_lookup_inode_extref(
 		struct btrfs_trans_handle *trans,
 		struct btrfs_root *root,
 		struct btrfs_path *path,
-		const char *name, int name_len,
+		const struct fscrypt_str *name,
 		u64 inode_objectid, u64 ref_objectid, int ins_len,
 		int cow);
 
 struct btrfs_inode_ref *btrfs_find_name_in_backref(struct extent_buffer *leaf,
-						   int slot, const char *name,
-						   int name_len);
+						   int slot,
+						   const struct fscrypt_str *name);
 struct btrfs_inode_extref *btrfs_find_name_in_ext_backref(
 		struct extent_buffer *leaf, int slot, u64 ref_objectid,
-		const char *name, int name_len);
+		const struct fscrypt_str *name);
 
 #endif
fs/btrfs/inode.c
@@ -3627,7 +3627,7 @@ void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
 	spin_unlock(&fs_info->delayed_iput_lock);
 }
 
-/**
+/*
 * Wait for flushing all delayed iputs
 *
 * @fs_info: the filesystem

@@ -4272,7 +4272,7 @@ int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
 				struct btrfs_inode *dir,
 				struct btrfs_inode *inode,
-				const char *name, int name_len,
+				const struct fscrypt_str *name,
 				struct btrfs_rename_ctx *rename_ctx)
 {
 	struct btrfs_root *root = dir->root;

@@ -4290,8 +4290,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 
-	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
-				   name, name_len, -1);
+	di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
 	if (IS_ERR_OR_NULL(di)) {
 		ret = di ? PTR_ERR(di) : -ENOENT;
 		goto err;

@@ -4319,12 +4318,11 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
 		}
 	}
 
-	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
-				  dir_ino, &index);
+	ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
 	if (ret) {
 		btrfs_info(fs_info,
 			"failed to delete reference to %.*s, inode %llu parent %llu",
-			name_len, name, ino, dir_ino);
+			name->len, name->name, ino, dir_ino);
 		btrfs_abort_transaction(trans, ret);
 		goto err;
 	}

@@ -4345,10 +4343,8 @@ skip_backref:
 	 * operations on the log tree, increasing latency for applications.
 	 */
 	if (!rename_ctx) {
-		btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
-					   dir_ino);
-		btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir,
-					     index);
+		btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino);
+		btrfs_del_dir_entries_in_log(trans, root, name, dir, index);
 	}
 
 	/*

@@ -4366,7 +4362,7 @@ err:
 	if (ret)
 		goto out;
 
-	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2);
+	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
 	inode_inc_iversion(&inode->vfs_inode);
 	inode_inc_iversion(&dir->vfs_inode);
 	inode->vfs_inode.i_ctime = current_time(&inode->vfs_inode);

@@ -4379,10 +4375,11 @@ out:
 
 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
 		       struct btrfs_inode *dir, struct btrfs_inode *inode,
-		       const char *name, int name_len)
+		       const struct fscrypt_str *name)
 {
 	int ret;
-	ret = __btrfs_unlink_inode(trans, dir, inode, name, name_len, NULL);
+
+	ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
 	if (!ret) {
 		drop_nlink(&inode->vfs_inode);
 		ret = btrfs_update_inode(trans, inode->root, inode);

@@ -4418,29 +4415,39 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
 	struct btrfs_trans_handle *trans;
 	struct inode *inode = d_inode(dentry);
 	int ret;
+	struct fscrypt_name fname;
+
+	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
+	if (ret)
+		return ret;
+
+	/* This needs to handle no-key deletions later on */
 
 	trans = __unlink_start_trans(dir);
-	if (IS_ERR(trans))
-		return PTR_ERR(trans);
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		goto fscrypt_free;
+	}
 
 	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
 			0);
 
-	ret = btrfs_unlink_inode(trans, BTRFS_I(dir),
-			BTRFS_I(d_inode(dentry)), dentry->d_name.name,
-			dentry->d_name.len);
+	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
+				 &fname.disk_name);
 	if (ret)
-		goto out;
+		goto end_trans;
 
 	if (inode->i_nlink == 0) {
 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
 		if (ret)
-			goto out;
+			goto end_trans;
 	}
 
-out:
+end_trans:
 	btrfs_end_transaction(trans);
 	btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
+fscrypt_free:
+	fscrypt_free_filename(&fname);
 	return ret;
 }
 
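
Each converted btrfs entry point now brackets its work with fscrypt_setup_filename()/fscrypt_free_filename(), including on every error path. The toy program below models that pairing for the unlink-style flow and checks the calls balance; the helpers are stubs written for illustration, not the fscrypt API.

#include <stdio.h>

static int setup_calls, free_calls;

static int setup(void)    { setup_calls++; return 0; }
static void release(void) { free_calls++; }

/* Every exit path after setup() succeeds must reach release(),
 * mirroring the goto fscrypt_free pattern above. */
static int unlink_model(int trans_fails)
{
	int ret = setup();

	if (ret)
		return ret;
	if (trans_fails) {
		ret = -1;
		goto fscrypt_free;
	}
	/* ... unlink work ... */
fscrypt_free:
	release();
	return ret;
}

int main(void)
{
	unlink_model(0);
	unlink_model(1);
	printf("balanced: %d\n", setup_calls == free_calls); /* 1 */
	return 0;
}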
@@ -4453,12 +4460,17 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
 	struct extent_buffer *leaf;
 	struct btrfs_dir_item *di;
 	struct btrfs_key key;
-	const char *name = dentry->d_name.name;
-	int name_len = dentry->d_name.len;
 	u64 index;
 	int ret;
 	u64 objectid;
 	u64 dir_ino = btrfs_ino(BTRFS_I(dir));
+	struct fscrypt_name fname;
+
+	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
+	if (ret)
+		return ret;
+
+	/* This needs to handle no-key deletions later on */
 
 	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
 		objectid = inode->root->root_key.objectid;

@@ -4466,15 +4478,18 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
 		objectid = inode->location.objectid;
 	} else {
 		WARN_ON(1);
+		fscrypt_free_filename(&fname);
 		return -EINVAL;
 	}
 
 	path = btrfs_alloc_path();
-	if (!path)
-		return -ENOMEM;
+	if (!path) {
+		ret = -ENOMEM;
+		goto out;
+	}
 
 	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
-				   name, name_len, -1);
+				   &fname.disk_name, -1);
 	if (IS_ERR_OR_NULL(di)) {
 		ret = di ? PTR_ERR(di) : -ENOENT;
 		goto out;

@@ -4500,8 +4515,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
 	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
 	 */
 	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
-		di = btrfs_search_dir_index_item(root, path, dir_ino,
-						 name, name_len);
+		di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
 		if (IS_ERR_OR_NULL(di)) {
 			if (!di)
 				ret = -ENOENT;

@@ -4518,7 +4532,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
 	} else {
 		ret = btrfs_del_root_ref(trans, objectid,
 					 root->root_key.objectid, dir_ino,
-					 &index, name, name_len);
+					 &index, &fname.disk_name);
 		if (ret) {
 			btrfs_abort_transaction(trans, ret);
 			goto out;

@@ -4531,7 +4545,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 
-	btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
+	btrfs_i_size_write(BTRFS_I(dir), dir->i_size - fname.disk_name.len * 2);
 	inode_inc_iversion(dir);
 	dir->i_mtime = current_time(dir);
 	dir->i_ctime = dir->i_mtime;

@@ -4540,6 +4554,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
 		btrfs_abort_transaction(trans, ret);
 out:
 	btrfs_free_path(path);
+	fscrypt_free_filename(&fname);
 	return ret;
 }
 
@@ -4553,6 +4568,7 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
 	struct btrfs_path *path;
 	struct btrfs_dir_item *di;
 	struct btrfs_key key;
+	struct fscrypt_str name = FSTR_INIT("default", 7);
 	u64 dir_id;
 	int ret;
 
@@ -4563,7 +4579,7 @@ static noinline int may_destroy_subvol(struct btrfs_root *root)
 	/* Make sure this root isn't set as the default subvol */
 	dir_id = btrfs_super_root_dir(fs_info->super_copy);
 	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
-				   dir_id, "default", 7, 0);
+				   dir_id, &name, 0);
 	if (di && !IS_ERR(di)) {
 		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
 		if (key.objectid == root->root_key.objectid) {

@@ -4802,6 +4818,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
 	int err = 0;
 	struct btrfs_trans_handle *trans;
 	u64 last_unlink_trans;
+	struct fscrypt_name fname;
 
 	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
 		return -ENOTEMPTY;

@@ -4814,9 +4831,17 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
 		return btrfs_delete_subvolume(dir, dentry);
 	}
 
+	err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
+	if (err)
+		return err;
+
+	/* This needs to handle no-key deletions later on */
+
 	trans = __unlink_start_trans(dir);
-	if (IS_ERR(trans))
-		return PTR_ERR(trans);
+	if (IS_ERR(trans)) {
+		err = PTR_ERR(trans);
+		goto out_notrans;
+	}
 
 	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
 		err = btrfs_unlink_subvol(trans, dir, dentry);

@@ -4830,9 +4855,8 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
 	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
 
 	/* now the directory is empty */
-	err = btrfs_unlink_inode(trans, BTRFS_I(dir),
-			BTRFS_I(d_inode(dentry)), dentry->d_name.name,
-			dentry->d_name.len);
+	err = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
+				 &fname.disk_name);
 	if (!err) {
 		btrfs_i_size_write(BTRFS_I(inode), 0);
 		/*

@@ -4851,7 +4875,9 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
 	}
 out:
 	btrfs_end_transaction(trans);
+out_notrans:
 	btrfs_btree_balance_dirty(fs_info);
+	fscrypt_free_filename(&fname);
 
 	return err;
 }
|
|
@ -5532,19 +5558,24 @@ no_delete:
|
|||
static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
|
||||
struct btrfs_key *location, u8 *type)
|
||||
{
|
||||
const char *name = dentry->d_name.name;
|
||||
int namelen = dentry->d_name.len;
|
||||
struct btrfs_dir_item *di;
|
||||
struct btrfs_path *path;
|
||||
struct btrfs_root *root = BTRFS_I(dir)->root;
|
||||
int ret = 0;
|
||||
struct fscrypt_name fname;
|
||||
|
||||
path = btrfs_alloc_path();
|
||||
if (!path)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* This needs to handle no-key deletions later on */
|
||||
|
||||
di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
|
||||
name, namelen, 0);
|
||||
&fname.disk_name, 0);
|
||||
if (IS_ERR_OR_NULL(di)) {
|
||||
ret = di ? PTR_ERR(di) : -ENOENT;
|
||||
goto out;
|
||||
|
|
@ -5556,12 +5587,13 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
|
|||
ret = -EUCLEAN;
|
||||
btrfs_warn(root->fs_info,
|
||||
"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
|
||||
__func__, name, btrfs_ino(BTRFS_I(dir)),
|
||||
__func__, fname.disk_name.name, btrfs_ino(BTRFS_I(dir)),
|
||||
location->objectid, location->type, location->offset);
|
||||
}
|
||||
if (!ret)
|
||||
*type = btrfs_dir_type(path->nodes[0], di);
|
||||
out:
|
||||
fscrypt_free_filename(&fname);
|
||||
btrfs_free_path(path);
|
||||
return ret;
|
||||
}
|
||||
|
|
@ -5584,6 +5616,11 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
|
|||
struct btrfs_key key;
|
||||
int ret;
|
||||
int err = 0;
|
||||
struct fscrypt_name fname;
|
||||
|
||||
ret = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
path = btrfs_alloc_path();
|
||||
if (!path) {
|
||||
|
|
@ -5606,12 +5643,11 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
|
|||
leaf = path->nodes[0];
|
||||
ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
|
||||
if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) ||
|
||||
btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
|
||||
btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len)
|
||||
goto out;
|
||||
|
||||
ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
|
||||
(unsigned long)(ref + 1),
|
||||
dentry->d_name.len);
|
||||
ret = memcmp_extent_buffer(leaf, fname.disk_name.name,
|
||||
(unsigned long)(ref + 1), fname.disk_name.len);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
|
|
@ -5630,6 +5666,7 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
|
|||
err = 0;
|
||||
out:
|
||||
btrfs_free_path(path);
|
||||
fscrypt_free_filename(&fname);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
@ -6238,9 +6275,18 @@ int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
|
|||
struct inode *inode = args->inode;
|
||||
int ret;
|
||||
|
||||
if (!args->orphan) {
|
||||
ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0,
|
||||
&args->fname);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
fscrypt_free_filename(&args->fname);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* 1 to add inode item */
|
||||
*trans_num_items = 1;
|
||||
|
|
@ -6280,6 +6326,7 @@ void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
|
|||
{
|
||||
posix_acl_release(args->acl);
|
||||
posix_acl_release(args->default_acl);
|
||||
fscrypt_free_filename(&args->fname);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -6315,8 +6362,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
|
|||
{
|
||||
struct inode *dir = args->dir;
|
||||
struct inode *inode = args->inode;
|
||||
const char *name = args->orphan ? NULL : args->dentry->d_name.name;
|
||||
int name_len = args->orphan ? 0 : args->dentry->d_name.len;
|
||||
const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
|
||||
struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
|
||||
struct btrfs_root *root;
|
||||
struct btrfs_inode_item *inode_item;
|
||||
|
|
@ -6417,7 +6463,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
|
|||
sizes[1] = 2 + sizeof(*ref);
|
||||
} else {
|
||||
key[1].offset = btrfs_ino(BTRFS_I(dir));
|
||||
sizes[1] = name_len + sizeof(*ref);
|
||||
sizes[1] = name->len + sizeof(*ref);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@@ -6456,10 +6502,12 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
 		btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
 		write_extent_buffer(path->nodes[0], "..", ptr, 2);
 	} else {
-		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
+		btrfs_set_inode_ref_name_len(path->nodes[0], ref,
+					     name->len);
 		btrfs_set_inode_ref_index(path->nodes[0], ref,
 					  BTRFS_I(inode)->dir_index);
-		write_extent_buffer(path->nodes[0], name, ptr, name_len);
+		write_extent_buffer(path->nodes[0], name->name, ptr,
+				    name->len);
 	}
 }
 
@@ -6520,7 +6568,7 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
 		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
 	} else {
 		ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
-				     name_len, 0, BTRFS_I(inode)->dir_index);
+				     0, BTRFS_I(inode)->dir_index);
 	}
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
@@ -6549,7 +6597,7 @@ out:
  */
 int btrfs_add_link(struct btrfs_trans_handle *trans,
 		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
-		   const char *name, int name_len, int add_backref, u64 index)
+		   const struct fscrypt_str *name, int add_backref, u64 index)
 {
 	int ret = 0;
 	struct btrfs_key key;
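This signature change is the heart of the first patch in the series: every (const char *name, int name_len) pair collapses into a single const struct fscrypt_str *, so call sites can no longer pass a mismatched pointer/length, and encrypted no-key names travel with their true byte length. A small compilable sketch of the idea (the struct layout follows linux/fscrypt.h; the functions are illustrative):

#include <stdio.h>
#include <string.h>

/* Counted string, as in the kernel's struct fscrypt_str. */
struct fscrypt_str {
	char *name;
	unsigned int len;	/* u32 in the kernel */
};

/* Before: pointer and length travel separately and can drift apart. */
static void add_link_old(const char *name, int name_len)
{
	printf("old: %.*s (%d bytes)\n", name_len, name, name_len);
}

/* After: one argument carries both, so callers cannot disagree. */
static void add_link_new(const struct fscrypt_str *name)
{
	printf("new: %.*s (%u bytes)\n", (int)name->len, name->name, name->len);
}

int main(void)
{
	char buf[] = "subvol";
	struct fscrypt_str name = { .name = buf, .len = strlen(buf) };

	add_link_old(buf, (int)strlen(buf));
	add_link_new(&name);
	return 0;
}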
@@ -6568,17 +6616,17 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
 	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
 		ret = btrfs_add_root_ref(trans, key.objectid,
 					 root->root_key.objectid, parent_ino,
-					 index, name, name_len);
+					 index, name);
 	} else if (add_backref) {
-		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
-					     parent_ino, index);
+		ret = btrfs_insert_inode_ref(trans, root, name,
+					     ino, parent_ino, index);
 	}
 
 	/* Nothing to clean up yet */
 	if (ret)
 		return ret;
 
-	ret = btrfs_insert_dir_item(trans, name, name_len, parent_inode, &key,
+	ret = btrfs_insert_dir_item(trans, name, parent_inode, &key,
 				    btrfs_inode_type(&inode->vfs_inode), index);
 	if (ret == -EEXIST || ret == -EOVERFLOW)
 		goto fail_dir_item;
@@ -6588,7 +6636,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
 	}
 
 	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
-			   name_len * 2);
+			   name->len * 2);
 	inode_inc_iversion(&parent_inode->vfs_inode);
 	/*
 	 * If we are replaying a log tree, we do not want to update the mtime
@@ -6613,15 +6661,15 @@ fail_dir_item:
 		int err;
 		err = btrfs_del_root_ref(trans, key.objectid,
 					 root->root_key.objectid, parent_ino,
-					 &local_index, name, name_len);
+					 &local_index, name);
 		if (err)
 			btrfs_abort_transaction(trans, err);
 	} else if (add_backref) {
 		u64 local_index;
 		int err;
 
-		err = btrfs_del_inode_ref(trans, root, name, name_len,
-					  ino, parent_ino, &local_index);
+		err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino,
+					  &local_index);
 		if (err)
 			btrfs_abort_transaction(trans, err);
 	}
@@ -6704,6 +6752,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
 	struct btrfs_root *root = BTRFS_I(dir)->root;
 	struct inode *inode = d_inode(old_dentry);
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	struct fscrypt_name fname;
 	u64 index;
 	int err;
 	int drop_inode = 0;
@@ -6715,6 +6764,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
 	if (inode->i_nlink >= BTRFS_LINK_MAX)
 		return -EMLINK;
 
+	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
+	if (err)
+		goto fail;
+
 	err = btrfs_set_inode_index(BTRFS_I(dir), &index);
 	if (err)
 		goto fail;
@@ -6741,7 +6794,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
 	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
 
 	err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
-			     dentry->d_name.name, dentry->d_name.len, 1, index);
+			     &fname.disk_name, 1, index);
 
 	if (err) {
 		drop_inode = 1;
@@ -6765,6 +6818,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
 	}
 
 fail:
+	fscrypt_free_filename(&fname);
 	if (trans)
 		btrfs_end_transaction(trans);
 	if (drop_inode) {
@@ -9037,6 +9091,8 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 	int ret;
 	int ret2;
 	bool need_abort = false;
+	struct fscrypt_name old_fname, new_fname;
+	struct fscrypt_str *old_name, *new_name;
 
 	/*
 	 * For non-subvolumes allow exchange only within one subvolume, in the
@@ -9048,6 +9104,19 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 		     new_ino != BTRFS_FIRST_FREE_OBJECTID))
 		return -EXDEV;
 
+	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
+	if (ret)
+		return ret;
+
+	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
+	if (ret) {
+		fscrypt_free_filename(&old_fname);
+		return ret;
+	}
+
+	old_name = &old_fname.disk_name;
+	new_name = &new_fname.disk_name;
+
 	/* close the race window with snapshot create/destroy ioctl */
 	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
 	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
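Acquiring two names back to back forces the usual staged-unwind rule seen above: if the second setup fails, only the first is freed, and the common exit frees both. A compact illustration of that pattern (user-space stand-ins, not the kernel helpers):

#include <stdlib.h>
#include <string.h>

/* Stand-ins for fscrypt_setup_filename()/fscrypt_free_filename(). */
static int setup(char **out, const char *s)
{
	*out = strdup(s);
	return *out ? 0 : -1;
}

static void release(char **p)
{
	free(*p);
	*p = NULL;
}

static int rename_exchange(const char *a, const char *b)
{
	char *old_name, *new_name;
	int ret;

	ret = setup(&old_name, a);
	if (ret)
		return ret;		/* nothing acquired yet */

	ret = setup(&new_name, b);
	if (ret) {
		release(&old_name);	/* unwind only the first */
		return ret;
	}

	/* ... work with both names; later failures fall through here ... */

	release(&new_name);
	release(&old_name);
	return 0;
}

int main(void)
{
	return rename_exchange("old", "new");
}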
@@ -9115,10 +9184,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 		/* force full log commit if subvolume involved. */
 		btrfs_set_log_full_commit(trans);
 	} else {
-		ret = btrfs_insert_inode_ref(trans, dest,
-					     new_dentry->d_name.name,
-					     new_dentry->d_name.len,
-					     old_ino,
+		ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
 					     btrfs_ino(BTRFS_I(new_dir)),
 					     old_idx);
 		if (ret)
@@ -9131,10 +9197,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 		/* force full log commit if subvolume involved. */
 		btrfs_set_log_full_commit(trans);
 	} else {
-		ret = btrfs_insert_inode_ref(trans, root,
-					     old_dentry->d_name.name,
-					     old_dentry->d_name.len,
-					     new_ino,
+		ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
 					     btrfs_ino(BTRFS_I(old_dir)),
 					     new_idx);
 		if (ret) {
@@ -9169,9 +9232,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 	} else { /* src is an inode */
 		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
 					   BTRFS_I(old_dentry->d_inode),
-					   old_dentry->d_name.name,
-					   old_dentry->d_name.len,
-					   &old_rename_ctx);
+					   old_name, &old_rename_ctx);
 		if (!ret)
 			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
 	}
@@ -9186,9 +9247,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 	} else { /* dest is an inode */
 		ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
 					   BTRFS_I(new_dentry->d_inode),
-					   new_dentry->d_name.name,
-					   new_dentry->d_name.len,
-					   &new_rename_ctx);
+					   new_name, &new_rename_ctx);
 		if (!ret)
 			ret = btrfs_update_inode(trans, dest, BTRFS_I(new_inode));
 	}
@@ -9198,16 +9257,14 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 	}
 
 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
-			     new_dentry->d_name.name,
-			     new_dentry->d_name.len, 0, old_idx);
+			     new_name, 0, old_idx);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
 		goto out_fail;
 	}
 
 	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
-			     old_dentry->d_name.name,
-			     old_dentry->d_name.len, 0, new_idx);
+			     old_name, 0, new_idx);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
 		goto out_fail;
@@ -9250,6 +9307,8 @@ out_notrans:
 	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
 		up_read(&fs_info->subvol_sem);
 
+	fscrypt_free_filename(&new_fname);
+	fscrypt_free_filename(&old_fname);
 	return ret;
 }
 
@@ -9289,6 +9348,7 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
 	int ret;
 	int ret2;
 	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
+	struct fscrypt_name old_fname, new_fname;
 
 	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
 		return -EPERM;
@@ -9305,22 +9365,28 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
 	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
 		return -ENOTEMPTY;
 
+	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
+	if (ret)
+		return ret;
+
+	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
+	if (ret) {
+		fscrypt_free_filename(&old_fname);
+		return ret;
+	}
+
 	/* check for collisions, even if the name isn't there */
-	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
-					     new_dentry->d_name.name,
-					     new_dentry->d_name.len);
-
+	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name);
 	if (ret) {
 		if (ret == -EEXIST) {
 			/* we shouldn't get
 			 * eexist without a new_inode */
 			if (WARN_ON(!new_inode)) {
-				return ret;
+				goto out_fscrypt_names;
 			}
 		} else {
 			/* maybe -EOVERFLOW */
-			return ret;
+			goto out_fscrypt_names;
 		}
 	}
 	ret = 0;
@@ -9334,8 +9400,10 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
 
 	if (flags & RENAME_WHITEOUT) {
 		whiteout_args.inode = new_whiteout_inode(mnt_userns, old_dir);
-		if (!whiteout_args.inode)
-			return -ENOMEM;
+		if (!whiteout_args.inode) {
+			ret = -ENOMEM;
+			goto out_fscrypt_names;
+		}
 		ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
 		if (ret)
 			goto out_whiteout_inode;
@@ -9403,11 +9471,9 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
 		/* force full log commit if subvolume involved. */
 		btrfs_set_log_full_commit(trans);
 	} else {
-		ret = btrfs_insert_inode_ref(trans, dest,
-					     new_dentry->d_name.name,
-					     new_dentry->d_name.len,
-					     old_ino,
-					     btrfs_ino(BTRFS_I(new_dir)), index);
+		ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name,
+					     old_ino, btrfs_ino(BTRFS_I(new_dir)),
+					     index);
 		if (ret)
 			goto out_fail;
 	}
@@ -9429,10 +9495,8 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
 		ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
 	} else {
 		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
-					   BTRFS_I(d_inode(old_dentry)),
-					   old_dentry->d_name.name,
-					   old_dentry->d_name.len,
-					   &rename_ctx);
+					   BTRFS_I(d_inode(old_dentry)),
+					   &old_fname.disk_name, &rename_ctx);
 		if (!ret)
 			ret = btrfs_update_inode(trans, root, BTRFS_I(old_inode));
 	}
@@ -9451,8 +9515,7 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
 		} else {
 			ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
 						 BTRFS_I(d_inode(new_dentry)),
-						 new_dentry->d_name.name,
-						 new_dentry->d_name.len);
+						 &new_fname.disk_name);
 		}
 		if (!ret && new_inode->i_nlink == 0)
 			ret = btrfs_orphan_add(trans,
@@ -9464,8 +9527,7 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
 	}
 
 	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
-			     new_dentry->d_name.name,
-			     new_dentry->d_name.len, 0, index);
+			     &new_fname.disk_name, 0, index);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
 		goto out_fail;
@@ -9500,6 +9562,9 @@ out_notrans:
 out_whiteout_inode:
 	if (flags & RENAME_WHITEOUT)
 		iput(whiteout_args.inode);
+out_fscrypt_names:
+	fscrypt_free_filename(&old_fname);
+	fscrypt_free_filename(&new_fname);
 	return ret;
 }
 
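The new out_fscrypt_names label sits below out_whiteout_inode, so an early failure that jumps to the upper label falls through and still frees both names: the classic kernel goto ladder, where labels release resources in reverse order of acquisition. A small sketch of the shape (illustrative names only):

#include <stdio.h>
#include <stdlib.h>

static int do_rename(int fail_at)
{
	char *names = NULL, *whiteout = NULL;
	int ret = 0;

	names = malloc(8);		/* acquired first */
	if (!names)
		return -1;

	if (fail_at == 1) {
		ret = -1;
		goto out_names;		/* only names is held yet */
	}

	whiteout = malloc(8);		/* acquired second */
	if (!whiteout) {
		ret = -1;
		goto out_names;
	}

	if (fail_at == 2) {
		ret = -1;
		goto out_whiteout;	/* both held: free in reverse order */
	}

out_whiteout:
	free(whiteout);			/* upper label: last acquisition */
out_names:
	free(names);			/* fall-through from the label above */
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", do_rename(0), do_rename(1), do_rename(2));
	return 0;
}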
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -951,6 +951,7 @@ static noinline int btrfs_mksubvol(const struct path *parent,
 	struct inode *dir = d_inode(parent->dentry);
 	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
 	struct dentry *dentry;
+	struct fscrypt_str name_str = FSTR_INIT((char *)name, namelen);
 	int error;
 
 	error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
@@ -971,8 +972,7 @@ static noinline int btrfs_mksubvol(const struct path *parent,
 	 * check for them now when we can safely fail
 	 */
 	error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
-					       dir->i_ino, name,
-					       namelen);
+					       dir->i_ino, &name_str);
 	if (error)
 		goto out_dput;
 
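Callers that already hold a plain buffer and length do not need fscrypt_setup_filename(); they wrap the buffer in an on-stack fscrypt_str with FSTR_INIT, which expands to a simple designated initializer, so nothing is allocated and nothing needs freeing. A self-contained sketch (the macro and struct mirror include/linux/fscrypt.h):

#include <stdio.h>
#include <string.h>

/* Mirrors the definitions in include/linux/fscrypt.h. */
struct fscrypt_str {
	char *name;
	unsigned int len;
};

#define FSTR_INIT(n, l)	{ .name = (n), .len = (l) }

static void check_collision(const struct fscrypt_str *name)
{
	printf("checking '%.*s'\n", (int)name->len, name->name);
}

int main(void)
{
	char buf[] = "snap";
	/* On-stack wrapper: no allocation, no matching free needed. */
	struct fscrypt_str name_str = FSTR_INIT(buf, strlen(buf));

	check_collision(&name_str);
	return 0;
}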
@@ -3782,6 +3782,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
 	struct btrfs_trans_handle *trans;
 	struct btrfs_path *path = NULL;
 	struct btrfs_disk_key disk_key;
+	struct fscrypt_str name = FSTR_INIT("default", 7);
 	u64 objectid = 0;
 	u64 dir_id;
 	int ret;
@@ -3825,7 +3826,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
 
 	dir_id = btrfs_super_root_dir(fs_info->super_copy);
 	di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path,
-				   dir_id, "default", 7, 1);
+				   dir_id, &name, 1);
 	if (IS_ERR_OR_NULL(di)) {
 		btrfs_release_path(path);
 		btrfs_end_transaction(trans);
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -327,9 +327,8 @@ out:
 }
 
 int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
-		       u64 ref_id, u64 dirid, u64 *sequence, const char *name,
-		       int name_len)
-
+		       u64 ref_id, u64 dirid, u64 *sequence,
+		       const struct fscrypt_str *name)
 {
 	struct btrfs_root *tree_root = trans->fs_info->tree_root;
 	struct btrfs_path *path;
@@ -356,8 +355,8 @@ again:
 				      struct btrfs_root_ref);
 		ptr = (unsigned long)(ref + 1);
 		if ((btrfs_root_ref_dirid(leaf, ref) != dirid) ||
-		    (btrfs_root_ref_name_len(leaf, ref) != name_len) ||
-		    memcmp_extent_buffer(leaf, name, ptr, name_len)) {
+		    (btrfs_root_ref_name_len(leaf, ref) != name->len) ||
+		    memcmp_extent_buffer(leaf, name->name, ptr, name->len)) {
 			ret = -ENOENT;
 			goto out;
 		}
@@ -400,8 +399,8 @@ out:
  * Will return 0, -ENOMEM, or anything from the CoW path
  */
 int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
-		       u64 ref_id, u64 dirid, u64 sequence, const char *name,
-		       int name_len)
+		       u64 ref_id, u64 dirid, u64 sequence,
+		       const struct fscrypt_str *name)
 {
 	struct btrfs_root *tree_root = trans->fs_info->tree_root;
 	struct btrfs_key key;
@@ -420,7 +419,7 @@ int btrfs_add_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
 	key.offset = ref_id;
again:
 	ret = btrfs_insert_empty_item(trans, tree_root, path, &key,
-				      sizeof(*ref) + name_len);
+				      sizeof(*ref) + name->len);
 	if (ret) {
 		btrfs_abort_transaction(trans, ret);
 		btrfs_free_path(path);
@@ -431,9 +430,9 @@ again:
 	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
 	btrfs_set_root_ref_dirid(leaf, ref, dirid);
 	btrfs_set_root_ref_sequence(leaf, ref, sequence);
-	btrfs_set_root_ref_name_len(leaf, ref, name_len);
+	btrfs_set_root_ref_name_len(leaf, ref, name->len);
 	ptr = (unsigned long)(ref + 1);
-	write_extent_buffer(leaf, name, ptr, name_len);
+	write_extent_buffer(leaf, name->name, ptr, name->len);
 	btrfs_mark_buffer_dirty(leaf);
 
 	if (key.type == BTRFS_ROOT_BACKREF_KEY) {
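The (unsigned long)(ref + 1) arithmetic works because a root ref item is a fixed header immediately followed by the name bytes, and the item was sized as sizeof(*ref) + name->len above. A user-space sketch of that variable-length item layout (the struct is a simplified stand-in for btrfs_root_ref, not the on-disk format):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in: fixed-size header, name bytes stored
 * immediately after it within the same allocation. */
struct root_ref {
	unsigned long long dirid;
	unsigned long long sequence;
	unsigned short name_len;
} __attribute__((packed));

int main(void)
{
	const char *name = "subvol";
	size_t name_len = strlen(name);

	/* Item size = header + name payload, as in btrfs_insert_empty_item(). */
	struct root_ref *ref = malloc(sizeof(*ref) + name_len);
	if (!ref)
		return 1;

	ref->dirid = 256;
	ref->sequence = 1;
	ref->name_len = name_len;
	/* (ref + 1) points just past the header: that is where the name goes. */
	memcpy(ref + 1, name, name_len);

	printf("name in item: %.*s\n", ref->name_len, (const char *)(ref + 1));
	free(ref);
	return 0;
}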
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1596,13 +1596,17 @@ static int gen_unique_name(struct send_ctx *sctx,
 		return -ENOMEM;
 
 	while (1) {
+		struct fscrypt_str tmp_name;
+
 		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
 				ino, gen, idx);
 		ASSERT(len < sizeof(tmp));
+		tmp_name.name = tmp;
+		tmp_name.len = strlen(tmp);
 
 		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
 				path, BTRFS_FIRST_FREE_OBJECTID,
-				tmp, strlen(tmp), 0);
+				&tmp_name, 0);
 		btrfs_release_path(path);
 		if (IS_ERR(di)) {
 			ret = PTR_ERR(di);
@@ -1622,7 +1626,7 @@ static int gen_unique_name(struct send_ctx *sctx,
 
 		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
 				path, BTRFS_FIRST_FREE_OBJECTID,
-				tmp, strlen(tmp), 0);
+				&tmp_name, 0);
 		btrfs_release_path(path);
 		if (IS_ERR(di)) {
 			ret = PTR_ERR(di);
@@ -1752,13 +1756,13 @@ static int lookup_dir_item_inode(struct btrfs_root *root,
 	struct btrfs_dir_item *di;
 	struct btrfs_key key;
 	struct btrfs_path *path;
+	struct fscrypt_str name_str = FSTR_INIT((char *)name, name_len);
 
 	path = alloc_path_for_send();
 	if (!path)
 		return -ENOMEM;
 
-	di = btrfs_lookup_dir_item(NULL, root, path,
-			dir, name, name_len, 0);
+	di = btrfs_lookup_dir_item(NULL, root, path, dir, &name_str, 0);
 	if (IS_ERR_OR_NULL(di)) {
 		ret = di ? PTR_ERR(di) : -ENOENT;
 		goto out;
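The send.c hunks show the two idioms for building a temporary fscrypt_str: FSTR_INIT at declaration time when the buffer already exists (lookup_dir_item_inode), and plain field assignment when the buffer is rewritten on each pass (gen_unique_name). The (char *) cast is needed only because fscrypt_str carries a non-const pointer. A sketch of both, with simplified stand-in types:

#include <stdio.h>
#include <string.h>

struct fscrypt_str {
	char *name;
	unsigned int len;
};

#define FSTR_INIT(n, l)	{ .name = (n), .len = (l) }

static void lookup(const struct fscrypt_str *name)
{
	printf("lookup '%.*s'\n", (int)name->len, name->name);
}

int main(void)
{
	const char *fixed = "item";
	/* Idiom 1: wrap an existing, unchanging buffer at declaration. */
	struct fscrypt_str name_str = FSTR_INIT((char *)fixed, strlen(fixed));
	char tmp[32];
	struct fscrypt_str tmp_name;
	int i;

	lookup(&name_str);

	/* Idiom 2: the buffer changes each iteration, so refresh the fields. */
	for (i = 0; i < 3; i++) {
		snprintf(tmp, sizeof(tmp), "o%d", i);
		tmp_name.name = tmp;
		tmp_name.len = strlen(tmp);
		lookup(&tmp_name);
	}
	return 0;
}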
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1399,6 +1399,7 @@ static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objectid)
 	struct btrfs_dir_item *di;
 	struct btrfs_path *path;
 	struct btrfs_key location;
+	struct fscrypt_str name = FSTR_INIT("default", 7);
 	u64 dir_id;
 
 	path = btrfs_alloc_path();
@@ -1411,7 +1412,7 @@ static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objectid)
 	 * to mount.
 	 */
 	dir_id = btrfs_super_root_dir(fs_info->super_copy);
-	di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
+	di = btrfs_lookup_dir_item(NULL, root, path, dir_id, &name, 0);
 	if (IS_ERR(di)) {
 		btrfs_free_path(path);
 		return PTR_ERR(di);
Some files were not shown because too many files have changed in this diff.