commit 9bd8d344e5
Merge f296bfd5cd ("Merge tag 'nfs-for-5.12-2' of git://git.linux-nfs.org/projects/anna/linux-nfs") into android-mainline

Steps on the way to 5.12-rc3

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ib12b73b1880ba7f89c14276b5cfcca936149b875

122 changed files with 1231 additions and 874 deletions
MAINTAINERS
@@ -19179,7 +19179,7 @@ S:	Maintained
 F:	drivers/infiniband/hw/vmw_pvrdma/
 
 VMware PVSCSI driver
-M:	Jim Gill <jgill@vmware.com>
+M:	Vishal Bhakta <vbhakta@vmware.com>
 M:	VMware PV-Drivers <pv-drivers@vmware.com>
 L:	linux-scsi@vger.kernel.org
 S:	Maintained
arch/arm/xen/p2m.c
@@ -11,6 +11,7 @@
 
 #include <xen/xen.h>
 #include <xen/interface/memory.h>
+#include <xen/grant_table.h>
 #include <xen/page.h>
 #include <xen/swiotlb-xen.h>
 
@@ -109,7 +110,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 			map_ops[i].status = GNTST_general_error;
 			unmap.host_addr = map_ops[i].host_addr,
 			unmap.handle = map_ops[i].handle;
-			map_ops[i].handle = ~0;
+			map_ops[i].handle = INVALID_GRANT_HANDLE;
 			if (map_ops[i].flags & GNTMAP_device_map)
 				unmap.dev_bus_addr = map_ops[i].dev_bus_addr;
 			else
@@ -130,7 +131,6 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
 
 int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 			      struct gnttab_unmap_grant_ref *kunmap_ops,
@@ -145,7 +145,6 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
 
 bool __set_phys_to_machine_multi(unsigned long pfn,
 			unsigned long mfn, unsigned long nr_pages)
arch/arm64/Kconfig
@@ -1055,8 +1055,6 @@ config HW_PERF_EVENTS
 config SYS_SUPPORTS_HUGETLBFS
 	def_bool y
 
-config ARCH_WANT_HUGE_PMD_SHARE
-
 config ARCH_HAS_CACHE_LINE_SIZE
 	def_bool y
 
@@ -1157,8 +1155,8 @@ config XEN
 
 config FORCE_MAX_ZONEORDER
 	int
-	default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
-	default "12" if (ARM64_16K_PAGES && TRANSPARENT_HUGEPAGE)
+	default "14" if ARM64_64K_PAGES
+	default "12" if ARM64_16K_PAGES
 	default "11"
 	help
 	  The kernel memory allocator divides physically contiguous memory
@@ -1855,12 +1853,6 @@ config CMDLINE_FROM_BOOTLOADER
 	  the boot loader doesn't provide any, the default kernel command
 	  string provided in CMDLINE will be used.
 
-config CMDLINE_EXTEND
-	bool "Extend bootloader kernel arguments"
-	help
-	  The command-line arguments provided by the boot loader will be
-	  appended to the default kernel command string.
-
 config CMDLINE_FORCE
 	bool "Always use the default kernel command string"
 	help
arch/arm64/include/asm/memory.h
@@ -328,6 +328,11 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)
 
 #if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
+#define page_to_virt(x)	({						\
+	__typeof__(x) __page = x;					\
+	void *__addr = __va(page_to_phys(__page));			\
+	(void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
+})
 #define virt_to_page(x)	pfn_to_page(virt_to_pfn(x))
 #else
 #define page_to_virt(x)	({						\
arch/arm64/include/asm/mmu_context.h
@@ -63,23 +63,6 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
 extern u64 idmap_t0sz;
 extern u64 idmap_ptrs_per_pgd;
 
-static inline bool __cpu_uses_extended_idmap(void)
-{
-	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
-		return false;
-
-	return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
-}
-
-/*
- * True if the extended ID map requires an extra level of translation table
- * to be configured.
- */
-static inline bool __cpu_uses_extended_idmap_level(void)
-{
-	return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
-}
-
 /*
  * Ensure TCR.T0SZ is set to the provided value.
  */
arch/arm64/include/asm/pgtable-prot.h
@@ -66,7 +66,6 @@ extern bool arm64_use_ng_mappings;
 #define _PAGE_DEFAULT		(_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
 
 #define PAGE_KERNEL		__pgprot(PROT_NORMAL)
-#define PAGE_KERNEL_TAGGED	__pgprot(PROT_NORMAL_TAGGED)
 #define PAGE_KERNEL_RO		__pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
 #define PAGE_KERNEL_ROX		__pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
 #define PAGE_KERNEL_EXEC	__pgprot(PROT_NORMAL & ~PTE_PXN)
arch/arm64/include/asm/pgtable.h
@@ -486,6 +486,9 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
 #define pgprot_device(prot) \
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
+#define pgprot_tagged(prot) \
+	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
+#define pgprot_mhp	pgprot_tagged
 /*
  * DMA allocations for non-coherent devices use what the Arm architecture calls
  * "Normal non-cacheable" memory, which permits speculation, unaligned accesses
arch/arm64/include/asm/sysreg.h
@@ -796,6 +796,11 @@
 #define ID_AA64MMFR0_PARANGE_48		0x5
 #define ID_AA64MMFR0_PARANGE_52		0x6
 
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT	0x0
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE	0x1
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN	0x2
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX	0x7
+
 #ifdef CONFIG_ARM64_PA_BITS_52
 #define ID_AA64MMFR0_PARANGE_MAX	ID_AA64MMFR0_PARANGE_52
 #else
@@ -961,14 +966,17 @@
 #define ID_PFR1_PROGMOD_SHIFT		0
 
 #if defined(CONFIG_ARM64_4K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT	ID_AA64MMFR0_TGRAN4_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED	ID_AA64MMFR0_TGRAN4_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SHIFT		ID_AA64MMFR0_TGRAN4_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_TGRAN4_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX	0x7
 #elif defined(CONFIG_ARM64_16K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT	ID_AA64MMFR0_TGRAN16_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED	ID_AA64MMFR0_TGRAN16_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SHIFT		ID_AA64MMFR0_TGRAN16_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_TGRAN16_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX	0xF
 #elif defined(CONFIG_ARM64_64K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT	ID_AA64MMFR0_TGRAN64_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED	ID_AA64MMFR0_TGRAN64_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SHIFT		ID_AA64MMFR0_TGRAN64_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_TGRAN64_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX	0x7
 #endif
 
 #define MVFR2_FPMISC_SHIFT		4
arch/arm64/kernel/head.S
@@ -319,7 +319,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	 */
 	adrp	x5, __idmap_text_end
 	clz	x5, x5
-	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
+	cmp	x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
 	b.ge	1f			// .. then skip VA range extension
 
 	adr_l	x6, idmap_t0sz
@@ -655,8 +655,10 @@ SYM_FUNC_END(__secondary_too_slow)
 SYM_FUNC_START(__enable_mmu)
 	mrs	x2, ID_AA64MMFR0_EL1
 	ubfx	x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
-	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
-	b.ne	__no_granule_support
+	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
+	b.lt	__no_granule_support
+	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
+	b.gt	__no_granule_support
 	update_early_cpu_boot_status 0, x2, x3
 	adrp	x2, idmap_pg_dir
 	phys_to_ttbr x1, x1
arch/arm64/kernel/idreg-override.c
@@ -163,33 +163,36 @@ static __init void __parse_cmdline(const char *cmdline, bool parse_aliases)
 	} while (1);
 }
 
+static __init const u8 *get_bootargs_cmdline(void)
+{
+	const u8 *prop;
+	void *fdt;
+	int node;
+
+	fdt = get_early_fdt_ptr();
+	if (!fdt)
+		return NULL;
+
+	node = fdt_path_offset(fdt, "/chosen");
+	if (node < 0)
+		return NULL;
+
+	prop = fdt_getprop(fdt, node, "bootargs", NULL);
+	if (!prop)
+		return NULL;
+
+	return strlen(prop) ? prop : NULL;
+}
+
 static __init void parse_cmdline(void)
 {
-	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
-		const u8 *prop;
-		void *fdt;
-		int node;
-
-		fdt = get_early_fdt_ptr();
-		if (!fdt)
-			goto out;
-
-		node = fdt_path_offset(fdt, "/chosen");
-		if (node < 0)
-			goto out;
-
-		prop = fdt_getprop(fdt, node, "bootargs", NULL);
-		if (!prop)
-			goto out;
-
-		__parse_cmdline(prop, true);
-
-		if (!IS_ENABLED(CONFIG_CMDLINE_EXTEND))
-			return;
-	}
+	const u8 *prop = get_bootargs_cmdline();
 
-out:
-	__parse_cmdline(CONFIG_CMDLINE, true);
+	if (IS_ENABLED(CONFIG_CMDLINE_FORCE) || !prop)
+		__parse_cmdline(CONFIG_CMDLINE, true);
+
+	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && prop)
+		__parse_cmdline(prop, true);
 }
 
 /* Keep checkers quiet */
arch/arm64/kernel/perf_event.c
@@ -460,7 +460,7 @@ static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
 	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
 }
 
-static inline u32 armv8pmu_read_evcntr(int idx)
+static inline u64 armv8pmu_read_evcntr(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
 
arch/arm64/kvm/reset.c
@@ -311,16 +311,18 @@ int kvm_set_ipa_limit(void)
 	}
 
 	switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) {
-	default:
-	case 1:
+	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
 		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
 		return -EINVAL;
-	case 0:
+	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
 		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
 		break;
-	case 2:
+	case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
 		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
 		break;
+	default:
+		kvm_err("Unsupported value for TGRAN_2, giving up\n");
+		return -EINVAL;
 	}
 
 	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
arch/arm64/mm/init.c
@@ -219,17 +219,40 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 
 int pfn_valid(unsigned long pfn)
 {
-	phys_addr_t addr = pfn << PAGE_SHIFT;
+	phys_addr_t addr = PFN_PHYS(pfn);
 
-	if ((addr >> PAGE_SHIFT) != pfn)
+	/*
+	 * Ensure the upper PAGE_SHIFT bits are clear in the
+	 * pfn. Else it might lead to false positives when
+	 * some of the upper bits are set, but the lower bits
+	 * match a valid pfn.
+	 */
+	if (PHYS_PFN(addr) != pfn)
 		return 0;
 
 #ifdef CONFIG_SPARSEMEM
+{
+	struct mem_section *ms;
+
 	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
 		return 0;
 
-	if (!valid_section(__pfn_to_section(pfn)))
+	ms = __pfn_to_section(pfn);
+	if (!valid_section(ms))
 		return 0;
+
+	/*
+	 * ZONE_DEVICE memory does not have the memblock entries.
+	 * memblock_is_map_memory() check for ZONE_DEVICE based
+	 * addresses will always fail. Even the normal hotplugged
+	 * memory will never have MEMBLOCK_NOMAP flag set in their
+	 * memblock entries. Skip memblock search for all non early
+	 * memory sections covering all of hotplug memory including
+	 * both normal and ZONE_DEVICE based.
+	 */
+	if (!early_section(ms))
+		return pfn_section_valid(ms, pfn);
+}
 #endif
 	return memblock_is_map_memory(addr);
 }
arch/arm64/mm/mmu.c
@@ -40,7 +40,7 @@
 #define NO_BLOCK_MAPPINGS	BIT(0)
 #define NO_CONT_MAPPINGS	BIT(1)
 
-u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
+u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
 
 u64 __section(".mmuoff.data.write") vabits_actual;
@@ -512,7 +512,8 @@ static void __init map_mem(pgd_t *pgdp)
 		 * if MTE is present. Otherwise, it has the same attributes as
 		 * PAGE_KERNEL.
 		 */
-		__map_memblock(pgdp, start, end, PAGE_KERNEL_TAGGED, flags);
+		__map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
+			       flags);
 	}
 
 	/*
arch/x86/xen/p2m.c
@@ -741,7 +741,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 			map_ops[i].status = GNTST_general_error;
 			unmap[0].host_addr = map_ops[i].host_addr,
 			unmap[0].handle = map_ops[i].handle;
-			map_ops[i].handle = ~0;
+			map_ops[i].handle = INVALID_GRANT_HANDLE;
 			if (map_ops[i].flags & GNTMAP_device_map)
 				unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr;
 			else
@@ -751,7 +751,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 			kmap_ops[i].status = GNTST_general_error;
 			unmap[1].host_addr = kmap_ops[i].host_addr,
 			unmap[1].handle = kmap_ops[i].handle;
-			kmap_ops[i].handle = ~0;
+			kmap_ops[i].handle = INVALID_GRANT_HANDLE;
 			if (kmap_ops[i].flags & GNTMAP_device_map)
 				unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr;
 			else
@@ -776,7 +776,6 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
 out:
 	return ret;
 }
-EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
 
 int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 			      struct gnttab_unmap_grant_ref *kunmap_ops,
@@ -802,7 +801,6 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
 
 #ifdef CONFIG_XEN_DEBUG_FS
 #include <linux/debugfs.h>
block/bio.c
@@ -33,7 +33,7 @@ static struct biovec_slab {
 	{ .nr_vecs = 16, .name = "biovec-16" },
 	{ .nr_vecs = 64, .name = "biovec-64" },
 	{ .nr_vecs = 128, .name = "biovec-128" },
-	{ .nr_vecs = BIO_MAX_PAGES, .name = "biovec-max" },
+	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
 };
 
 static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
@@ -46,7 +46,7 @@ static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
 		return &bvec_slabs[1];
 	case 65 ... 128:
 		return &bvec_slabs[2];
-	case 129 ... BIO_MAX_PAGES:
+	case 129 ... BIO_MAX_VECS:
 		return &bvec_slabs[3];
 	default:
 		BUG();
@@ -151,9 +151,9 @@ out:
 
 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
 {
-	BIO_BUG_ON(nr_vecs > BIO_MAX_PAGES);
+	BIO_BUG_ON(nr_vecs > BIO_MAX_VECS);
 
-	if (nr_vecs == BIO_MAX_PAGES)
+	if (nr_vecs == BIO_MAX_VECS)
 		mempool_free(bv, pool);
 	else if (nr_vecs > BIO_INLINE_VECS)
 		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
@@ -186,15 +186,15 @@ struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
 	/*
 	 * Try a slab allocation first for all smaller allocations.  If that
 	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
-	 * The mempool is sized to handle up to BIO_MAX_PAGES entries.
+	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
 	 */
-	if (*nr_vecs < BIO_MAX_PAGES) {
+	if (*nr_vecs < BIO_MAX_VECS) {
 		struct bio_vec *bvl;
 
 		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
 		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
 			return bvl;
-		*nr_vecs = BIO_MAX_PAGES;
+		*nr_vecs = BIO_MAX_VECS;
 	}
 
 	return mempool_alloc(pool, gfp_mask);
block/blk-cgroup-rwstat.c
@@ -109,6 +109,7 @@ void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
 
 	lockdep_assert_held(&blkg->q->queue_lock);
 
+	memset(sum, 0, sizeof(*sum));
 	rcu_read_lock();
 	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
 		struct blkg_rwstat *rwstat;
@@ -122,7 +123,7 @@ void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
 		rwstat = (void *)pos_blkg + off;
 
 		for (i = 0; i < BLKG_RWSTAT_NR; i++)
-			sum->cnt[i] = blkg_rwstat_read_counter(rwstat, i);
+			sum->cnt[i] += blkg_rwstat_read_counter(rwstat, i);
 	}
 	rcu_read_unlock();
 }
block/blk-crypto-fallback.c
@@ -221,7 +221,7 @@ static bool blk_crypto_split_bio_if_needed(struct bio **bio_ptr)
 
 	bio_for_each_segment(bv, bio, iter) {
 		num_sectors += bv.bv_len >> SECTOR_SHIFT;
-		if (++i == BIO_MAX_PAGES)
+		if (++i == BIO_MAX_VECS)
 			break;
 	}
 	if (num_sectors < bio_sectors(bio)) {
block/blk-lib.c
@@ -296,7 +296,7 @@ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
 {
 	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);
 
-	return min(pages, (sector_t)BIO_MAX_PAGES);
+	return min(pages, (sector_t)BIO_MAX_VECS);
 }
 
 static int __blkdev_issue_zero_pages(struct block_device *bdev,
block/blk-map.c
@@ -249,7 +249,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 	if (!iov_iter_count(iter))
 		return -EINVAL;
 
-	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_PAGES));
+	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_VECS));
 	if (!bio)
 		return -ENOMEM;
 	bio->bi_opf |= req_op(rq);
block/blk-zoned.c
@@ -240,7 +240,7 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
 		 */
 		if (op == REQ_OP_ZONE_RESET &&
 		    blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
-			bio->bi_opf = REQ_OP_ZONE_RESET_ALL;
+			bio->bi_opf = REQ_OP_ZONE_RESET_ALL | REQ_SYNC;
 			break;
 		}
 
@@ -318,6 +318,22 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
 	return 0;
 }
 
+static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
+				      const struct blk_zone_range *zrange)
+{
+	loff_t start, end;
+
+	if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
+	    zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
+		/* Out of range */
+		return -EINVAL;
+
+	start = zrange->sector << SECTOR_SHIFT;
+	end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;
+
+	return truncate_bdev_range(bdev, mode, start, end);
+}
+
 /*
  * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
  * Called from blkdev_ioctl.
@@ -329,6 +345,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
 	struct request_queue *q;
 	struct blk_zone_range zrange;
 	enum req_opf op;
+	int ret;
 
 	if (!argp)
 		return -EINVAL;
@@ -352,6 +369,11 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
 	switch (cmd) {
 	case BLKRESETZONE:
 		op = REQ_OP_ZONE_RESET;
+
+		/* Invalidate the page cache, including dirty pages. */
+		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
+		if (ret)
+			return ret;
 		break;
 	case BLKOPENZONE:
 		op = REQ_OP_ZONE_OPEN;
@@ -366,8 +388,20 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
 		return -ENOTTY;
 	}
 
-	return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
-				GFP_KERNEL);
+	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
+			       GFP_KERNEL);
+
+	/*
+	 * Invalidate the page cache again for zone reset: writes can only be
+	 * direct for zoned devices so concurrent writes would not add any page
+	 * to the page cache after/during reset. The page cache may be filled
+	 * again due to concurrent reads though and dropping the pages for
+	 * these is fine.
+	 */
+	if (!ret && cmd == BLKRESETZONE)
+		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
+
+	return ret;
 }
 
 static inline unsigned long *blk_alloc_zone_bitmap(int node,
block/bounce.c
@@ -229,10 +229,10 @@ static struct bio *bounce_clone_bio(struct bio *bio_src)
 	 *    - The point of cloning the biovec is to produce a bio with a biovec
 	 *      the caller can modify: bi_idx and bi_bvec_done should be 0.
 	 *
-	 *    - The original bio could've had more than BIO_MAX_PAGES biovecs; if
+	 *    - The original bio could've had more than BIO_MAX_VECS biovecs; if
 	 *      we tried to clone the whole thing bio_alloc_bioset() would fail.
 	 *      But the clone should succeed as long as the number of biovecs we
-	 *      actually need to allocate is fewer than BIO_MAX_PAGES.
+	 *      actually need to allocate is fewer than BIO_MAX_VECS.
 	 *
 	 *    - Lastly, bi_vcnt should not be looked at or relied upon by code
 	 *      that does not own the bio - reason being drivers don't use it for
@@ -299,7 +299,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 		int sectors = 0;
 
 		bio_for_each_segment(from, *bio_orig, iter) {
-			if (i++ < BIO_MAX_PAGES)
+			if (i++ < BIO_MAX_VECS)
 				sectors += from.bv_len >> 9;
 			if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
 				bounce = true;
block/genhd.c
@@ -534,10 +534,8 @@ static void register_disk(struct device *parent, struct gendisk *disk,
 		kobject_create_and_add("holders", &ddev->kobj);
 	disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
 
-	if (disk->flags & GENHD_FL_HIDDEN) {
-		dev_set_uevent_suppress(ddev, 0);
+	if (disk->flags & GENHD_FL_HIDDEN)
 		return;
-	}
 
 	disk_scan_partitions(disk);
 
drivers/base/swnode.c
@@ -938,6 +938,9 @@ int software_node_register(const struct software_node *node)
 	if (software_node_to_swnode(node))
 		return -EEXIST;
 
+	if (node->parent && !parent)
+		return -EINVAL;
+
 	return PTR_ERR_OR_ZERO(swnode_register(node, parent, 0));
 }
 EXPORT_SYMBOL_GPL(software_node_register);
@@ -1002,25 +1005,33 @@ EXPORT_SYMBOL_GPL(fwnode_remove_software_node);
 /**
  * device_add_software_node - Assign software node to a device
  * @dev: The device the software node is meant for.
- * @swnode: The software node.
+ * @node: The software node.
  *
- * This function will register @swnode and make it the secondary firmware node
- * pointer of @dev. If @dev has no primary node, then @swnode will become the primary
- * node.
+ * This function will make @node the secondary firmware node pointer of @dev. If
+ * @dev has no primary node, then @node will become the primary node. The
+ * function will register @node automatically if it wasn't already registered.
  */
-int device_add_software_node(struct device *dev, const struct software_node *swnode)
+int device_add_software_node(struct device *dev, const struct software_node *node)
 {
+	struct swnode *swnode;
 	int ret;
 
 	/* Only one software node per device. */
 	if (dev_to_swnode(dev))
 		return -EBUSY;
 
-	ret = software_node_register(swnode);
-	if (ret)
-		return ret;
+	swnode = software_node_to_swnode(node);
+	if (swnode) {
+		kobject_get(&swnode->kobj);
+	} else {
+		ret = software_node_register(node);
+		if (ret)
+			return ret;
 
-	set_secondary_fwnode(dev, software_node_fwnode(swnode));
+		swnode = software_node_to_swnode(node);
+	}
+
+	set_secondary_fwnode(dev, &swnode->fwnode);
 
 	return 0;
 }
drivers/block/drbd/drbd_int.h
@@ -1324,7 +1324,7 @@ struct bm_extent {
  * A followup commit may allow even bigger BIO sizes,
  * once we thought that through. */
 #define DRBD_MAX_BIO_SIZE (1U << 20)
-#if DRBD_MAX_BIO_SIZE > (BIO_MAX_PAGES << PAGE_SHIFT)
+#if DRBD_MAX_BIO_SIZE > (BIO_MAX_VECS << PAGE_SHIFT)
 #error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
 #endif
 #define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */
drivers/block/rsxx/core.c
@@ -871,6 +871,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
 	card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event");
 	if (!card->event_wq) {
 		dev_err(CARD_TO_DEV(card), "Failed card event setup.\n");
+		st = -ENOMEM;
 		goto failed_event_handler;
 	}
 
drivers/block/umem.c
@@ -877,6 +877,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	if (card->mm_pages[0].desc == NULL ||
 	    card->mm_pages[1].desc == NULL) {
 		dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n");
+		ret = -ENOMEM;
 		goto failed_alloc;
 	}
 	reset_page(&card->mm_pages[0]);
@@ -888,8 +889,10 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	spin_lock_init(&card->lock);
 
 	card->queue = blk_alloc_queue(NUMA_NO_NODE);
-	if (!card->queue)
+	if (!card->queue) {
+		ret = -ENOMEM;
 		goto failed_alloc;
+	}
 
 	tasklet_init(&card->tasklet, process_page, (unsigned long)card);
 
drivers/cpufreq/cpufreq-dt-platdev.c
@@ -103,6 +103,8 @@ static const struct of_device_id whitelist[] __initconst = {
 static const struct of_device_id blacklist[] __initconst = {
 	{ .compatible = "allwinner,sun50i-h6", },
 
+	{ .compatible = "arm,vexpress", },
+
 	{ .compatible = "calxeda,highbank", },
 	{ .compatible = "calxeda,ecx-2000", },
 
drivers/cpufreq/qcom-cpufreq-hw.c
@@ -317,9 +317,9 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
 	}
 
 	base = ioremap(res->start, resource_size(res));
-	if (IS_ERR(base)) {
+	if (!base) {
 		dev_err(dev, "failed to map resource %pR\n", res);
-		ret = PTR_ERR(base);
+		ret = -ENOMEM;
 		goto release_region;
 	}
 
@@ -374,7 +374,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
 error:
 	kfree(data);
 unmap_base:
-	iounmap(data->base);
+	iounmap(base);
 release_region:
 	release_mem_region(res->start, resource_size(res));
 	return ret;
drivers/firmware/efi/libstub/arm64-stub.c
@@ -24,7 +24,7 @@ efi_status_t check_platform_features(void)
 		return EFI_SUCCESS;
 
 	tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
-	if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) {
+	if (tg < ID_AA64MMFR0_TGRAN_SUPPORTED_MIN || tg > ID_AA64MMFR0_TGRAN_SUPPORTED_MAX) {
 		if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
 			efi_err("This 64 KB granular kernel is not supported by your CPU\n");
 		else
drivers/md/bcache/super.c
@@ -965,7 +965,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
 	q->limits.max_hw_sectors	= UINT_MAX;
 	q->limits.max_sectors		= UINT_MAX;
 	q->limits.max_segment_size	= UINT_MAX;
-	q->limits.max_segments		= BIO_MAX_PAGES;
+	q->limits.max_segments		= BIO_MAX_VECS;
 	blk_queue_max_discard_sectors(q, UINT_MAX);
 	q->limits.discard_granularity	= 512;
 	q->limits.io_min		= block_size;
drivers/md/dm-crypt.c
@@ -229,7 +229,7 @@ static DEFINE_SPINLOCK(dm_crypt_clients_lock);
 static unsigned dm_crypt_clients_n = 0;
 static volatile unsigned long dm_crypt_pages_per_client;
 #define DM_CRYPT_MEMORY_PERCENT			2
-#define DM_CRYPT_MIN_PAGES_PER_CLIENT		(BIO_MAX_PAGES * 16)
+#define DM_CRYPT_MIN_PAGES_PER_CLIENT		(BIO_MAX_VECS * 16)
 
 static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
@@ -3246,7 +3246,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 		ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
 		      ARCH_KMALLOC_MINALIGN);
 
-	ret = mempool_init(&cc->page_pool, BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
+	ret = mempool_init(&cc->page_pool, BIO_MAX_VECS, crypt_page_alloc, crypt_page_free, cc);
 	if (ret) {
 		ti->error = "Cannot allocate page mempool";
 		goto bad;
@@ -3373,9 +3373,9 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 	/*
 	 * Check if bio is too large, split as needed.
 	 */
-	if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
+	if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_VECS << PAGE_SHIFT)) &&
 	    (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
-		dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
+		dm_accept_partial_bio(bio, ((BIO_MAX_VECS << PAGE_SHIFT) >> SECTOR_SHIFT));
 
 	/*
 	 * Ensure that bio is a multiple of internal sector encryption size
drivers/md/dm-writecache.c
@@ -1892,10 +1892,10 @@ restart:
 			list_add(&g->lru, &wbl.list);
 			wbl.size++;
 			g->write_in_progress = true;
-			g->wc_list_contiguous = BIO_MAX_PAGES;
+			g->wc_list_contiguous = BIO_MAX_VECS;
 			f = g;
 			e->wc_list_contiguous++;
-			if (unlikely(e->wc_list_contiguous == BIO_MAX_PAGES)) {
+			if (unlikely(e->wc_list_contiguous == BIO_MAX_VECS)) {
 				if (unlikely(wc->writeback_all)) {
 					next_node = rb_next(&f->rb_node);
 					if (likely(next_node))
drivers/md/raid5-cache.c
@@ -735,7 +735,7 @@ static void r5l_submit_current_io(struct r5l_log *log)
 
 static struct bio *r5l_bio_alloc(struct r5l_log *log)
 {
-	struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, &log->bs);
+	struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS, &log->bs);
 
 	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 	bio_set_dev(bio, log->rdev->bdev);
@@ -1634,7 +1634,7 @@ static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
 {
 	struct page *page;
 
-	ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, &log->bs);
+	ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_VECS, &log->bs);
 	if (!ctx->ra_bio)
 		return -ENOMEM;
 
drivers/md/raid5-ppl.c
@@ -496,7 +496,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
 		if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
 			struct bio *prev = bio;
 
-			bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
+			bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_VECS,
 					       &ppl_conf->bs);
 			bio->bi_opf = prev->bi_opf;
 			bio->bi_write_hint = prev->bi_write_hint;
drivers/mmc/core/bus.c
@@ -399,11 +399,6 @@ void mmc_remove_card(struct mmc_card *card)
 	mmc_remove_card_debugfs(card);
 #endif
 
-	if (host->cqe_enabled) {
-		host->cqe_ops->cqe_disable(host);
-		host->cqe_enabled = false;
-	}
-
 	if (mmc_card_present(card)) {
 		if (mmc_host_is_spi(card->host)) {
 			pr_info("%s: SPI card removed\n",
@@ -416,6 +411,10 @@ void mmc_remove_card(struct mmc_card *card)
 		of_node_put(card->dev.of_node);
 	}
 
+	if (host->cqe_enabled) {
+		host->cqe_ops->cqe_disable(host);
+		host->cqe_enabled = false;
+	}
+
 	put_device(&card->dev);
 }
 
drivers/mmc/core/mmc.c
@@ -423,10 +423,6 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
 
 	/* EXT_CSD value is in units of 10ms, but we store in ms */
 	card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
-	/* Some eMMC set the value too low so set a minimum */
-	if (card->ext_csd.part_time &&
-	    card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
-		card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
 
 	/* Sleep / awake timeout in 100ns units */
 	if (sa_shift > 0 && sa_shift <= 0x17)
@@ -616,6 +612,17 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
 		card->ext_csd.data_sector_size = 512;
 	}
 
+	/*
+	 * GENERIC_CMD6_TIME is to be used "unless a specific timeout is defined
+	 * when accessing a specific field", so use it here if there is no
+	 * PARTITION_SWITCH_TIME.
+	 */
+	if (!card->ext_csd.part_time)
+		card->ext_csd.part_time = card->ext_csd.generic_cmd6_time;
+	/* Some eMMC set the value too low so set a minimum */
+	if (card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
+		card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
+
 	/* eMMC v5 or later */
 	if (card->ext_csd.rev >= 7) {
 		memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
drivers/mmc/host/mmci.c
@@ -1242,7 +1242,11 @@ mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
 		if (!cmd->busy_timeout)
 			cmd->busy_timeout = 10 * MSEC_PER_SEC;
 
-		clks = (unsigned long long)cmd->busy_timeout * host->cclk;
+		if (cmd->busy_timeout > host->mmc->max_busy_timeout)
+			clks = (unsigned long long)host->mmc->max_busy_timeout * host->cclk;
+		else
+			clks = (unsigned long long)cmd->busy_timeout * host->cclk;
+
 		do_div(clks, MSEC_PER_SEC);
 		writel_relaxed(clks, host->base + MMCIDATATIMER);
 	}
@@ -2151,6 +2155,10 @@ static int mmci_probe(struct amba_device *dev,
 			mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
 	}
 
+	/* Variants with mandatory busy timeout in HW needs R1B responses. */
+	if (variant->busy_timeout)
+		mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+
 	/* Prepare a CMD12 - needed to clear the DPSM on some variants. */
 	host->stop_abort.opcode = MMC_STOP_TRANSMISSION;
 	host->stop_abort.arg = 0;
drivers/nvme/host/core.c
@@ -380,6 +380,7 @@ bool nvme_cancel_request(struct request *req, void *data, bool reserved)
 		return true;
 
 	nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
+	nvme_req(req)->flags |= NVME_REQ_CANCELLED;
 	blk_mq_complete_request(req);
 	return true;
 }
@@ -1440,7 +1441,7 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
 		goto out_free_id;
 	}
 
-	error = -ENODEV;
+	error = NVME_SC_INVALID_NS | NVME_SC_DNR;
 	if ((*id)->ncap == 0) /* namespace not allocated or attached */
 		goto out_free_id;
 
@@ -4038,7 +4039,7 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid)
 static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
 {
 	struct nvme_id_ns *id;
-	int ret = -ENODEV;
+	int ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
 
 	if (test_bit(NVME_NS_DEAD, &ns->flags))
 		goto out;
@@ -4047,7 +4048,7 @@ static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
 	if (ret)
 		goto out;
 
-	ret = -ENODEV;
+	ret = NVME_SC_INVALID_NS | NVME_SC_DNR;
 	if (!nvme_ns_ids_equal(&ns->head->ids, ids)) {
 		dev_err(ns->ctrl->device,
 			"identifiers changed for nsid %d\n", ns->head->ns_id);
@@ -4065,7 +4066,7 @@ out:
 	 *
 	 * TODO: we should probably schedule a delayed retry here.
 	 */
-	if (ret && ret != -ENOMEM && !(ret > 0 && !(ret & NVME_SC_DNR)))
+	if (ret > 0 && (ret & NVME_SC_DNR))
 		nvme_ns_remove(ns);
 }
 
@@ -4095,6 +4096,12 @@ static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 				nsid);
 			break;
 		}
+		if (!nvme_multi_css(ctrl)) {
+			dev_warn(ctrl->device,
+				"command set not reported for nsid: %d\n",
+				nsid);
+			break;
+		}
 		nvme_alloc_ns(ctrl, nsid, &ids);
 		break;
 	default:
drivers/nvme/host/fc.c
@@ -1956,7 +1956,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
 
 	if (opstate == FCPOP_STATE_ABORTED)
-		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
+		status = cpu_to_le16(NVME_SC_HOST_ABORTED_CMD << 1);
 	else if (freq->status) {
 		status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
 		dev_info(ctrl->ctrl.device,
@@ -2055,7 +2055,7 @@ done:
 	nvme_fc_complete_rq(rq);
 
 check_error:
-	if (terminate_assoc)
+	if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
 		queue_work(nvme_reset_wq, &ctrl->ioerr_work);
 }
 
@@ -2443,6 +2443,7 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
 	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
 
+	op->nreq.flags |= NVME_REQ_CANCELLED;
 	__nvme_fc_abort_op(ctrl, op);
 	return true;
 }
drivers/nvme/host/pci.c
@@ -3246,6 +3246,7 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+				NVME_QUIRK_DISABLE_WRITE_ZEROES|
 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
 	{ PCI_DEVICE(0x1987, 0x5016),	/* Phison E16 */
 		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
drivers/nvme/host/zns.c
@@ -9,7 +9,13 @@
 
 int nvme_revalidate_zones(struct nvme_ns *ns)
 {
-	return blk_revalidate_disk_zones(ns->disk, NULL);
+	struct request_queue *q = ns->queue;
+	int ret;
+
+	ret = blk_revalidate_disk_zones(ns->disk, NULL);
+	if (!ret)
+		blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
+	return ret;
 }
 
 static int nvme_set_max_append(struct nvme_ctrl *ctrl)
@@ -107,7 +113,6 @@ int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
 	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
 	blk_queue_max_open_zones(q, le32_to_cpu(id->mor) + 1);
 	blk_queue_max_active_zones(q, le32_to_cpu(id->mar) + 1);
-	blk_queue_max_zone_append_sectors(q, ns->ctrl->max_zone_append);
 free_data:
 	kfree(id);
 	return status;
drivers/nvme/target/passthru.c
@@ -50,9 +50,9 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 
 	/*
 	 * nvmet_passthru_map_sg is limitted to using a single bio so limit
-	 * the mdts based on BIO_MAX_PAGES as well
+	 * the mdts based on BIO_MAX_VECS as well
 	 */
-	max_hw_sectors = min_not_zero(BIO_MAX_PAGES << (PAGE_SHIFT - 9),
+	max_hw_sectors = min_not_zero(BIO_MAX_VECS << (PAGE_SHIFT - 9),
 		max_hw_sectors);
 
 	page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
@@ -191,7 +191,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
 	struct bio *bio;
 	int i;
 
-	if (req->sg_cnt > BIO_MAX_PAGES)
+	if (req->sg_cnt > BIO_MAX_VECS)
 		return -EINVAL;
 
 	if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
drivers/nvme/target/rdma.c
@@ -802,9 +802,8 @@ static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
 		nvmet_req_uninit(&rsp->req);
 		nvmet_rdma_release_rsp(rsp);
 		if (wc->status != IB_WC_WR_FLUSH_ERR) {
-			pr_info("RDMA WRITE for CQE 0x%p failed with status %s (%d).\n",
-				wc->wr_cqe, ib_wc_status_msg(wc->status),
-				wc->status);
+			pr_info("RDMA WRITE for CQE failed with status %s (%d).\n",
+				ib_wc_status_msg(wc->status), wc->status);
 			nvmet_rdma_error_comp(queue);
 		}
 		return;
drivers/opp/core.c
@@ -1492,7 +1492,11 @@ static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table,
 
 	mutex_lock(&opp_table->lock);
 	list_for_each_entry(temp, &opp_table->opp_list, node) {
-		if (dynamic == temp->dynamic) {
+		/*
+		 * Refcount must be dropped only once for each OPP by OPP core,
+		 * do that with help of "removed" flag.
+		 */
+		if (!temp->removed && dynamic == temp->dynamic) {
 			opp = temp;
 			break;
 		}
@@ -1502,10 +1506,27 @@ static struct dev_pm_opp *_opp_get_next(struct opp_table *opp_table,
 	return opp;
 }
 
-bool _opp_remove_all_static(struct opp_table *opp_table)
+/*
+ * Can't call dev_pm_opp_put() from under the lock as debugfs removal needs to
+ * happen lock less to avoid circular dependency issues. This routine must be
+ * called without the opp_table->lock held.
+ */
+static void _opp_remove_all(struct opp_table *opp_table, bool dynamic)
 {
 	struct dev_pm_opp *opp;
 
+	while ((opp = _opp_get_next(opp_table, dynamic))) {
+		opp->removed = true;
+		dev_pm_opp_put(opp);
+
+		/* Drop the references taken by dev_pm_opp_add() */
+		if (dynamic)
+			dev_pm_opp_put_opp_table(opp_table);
+	}
+}
+
+bool _opp_remove_all_static(struct opp_table *opp_table)
+{
 	mutex_lock(&opp_table->lock);
 
 	if (!opp_table->parsed_static_opps) {
@@ -1520,13 +1541,7 @@ bool _opp_remove_all_static(struct opp_table *opp_table)
 
 	mutex_unlock(&opp_table->lock);
 
-	/*
-	 * Can't remove the OPP from under the lock, debugfs removal needs to
-	 * happen lock less to avoid circular dependency issues.
-	 */
-	while ((opp = _opp_get_next(opp_table, false)))
-		dev_pm_opp_put(opp);
-
+	_opp_remove_all(opp_table, false);
 	return true;
 }
 
@@ -1539,25 +1554,12 @@ bool _opp_remove_all_static(struct opp_table *opp_table)
 void dev_pm_opp_remove_all_dynamic(struct device *dev)
 {
 	struct opp_table *opp_table;
-	struct dev_pm_opp *opp;
-	int count = 0;
 
 	opp_table = _find_opp_table(dev);
 	if (IS_ERR(opp_table))
 		return;
 
-	/*
-	 * Can't remove the OPP from under the lock, debugfs removal needs to
-	 * happen lock less to avoid circular dependency issues.
-	 */
-	while ((opp = _opp_get_next(opp_table, true))) {
-		dev_pm_opp_put(opp);
-		count++;
-	}
-
-	/* Drop the references taken by dev_pm_opp_add() */
-	while (count--)
-		dev_pm_opp_put_opp_table(opp_table);
+	_opp_remove_all(opp_table, true);
 
 	/* Drop the reference taken by _find_opp_table() */
 	dev_pm_opp_put_opp_table(opp_table);
drivers/opp/opp.h
@@ -56,6 +56,7 @@ extern struct list_head opp_tables, lazy_opp_tables;
 * @dynamic:	not-created from static DT entries.
 * @turbo:	true if turbo (boost) OPP
 * @suspend:	true if suspend OPP
+ * @removed:	flag indicating that OPP's reference is dropped by OPP core.
 * @pstate: Device's power domain's performance state.
 * @rate:	Frequency in hertz
 * @level:	Performance level
@@ -78,6 +79,7 @@ struct dev_pm_opp {
 	bool dynamic;
 	bool turbo;
 	bool suspend;
+	bool removed;
 	unsigned int pstate;
 	unsigned long rate;
 	unsigned int level;
drivers/pci/xen-pcifront.c
@@ -26,7 +26,7 @@
 #include <xen/platform_pci.h>
 
 #include <asm/xen/swiotlb-xen.h>
-#define INVALID_GRANT_REF (0)
+
 #define INVALID_EVTCHN    (-1)
 
 struct pci_bus_entry {
@@ -42,7 +42,7 @@ struct pcifront_device {
 	struct list_head root_buses;
 
 	int evtchn;
-	int gnt_ref;
+	grant_ref_t gnt_ref;
 
 	int irq;
 
drivers/perf/arm_dmc620_pmu.c
@@ -681,6 +681,7 @@ static int dmc620_pmu_device_probe(struct platform_device *pdev)
 	if (!name) {
 		dev_err(&pdev->dev,
 			  "Create name failed, PMU @%pa\n", &res->start);
+		ret = -ENOMEM;
 		goto out_teardown_dev;
 	}
 
drivers/regulator/mt6315-regulator.c
@@ -41,7 +41,7 @@ struct mt6315_chip {
 		.type = REGULATOR_VOLTAGE,			\
 		.id = _bid,					\
 		.owner = THIS_MODULE,				\
-		.n_voltages = 0xbf,				\
+		.n_voltages = 0xc0,				\
 		.linear_ranges = mt_volt_range1,		\
 		.n_linear_ranges = ARRAY_SIZE(mt_volt_range1),	\
 		.vsel_reg = _vsel,				\
@@ -69,7 +69,7 @@ static unsigned int mt6315_map_mode(u32 mode)
 	case MT6315_BUCK_MODE_LP:
 		return REGULATOR_MODE_IDLE;
 	default:
-		return -EINVAL;
+		return REGULATOR_MODE_INVALID;
 	}
 }
 
drivers/regulator/pca9450-regulator.c
@@ -797,6 +797,14 @@ static int pca9450_i2c_probe(struct i2c_client *i2c,
 		return ret;
 	}
 
+	/* Clear PRESET_EN bit in BUCK123_DVS to use DVS registers */
+	ret = regmap_clear_bits(pca9450->regmap, PCA9450_REG_BUCK123_DVS,
+				BUCK123_PRESET_EN);
+	if (ret) {
+		dev_err(&i2c->dev, "Failed to clear PRESET_EN bit: %d\n", ret);
+		return ret;
+	}
+
 	/* Set reset behavior on assertion of WDOG_B signal */
 	ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_RESET_CTRL,
 				WDOG_B_CFG_MASK, WDOG_B_CFG_COLD_LDO12);
@@ -814,7 +822,7 @@ static int pca9450_i2c_probe(struct i2c_client *i2c,
 
 	if (IS_ERR(pca9450->sd_vsel_gpio)) {
 		dev_err(&i2c->dev, "Failed to get SD_VSEL GPIO\n");
-		return ret;
+		return PTR_ERR(pca9450->sd_vsel_gpio);
 	}
 
 	dev_info(&i2c->dev, "%s probed.\n",
drivers/regulator/qcom-rpmh-regulator.c
@@ -726,8 +726,8 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
 static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
 	.regulator_type = VRM,
 	.ops = &rpmh_regulator_vrm_ops,
-	.voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000),
-	.n_voltages = 5,
+	.voltage_range = REGULATOR_LINEAR_RANGE(320000, 0, 235, 16000),
+	.n_voltages = 236,
 	.pmic_mode_map = pmic_mode_map_pmic5_smps,
 	.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
 };
@@ -901,7 +901,7 @@ static const struct rpmh_vreg_init_data pm8350_vreg_data[] = {
 };
 
 static const struct rpmh_vreg_init_data pm8350c_vreg_data[] = {
-	RPMH_VREG("smps1",  "smp%s1", &pmic5_hfsmps510, "vdd-s1"),
+	RPMH_VREG("smps1",  "smp%s1", &pmic5_hfsmps515, "vdd-s1"),
 	RPMH_VREG("smps2",  "smp%s2", &pmic5_ftsmps510, "vdd-s2"),
 	RPMH_VREG("smps3",  "smp%s3", &pmic5_ftsmps510, "vdd-s3"),
 	RPMH_VREG("smps4",  "smp%s4", &pmic5_ftsmps510, "vdd-s4"),
drivers/regulator/rt4831-regulator.c
@@ -153,9 +153,9 @@ static int rt4831_regulator_probe(struct platform_device *pdev)
 	int i, ret;
 
 	regmap = dev_get_regmap(pdev->dev.parent, NULL);
-	if (IS_ERR(regmap)) {
+	if (!regmap) {
 		dev_err(&pdev->dev, "Failed to init regmap\n");
-		return PTR_ERR(regmap);
+		return -ENODEV;
 	}
 
 	/* Configure DSV mode to normal by default */
drivers/s390/block/dasd.c
@@ -3052,7 +3052,8 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
 
 	basedev = block->base;
 	spin_lock_irq(&dq->lock);
-	if (basedev->state < DASD_STATE_READY) {
+	if (basedev->state < DASD_STATE_READY ||
+	    test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) {
 		DBF_DEV_EVENT(DBF_ERR, basedev,
 			      "device not ready for request %p", req);
 		rc = BLK_STS_IOERR;
@@ -3487,8 +3488,6 @@ void dasd_generic_remove(struct ccw_device *cdev)
 	struct dasd_device *device;
 	struct dasd_block *block;
 
-	cdev->handler = NULL;
-
 	device = dasd_device_from_cdev(cdev);
 	if (IS_ERR(device)) {
 		dasd_remove_sysfs_files(cdev);
@@ -3507,6 +3506,7 @@ void dasd_generic_remove(struct ccw_device *cdev)
 	 * no quite down yet.
 	 */
 	dasd_set_target_state(device, DASD_STATE_NEW);
+	cdev->handler = NULL;
 	/* dasd_delete_device destroys the device reference. */
 	block = device->block;
 	dasd_delete_device(device);
drivers/scsi/ibmvscsi/ibmvfc.c
@@ -21,6 +21,7 @@
 #include <linux/bsg-lib.h>
 #include <asm/firmware.h>
 #include <asm/irq.h>
+#include <asm/rtas.h>
 #include <asm/vio.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -158,6 +159,9 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *);
 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
 static void ibmvfc_tgt_move_login(struct ibmvfc_target *);
 
+static void ibmvfc_release_sub_crqs(struct ibmvfc_host *);
+static void ibmvfc_init_sub_crqs(struct ibmvfc_host *);
+
 static const char *unknown_error = "unknown error";
 
 static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba,
@@ -899,6 +903,9 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
 {
 	int rc = 0;
 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
+	unsigned long flags;
+
+	ibmvfc_release_sub_crqs(vhost);
 
 	/* Re-enable the CRQ */
 	do {
@@ -910,6 +917,15 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
 	if (rc)
 		dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
 
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	spin_lock(vhost->crq.q_lock);
+	vhost->do_enquiry = 1;
+	vhost->using_channels = 0;
+	spin_unlock(vhost->crq.q_lock);
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+	ibmvfc_init_sub_crqs(vhost);
+
 	return rc;
 }
 
@@ -926,8 +942,8 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
 	unsigned long flags;
 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
 	struct ibmvfc_queue *crq = &vhost->crq;
-	struct ibmvfc_queue *scrq;
-	int i;
+
+	ibmvfc_release_sub_crqs(vhost);
 
 	/* Close the CRQ */
 	do {
@@ -947,16 +963,6 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
 	memset(crq->msgs.crq, 0, PAGE_SIZE);
 	crq->cur = 0;
 
-	if (vhost->scsi_scrqs.scrqs) {
-		for (i = 0; i < nr_scsi_hw_queues; i++) {
-			scrq = &vhost->scsi_scrqs.scrqs[i];
-			spin_lock(scrq->q_lock);
-			memset(scrq->msgs.scrq, 0, PAGE_SIZE);
-			scrq->cur = 0;
-			spin_unlock(scrq->q_lock);
-		}
-	}
-
 	/* And re-open it again */
 	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
 				crq->msg_token, PAGE_SIZE);
@@ -966,9 +972,12 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
 		dev_warn(vhost->dev, "Partner adapter not ready\n");
 	else if (rc != 0)
 		dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
+
 	spin_unlock(vhost->crq.q_lock);
 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
 
+	ibmvfc_init_sub_crqs(vhost);
+
 	return rc;
 }
 
@@ -5642,7 +5651,8 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
 	rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
 			   &scrq->cookie, &scrq->hw_irq);
 
-	if (rc) {
+	/* H_CLOSED indicates successful register, but no CRQ partner */
+	if (rc && rc != H_CLOSED) {
 		dev_warn(dev, "Error registering sub-crq: %d\n", rc);
 		if (rc == H_PARAMETER)
 			dev_warn_once(dev, "Firmware may not support MQ\n");
@@ -5675,8 +5685,8 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
 
 irq_failed:
 	do {
-		plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
-	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
+	} while (rtas_busy_delay(rc));
 reg_failed:
 	ibmvfc_free_queue(vhost, scrq);
 	LEAVE;
@@ -5694,6 +5704,7 @@ static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
 
 	free_irq(scrq->irq, scrq);
 	irq_dispose_mapping(scrq->irq);
+	scrq->irq = 0;
 
 	do {
 		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address,
@@ -5707,17 +5718,21 @@ static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
 	LEAVE;
 }
 
-static int ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
+static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
 {
 	int i, j;
 
 	ENTER;
+	if (!vhost->mq_enabled)
+		return;
+
 	vhost->scsi_scrqs.scrqs = kcalloc(nr_scsi_hw_queues,
 					  sizeof(*vhost->scsi_scrqs.scrqs),
 					  GFP_KERNEL);
-	if (!vhost->scsi_scrqs.scrqs)
-		return -1;
+	if (!vhost->scsi_scrqs.scrqs) {
+		vhost->do_enquiry = 0;
+		return;
+	}
 
 	for (i = 0; i < nr_scsi_hw_queues; i++) {
 		if (ibmvfc_register_scsi_channel(vhost, i)) {
@@ -5726,13 +5741,12 @@ static int ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
 			kfree(vhost->scsi_scrqs.scrqs);
 			vhost->scsi_scrqs.scrqs = NULL;
 			vhost->scsi_scrqs.active_queues = 0;
-			LEAVE;
-			return -1;
+			vhost->do_enquiry = 0;
+			break;
 		}
 	}
 
 	LEAVE;
-	return 0;
 }
 
 static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
@@ -5999,11 +6013,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 		goto remove_shost;
 	}
 
-	if (vhost->mq_enabled) {
-		rc = ibmvfc_init_sub_crqs(vhost);
-		if (rc)
-			dev_warn(dev, "Failed to allocate Sub-CRQs. rc=%d\n", rc);
-	}
+	ibmvfc_init_sub_crqs(vhost);
 
 	if (shost_to_fc_host(shost)->rqst_q)
 		blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
drivers/scsi/ufs/ufs-qcom.c
@@ -253,12 +253,17 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
 {
 	int ret = 0;
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	bool reenable_intr = false;
 
 	if (!host->core_reset) {
 		dev_warn(hba->dev, "%s: reset control not set\n", __func__);
 		goto out;
 	}
 
+	reenable_intr = hba->is_irq_enabled;
+	disable_irq(hba->irq);
+	hba->is_irq_enabled = false;
+
 	ret = reset_control_assert(host->core_reset);
 	if (ret) {
 		dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
@@ -280,6 +285,11 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
 
 	usleep_range(1000, 1100);
 
+	if (reenable_intr) {
+		enable_irq(hba->irq);
+		hba->is_irq_enabled = true;
+	}
+
 out:
 	return ret;
 }
drivers/scsi/ufs/ufshcd.c
@@ -98,8 +98,6 @@
 		       16, 4, buf, __len, false);                        \
 } while (0)
 
-static bool early_suspend;
-
 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
 		     const char *prefix)
 {
@@ -1537,7 +1535,7 @@ static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_enabled);
+	return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
 }
 
 static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
@@ -5008,6 +5006,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 		 * UFS device needs urgent BKOPs.
 		 */
 		if (!hba->pm_op_in_progress &&
+		    !ufshcd_eh_in_progress(hba) &&
 		    ufshcd_is_exception_event(lrbp->ucd_rsp_ptr) &&
 		    schedule_work(&hba->eeh_work)) {
 			/*
@@ -5808,13 +5807,20 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
 		ufshcd_suspend_clkscaling(hba);
 		ufshcd_clk_scaling_allow(hba, false);
 	}
+	ufshcd_scsi_block_requests(hba);
+	/* Drain ufshcd_queuecommand() */
+	down_write(&hba->clk_scaling_lock);
+	up_write(&hba->clk_scaling_lock);
+	cancel_work_sync(&hba->eeh_work);
 }
 
 static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
 {
+	ufshcd_scsi_unblock_requests(hba);
 	ufshcd_release(hba);
 	if (ufshcd_is_clkscaling_supported(hba))
 		ufshcd_clk_scaling_suspend(hba, false);
+	ufshcd_clear_ua_wluns(hba);
 	pm_runtime_put(hba->dev);
 }
 
@@ -5906,8 +5912,8 @@ static void ufshcd_err_handler(struct work_struct *work)
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	ufshcd_err_handling_prepare(hba);
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	ufshcd_scsi_block_requests(hba);
-	hba->ufshcd_state = UFSHCD_STATE_RESET;
+	if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
+		hba->ufshcd_state = UFSHCD_STATE_RESET;
 
 	/* Complete requests that have door-bell cleared by h/w */
 	ufshcd_complete_requests(hba);
@@ -6066,12 +6072,8 @@ skip_err_handling:
 	}
 	ufshcd_clear_eh_in_progress(hba);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	ufshcd_scsi_unblock_requests(hba);
 	ufshcd_err_handling_unprepare(hba);
 	up(&hba->host_sem);
-
-	if (!err && needs_reset)
-		ufshcd_clear_ua_wluns(hba);
 }
 
 /**
@@ -7882,6 +7884,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
 	unsigned long flags;
 	ktime_t start = ktime_get();
 
+	hba->ufshcd_state = UFSHCD_STATE_RESET;
+
 	ret = ufshcd_link_startup(hba);
 	if (ret)
 		goto out;
@@ -8996,11 +9000,6 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
 	int ret = 0;
 	ktime_t start = ktime_get();
 
-	if (!hba) {
-		early_suspend = true;
-		return 0;
-	}
-
 	down(&hba->host_sem);
 
 	if (!hba->is_powered)
@@ -9052,14 +9051,6 @@ int ufshcd_system_resume(struct ufs_hba *hba)
 	int ret = 0;
 	ktime_t start = ktime_get();
 
-	if (!hba)
-		return -EINVAL;
-
-	if (unlikely(early_suspend)) {
-		early_suspend = false;
-		down(&hba->host_sem);
-	}
-
 	if (!hba->is_powered || pm_runtime_suspended(hba->dev))
 		/*
 		 * Let the runtime resume take care of resuming
@@ -9092,9 +9083,6 @@ int ufshcd_runtime_suspend(struct ufs_hba *hba)
 	int ret = 0;
 	ktime_t start = ktime_get();
 
-	if (!hba)
-		return -EINVAL;
-
 	if (!hba->is_powered)
 		goto out;
 	else
@@ -9133,9 +9121,6 @@ int ufshcd_runtime_resume(struct ufs_hba *hba)
 	int ret = 0;
 	ktime_t start = ktime_get();
 
-	if (!hba)
-		return -EINVAL;
-
 	if (!hba->is_powered)
 		goto out;
 	else
|
@@ -17,8 +17,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Maintained by: Jim Gill <jgill@vmware.com>
*
*/

#include <linux/kernel.h>
@@ -17,8 +17,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Maintained by: Jim Gill <jgill@vmware.com>
*
*/

#ifndef _VMW_PVSCSI_H_
@@ -47,6 +47,11 @@ static unsigned evtchn_2l_max_channels(void)
return EVTCHN_2L_NR_CHANNELS;
}

static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu)
{
clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
}

static void evtchn_2l_bind_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
unsigned int old_cpu)
{

@@ -72,12 +77,6 @@ static bool evtchn_2l_is_pending(evtchn_port_t port)
return sync_test_bit(port, BM(&s->evtchn_pending[0]));
}

static bool evtchn_2l_test_and_set_mask(evtchn_port_t port)
{
struct shared_info *s = HYPERVISOR_shared_info;
return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
}

static void evtchn_2l_mask(evtchn_port_t port)
{
struct shared_info *s = HYPERVISOR_shared_info;

@@ -355,18 +354,27 @@ static void evtchn_2l_resume(void)
EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
}

static int evtchn_2l_percpu_deinit(unsigned int cpu)
{
memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) *
EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);

return 0;
}

static const struct evtchn_ops evtchn_ops_2l = {
.max_channels = evtchn_2l_max_channels,
.nr_channels = evtchn_2l_max_channels,
.remove = evtchn_2l_remove,
.bind_to_cpu = evtchn_2l_bind_to_cpu,
.clear_pending = evtchn_2l_clear_pending,
.set_pending = evtchn_2l_set_pending,
.is_pending = evtchn_2l_is_pending,
.test_and_set_mask = evtchn_2l_test_and_set_mask,
.mask = evtchn_2l_mask,
.unmask = evtchn_2l_unmask,
.handle_events = evtchn_2l_handle_events,
.resume = evtchn_2l_resume,
.percpu_deinit = evtchn_2l_percpu_deinit,
};

void __init xen_evtchn_2l_init(void)
@@ -98,13 +98,19 @@ struct irq_info {
short refcnt;
u8 spurious_cnt;
u8 is_accounted;
enum xen_irq_type type; /* type */
short type; /* type: IRQT_* */
u8 mask_reason; /* Why is event channel masked */
#define EVT_MASK_REASON_EXPLICIT 0x01
#define EVT_MASK_REASON_TEMPORARY 0x02
#define EVT_MASK_REASON_EOI_PENDING 0x04
u8 is_active; /* Is event just being handled? */
unsigned irq;
evtchn_port_t evtchn; /* event channel */
unsigned short cpu; /* cpu bound */
unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */
unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
u64 eoi_time; /* Time in jiffies when to EOI. */
spinlock_t lock;

union {
unsigned short virq;

@@ -154,6 +160,7 @@ static DEFINE_RWLOCK(evtchn_rwlock);
* evtchn_rwlock
* IRQ-desc lock
* percpu eoi_list_lock
* irq_info->lock
*/

static LIST_HEAD(xen_irq_list_head);

@@ -304,6 +311,8 @@ static int xen_irq_info_common_setup(struct irq_info *info,
info->irq = irq;
info->evtchn = evtchn;
info->cpu = cpu;
info->mask_reason = EVT_MASK_REASON_EXPLICIT;
spin_lock_init(&info->lock);

ret = set_evtchn_to_irq(evtchn, irq);
if (ret < 0)

@@ -377,6 +386,7 @@ static int xen_irq_info_pirq_setup(unsigned irq,
static void xen_irq_info_cleanup(struct irq_info *info)
{
set_evtchn_to_irq(info->evtchn, -1);
xen_evtchn_port_remove(info->evtchn, info->cpu);
info->evtchn = 0;
channels_on_cpu_dec(info);
}

@@ -458,6 +468,34 @@ unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
return ret;
}

static void do_mask(struct irq_info *info, u8 reason)
{
unsigned long flags;

spin_lock_irqsave(&info->lock, flags);

if (!info->mask_reason)
mask_evtchn(info->evtchn);

info->mask_reason |= reason;

spin_unlock_irqrestore(&info->lock, flags);
}

static void do_unmask(struct irq_info *info, u8 reason)
{
unsigned long flags;

spin_lock_irqsave(&info->lock, flags);

info->mask_reason &= ~reason;

if (!info->mask_reason)
unmask_evtchn(info->evtchn);

spin_unlock_irqrestore(&info->lock, flags);
}

#ifdef CONFIG_X86
static bool pirq_check_eoi_map(unsigned irq)
{

@@ -604,7 +642,7 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
}

info->eoi_time = 0;
unmask_evtchn(evtchn);
do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
}

static void xen_irq_lateeoi_worker(struct work_struct *work)

@@ -773,6 +811,12 @@ static void xen_evtchn_close(evtchn_port_t port)
BUG();
}

static void event_handler_exit(struct irq_info *info)
{
smp_store_release(&info->is_active, 0);
clear_evtchn(info->evtchn);
}

static void pirq_query_unmask(int irq)
{
struct physdev_irq_status_query irq_status;

@@ -791,14 +835,15 @@ static void pirq_query_unmask(int irq)

static void eoi_pirq(struct irq_data *data)
{
evtchn_port_t evtchn = evtchn_from_irq(data->irq);
struct irq_info *info = info_for_irq(data->irq);
evtchn_port_t evtchn = info ? info->evtchn : 0;
struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
int rc = 0;

if (!VALID_EVTCHN(evtchn))
return;

clear_evtchn(evtchn);
event_handler_exit(info);

if (pirq_needs_eoi(data->irq)) {
rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);

@@ -849,7 +894,8 @@ static unsigned int __startup_pirq(unsigned int irq)
goto err;

out:
unmask_evtchn(evtchn);
do_unmask(info, EVT_MASK_REASON_EXPLICIT);

eoi_pirq(irq_get_irq_data(irq));

return 0;

@@ -876,7 +922,7 @@ static void shutdown_pirq(struct irq_data *data)
if (!VALID_EVTCHN(evtchn))
return;

mask_evtchn(evtchn);
do_mask(info, EVT_MASK_REASON_EXPLICIT);
xen_evtchn_close(evtchn);
xen_irq_info_cleanup(info);
}

@@ -1628,6 +1674,8 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
}

info = info_for_irq(irq);
if (xchg_acquire(&info->is_active, 1))
return;

dev = (info->type == IRQT_EVTCHN) ? info->u.interdomain : NULL;
if (dev)

@@ -1720,10 +1768,10 @@ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
{
struct evtchn_bind_vcpu bind_vcpu;
int masked;
evtchn_port_t evtchn = info ? info->evtchn : 0;

if (!VALID_EVTCHN(evtchn))
return -1;

@@ -1739,7 +1787,7 @@ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
* Mask the event while changing the VCPU binding to prevent
* it being delivered on an unexpected VCPU.
*/
masked = test_and_set_mask(evtchn);
do_mask(info, EVT_MASK_REASON_TEMPORARY);

/*
* If this fails, it usually just indicates that we're dealing with a

@@ -1749,8 +1797,7 @@ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
bind_evtchn_to_cpu(evtchn, tcpu, false);

if (!masked)
unmask_evtchn(evtchn);
do_unmask(info, EVT_MASK_REASON_TEMPORARY);

return 0;
}

@@ -1789,7 +1836,7 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
unsigned int tcpu = select_target_cpu(dest);
int ret;

ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu);
if (!ret)
irq_data_update_effective_affinity(data, cpumask_of(tcpu));

@@ -1798,28 +1845,29 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,

static void enable_dynirq(struct irq_data *data)
{
evtchn_port_t evtchn = evtchn_from_irq(data->irq);
struct irq_info *info = info_for_irq(data->irq);
evtchn_port_t evtchn = info ? info->evtchn : 0;

if (VALID_EVTCHN(evtchn))
unmask_evtchn(evtchn);
do_unmask(info, EVT_MASK_REASON_EXPLICIT);
}

static void disable_dynirq(struct irq_data *data)
{
evtchn_port_t evtchn = evtchn_from_irq(data->irq);
struct irq_info *info = info_for_irq(data->irq);
evtchn_port_t evtchn = info ? info->evtchn : 0;

if (VALID_EVTCHN(evtchn))
mask_evtchn(evtchn);
do_mask(info, EVT_MASK_REASON_EXPLICIT);
}

static void ack_dynirq(struct irq_data *data)
{
evtchn_port_t evtchn = evtchn_from_irq(data->irq);
struct irq_info *info = info_for_irq(data->irq);
evtchn_port_t evtchn = info ? info->evtchn : 0;

if (!VALID_EVTCHN(evtchn))
return;

clear_evtchn(evtchn);
if (VALID_EVTCHN(evtchn))
event_handler_exit(info);
}

static void mask_ack_dynirq(struct irq_data *data)

@@ -1828,18 +1876,39 @@ static void mask_ack_dynirq(struct irq_data *data)
ack_dynirq(data);
}

static void lateeoi_ack_dynirq(struct irq_data *data)
{
struct irq_info *info = info_for_irq(data->irq);
evtchn_port_t evtchn = info ? info->evtchn : 0;

if (VALID_EVTCHN(evtchn)) {
do_mask(info, EVT_MASK_REASON_EOI_PENDING);
event_handler_exit(info);
}
}

static void lateeoi_mask_ack_dynirq(struct irq_data *data)
{
struct irq_info *info = info_for_irq(data->irq);
evtchn_port_t evtchn = info ? info->evtchn : 0;

if (VALID_EVTCHN(evtchn)) {
do_mask(info, EVT_MASK_REASON_EXPLICIT);
event_handler_exit(info);
}
}

static int retrigger_dynirq(struct irq_data *data)
{
evtchn_port_t evtchn = evtchn_from_irq(data->irq);
int masked;
struct irq_info *info = info_for_irq(data->irq);
evtchn_port_t evtchn = info ? info->evtchn : 0;

if (!VALID_EVTCHN(evtchn))
return 0;

masked = test_and_set_mask(evtchn);
do_mask(info, EVT_MASK_REASON_TEMPORARY);
set_evtchn(evtchn);
if (!masked)
unmask_evtchn(evtchn);
do_unmask(info, EVT_MASK_REASON_TEMPORARY);

return 1;
}

@@ -1938,10 +2007,11 @@ static void restore_cpu_ipis(unsigned int cpu)
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
evtchn_port_t evtchn = evtchn_from_irq(irq);
struct irq_info *info = info_for_irq(irq);
evtchn_port_t evtchn = info ? info->evtchn : 0;

if (VALID_EVTCHN(evtchn))
clear_evtchn(evtchn);
event_handler_exit(info);
}
EXPORT_SYMBOL(xen_clear_irq_pending);
void xen_set_irq_pending(int irq)

@@ -2053,8 +2123,8 @@ static struct irq_chip xen_lateeoi_chip __read_mostly = {
.irq_mask = disable_dynirq,
.irq_unmask = enable_dynirq,

.irq_ack = mask_ack_dynirq,
.irq_mask_ack = mask_ack_dynirq,
.irq_ack = lateeoi_ack_dynirq,
.irq_mask_ack = lateeoi_mask_ack_dynirq,

.irq_set_affinity = set_affinity_irq,
.irq_retrigger = retrigger_dynirq,
@@ -209,12 +209,6 @@ static bool evtchn_fifo_is_pending(evtchn_port_t port)
return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
}

static bool evtchn_fifo_test_and_set_mask(evtchn_port_t port)
{
event_word_t *word = event_word_from_port(port);
return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
}

static void evtchn_fifo_mask(evtchn_port_t port)
{
event_word_t *word = event_word_from_port(port);

@@ -423,7 +417,6 @@ static const struct evtchn_ops evtchn_ops_fifo = {
.clear_pending = evtchn_fifo_clear_pending,
.set_pending = evtchn_fifo_set_pending,
.is_pending = evtchn_fifo_is_pending,
.test_and_set_mask = evtchn_fifo_test_and_set_mask,
.mask = evtchn_fifo_mask,
.unmask = evtchn_fifo_unmask,
.handle_events = evtchn_fifo_handle_events,
@@ -14,13 +14,13 @@ struct evtchn_ops {
unsigned (*nr_channels)(void);

int (*setup)(evtchn_port_t port);
void (*remove)(evtchn_port_t port, unsigned int cpu);
void (*bind_to_cpu)(evtchn_port_t evtchn, unsigned int cpu,
unsigned int old_cpu);

void (*clear_pending)(evtchn_port_t port);
void (*set_pending)(evtchn_port_t port);
bool (*is_pending)(evtchn_port_t port);
bool (*test_and_set_mask)(evtchn_port_t port);
void (*mask)(evtchn_port_t port);
void (*unmask)(evtchn_port_t port);

@@ -54,6 +54,13 @@ static inline int xen_evtchn_port_setup(evtchn_port_t evtchn)
return 0;
}

static inline void xen_evtchn_port_remove(evtchn_port_t evtchn,
unsigned int cpu)
{
if (evtchn_ops->remove)
evtchn_ops->remove(evtchn, cpu);
}

static inline void xen_evtchn_port_bind_to_cpu(evtchn_port_t evtchn,
unsigned int cpu,
unsigned int old_cpu)

@@ -76,11 +83,6 @@ static inline bool test_evtchn(evtchn_port_t port)
return evtchn_ops->is_pending(port);
}

static inline bool test_and_set_mask(evtchn_port_t port)
{
return evtchn_ops->test_and_set_mask(port);
}

static inline void mask_evtchn(evtchn_port_t port)
{
return evtchn_ops->mask(port);
@@ -133,20 +133,26 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
if (NULL == add)
return NULL;

add->grants = kvcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
add->map_ops = kvcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
add->unmap_ops = kvcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
add->kmap_ops = kvcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
add->kunmap_ops = kvcalloc(count,
sizeof(add->kunmap_ops[0]), GFP_KERNEL);
add->grants = kvmalloc_array(count, sizeof(add->grants[0]),
GFP_KERNEL);
add->map_ops = kvmalloc_array(count, sizeof(add->map_ops[0]),
GFP_KERNEL);
add->unmap_ops = kvmalloc_array(count, sizeof(add->unmap_ops[0]),
GFP_KERNEL);
add->pages = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
if (NULL == add->grants ||
NULL == add->map_ops ||
NULL == add->unmap_ops ||
NULL == add->kmap_ops ||
NULL == add->kunmap_ops ||
NULL == add->pages)
goto err;
if (use_ptemod) {
add->kmap_ops = kvmalloc_array(count, sizeof(add->kmap_ops[0]),
GFP_KERNEL);
add->kunmap_ops = kvmalloc_array(count, sizeof(add->kunmap_ops[0]),
GFP_KERNEL);
if (NULL == add->kmap_ops || NULL == add->kunmap_ops)
goto err;
}

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
add->dma_flags = dma_flags;

@@ -183,10 +189,14 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
goto err;

for (i = 0; i < count; i++) {
add->map_ops[i].handle = -1;
add->unmap_ops[i].handle = -1;
add->kmap_ops[i].handle = -1;
add->kunmap_ops[i].handle = -1;
add->grants[i].domid = DOMID_INVALID;
add->grants[i].ref = INVALID_GRANT_REF;
add->map_ops[i].handle = INVALID_GRANT_HANDLE;
add->unmap_ops[i].handle = INVALID_GRANT_HANDLE;
if (use_ptemod) {
add->kmap_ops[i].handle = INVALID_GRANT_HANDLE;
add->kunmap_ops[i].handle = INVALID_GRANT_HANDLE;
}
}

add->index = 0;

@@ -274,7 +284,7 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
map->grants[pgnr].ref,
map->grants[pgnr].domid);
gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
-1 /* handle */);
INVALID_GRANT_HANDLE);
return 0;
}

@@ -292,7 +302,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)

if (!use_ptemod) {
/* Note: it could already be mapped */
if (map->map_ops[0].handle != -1)
if (map->map_ops[0].handle != INVALID_GRANT_HANDLE)
return 0;
for (i = 0; i < map->count; i++) {
unsigned long addr = (unsigned long)

@@ -301,7 +311,7 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
map->grants[i].ref,
map->grants[i].domid);
gnttab_set_unmap_op(&map->unmap_ops[i], addr,
map->flags, -1 /* handle */);
map->flags, INVALID_GRANT_HANDLE);
}
} else {
/*

@@ -327,13 +337,13 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map)
map->grants[i].ref,
map->grants[i].domid);
gnttab_set_unmap_op(&map->kunmap_ops[i], address,
flags, -1);
flags, INVALID_GRANT_HANDLE);
}
}

pr_debug("map %d+%d\n", map->index, map->count);
err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
map->pages, map->count);
err = gnttab_map_refs(map->map_ops, map->kmap_ops, map->pages,
map->count);

for (i = 0; i < map->count; i++) {
if (map->map_ops[i].status == GNTST_okay)

@@ -385,7 +395,7 @@ static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
pr_debug("unmap handle=%d st=%d\n",
map->unmap_ops[offset+i].handle,
map->unmap_ops[offset+i].status);
map->unmap_ops[offset+i].handle = -1;
map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
}
return err;
}

@@ -401,13 +411,15 @@ static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
* already unmapped some of the grants. Only unmap valid ranges.
*/
while (pages && !err) {
while (pages && map->unmap_ops[offset].handle == -1) {
while (pages &&
map->unmap_ops[offset].handle == INVALID_GRANT_HANDLE) {
offset++;
pages--;
}
range = 0;
while (range < pages) {
if (map->unmap_ops[offset+range].handle == -1)
if (map->unmap_ops[offset + range].handle ==
INVALID_GRANT_HANDLE)
break;
range++;
}
@@ -118,13 +118,22 @@ int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
if (!(mode & FMODE_EXCL)) {
int err = bd_prepare_to_claim(bdev, truncate_bdev_range);
if (err)
return err;
goto invalidate;
}

truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
if (!(mode & FMODE_EXCL))
bd_abort_claiming(bdev, truncate_bdev_range);
return 0;

invalidate:
/*
* Someone else has handle exclusively open. Try invalidating instead.
* The 'end' argument is inclusive so the rounding is safe.
*/
return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
lstart >> PAGE_SHIFT,
lend >> PAGE_SHIFT);
}

static void set_init_blocksize(struct block_device *bdev)

@@ -423,7 +432,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
dio->size += bio->bi_iter.bi_size;
pos += bio->bi_iter.bi_size;

nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_PAGES);
nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
if (!nr_pages) {
bool polled = false;

@@ -491,8 +500,8 @@ blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
if (!iov_iter_count(iter))
return 0;

nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_PAGES + 1);
if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_PAGES)
nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_VECS)
return __blkdev_direct_IO_simple(iocb, iter, nr_pages);

return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
@@ -3059,7 +3059,7 @@ struct bio *btrfs_bio_alloc(u64 first_byte)
{
struct bio *bio;

bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &btrfs_bioset);
bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &btrfs_bioset);
bio->bi_iter.bi_sector = first_byte >> 9;
btrfs_io_bio_init(btrfs_io_bio(bio));
return bio;
@@ -1428,7 +1428,7 @@ static void scrub_recheck_block_on_raid56(struct btrfs_fs_info *fs_info,
if (!first_page->dev->bdev)
goto out;

bio = btrfs_io_bio_alloc(BIO_MAX_PAGES);
bio = btrfs_io_bio_alloc(BIO_MAX_VECS);
bio_set_dev(bio, first_page->dev->bdev);

for (page_num = 0; page_num < sblock->page_count; page_num++) {
@@ -378,7 +378,7 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type

attr = to_attr(dentry);
if (!attr)
goto out_put_item;
goto out_free_buffer;

if (type & CONFIGFS_ITEM_BIN_ATTR) {
buffer->bin_attr = to_bin_attr(dentry);

@@ -391,7 +391,7 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type
/* Grab the module reference for this attribute if we have one */
error = -ENODEV;
if (!try_module_get(buffer->owner))
goto out_put_item;
goto out_free_buffer;

error = -EACCES;
if (!buffer->item->ci_type)

@@ -435,8 +435,6 @@ static int __configfs_open_file(struct inode *inode, struct file *file, int type

out_put_module:
module_put(buffer->owner);
out_put_item:
config_item_put(buffer->item);
out_free_buffer:
up_read(&frag->frag_sem);
kfree(buffer);
@@ -52,7 +52,7 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
int num_pages = 0;

/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
bio = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
bio = bio_alloc(GFP_NOFS, BIO_MAX_VECS);

while (len) {
unsigned int blocks_this_page = min(len, blocks_per_page);

@@ -74,7 +74,7 @@ static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
len -= blocks_this_page;
lblk += blocks_this_page;
pblk += blocks_this_page;
if (num_pages == BIO_MAX_PAGES || !len ||
if (num_pages == BIO_MAX_VECS || !len ||
!fscrypt_mergeable_bio(bio, inode, lblk)) {
err = submit_bio_wait(bio);
if (err)

@@ -126,7 +126,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
return fscrypt_zeroout_range_inline_crypt(inode, lblk, pblk,
len);

BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_PAGES);
BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_VECS);
nr_pages = min_t(unsigned int, ARRAY_SIZE(pages),
(len + blocks_per_page - 1) >> blocks_per_page_bits);
@@ -1235,7 +1235,7 @@ submit_bio_retry:
}

if (!bio) {
bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);

bio->bi_end_io = z_erofs_decompressqueue_endio;
bio_set_dev(bio, sb->s_bdev);
@@ -398,7 +398,7 @@ static void io_submit_init_bio(struct ext4_io_submit *io,
* bio_alloc will _always_ be able to allocate a bio if
* __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
*/
bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);
fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio_set_dev(bio, bh->b_bdev);
@@ -292,7 +292,7 @@ void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
f2fs_put_page(page, 0);

if (readahead)
f2fs_ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true);
f2fs_ra_meta_pages(sbi, index, BIO_MAX_VECS, META_POR, true);
}

static int __f2fs_write_meta_page(struct page *page,
@@ -862,7 +862,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
alloc_new:
if (!bio) {
bio = __bio_alloc(fio, BIO_MAX_PAGES);
bio = __bio_alloc(fio, BIO_MAX_VECS);
__attach_io_flag(fio);
f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
fio->page->index, fio, GFP_NOIO);

@@ -937,7 +937,7 @@ alloc_new:
fio->retry = true;
goto skip;
}
io->bio = __bio_alloc(fio, BIO_MAX_PAGES);
io->bio = __bio_alloc(fio, BIO_MAX_VECS);
f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
bio_page->index, fio, GFP_NOIO);
io->fio = *fio;
@@ -4381,7 +4381,7 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
block_t total_node_blocks = 0;

do {
readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
META_SIT, true);

start = start_blk * sit_i->sents_per_block;
@@ -851,7 +851,7 @@ static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
else if (type == NODE)
return 8 * sbi->blocks_per_seg;
else if (type == META)
return 8 * BIO_MAX_PAGES;
return 8 * BIO_MAX_VECS;
else
return 0;
}

@@ -868,7 +868,7 @@ static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
return 0;

nr_to_write = wbc->nr_to_write;
desired = BIO_MAX_PAGES;
desired = BIO_MAX_VECS;
if (type == NODE)
desired <<= 1;
@@ -753,9 +753,9 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
case Opt_io_size_bits:
if (args->from && match_int(args, &arg))
return -EINVAL;
if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) {
if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_VECS)) {
f2fs_warn(sbi, "Not support %d, larger than %d",
1 << arg, BIO_MAX_PAGES);
1 << arg, BIO_MAX_VECS);
return -EINVAL;
}
F2FS_OPTION(sbi).write_io_size_bits = arg;
@@ -998,12 +998,16 @@ static void trans_drain(struct gfs2_trans *tr)
while (!list_empty(head)) {
bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
list_del_init(&bd->bd_list);
if (!list_empty(&bd->bd_ail_st_list))
gfs2_remove_from_ail(bd);
kmem_cache_free(gfs2_bufdata_cachep, bd);
}
head = &tr->tr_databuf;
while (!list_empty(head)) {
bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
list_del_init(&bd->bd_list);
if (!list_empty(&bd->bd_ail_st_list))
gfs2_remove_from_ail(bd);
kmem_cache_free(gfs2_bufdata_cachep, bd);
}
}

@@ -1032,7 +1036,7 @@ repeat:
* Do this check while holding the log_flush_lock to prevent new
* buffers from being added to the ail via gfs2_pin()
*/
if (gfs2_withdrawn(sdp))
if (gfs2_withdrawn(sdp) || !test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
goto out;

/* Log might have been flushed while we waited for the flush lock */
@@ -267,7 +267,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
bio_end_io_t *end_io)
{
struct super_block *sb = sdp->sd_vfs;
struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_VECS);

bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
bio_set_dev(bio, sb->s_bdev);
@@ -1539,9 +1539,7 @@ static int gfs2_reconfigure(struct fs_context *fc)
return -EINVAL;

if (fc->sb_flags & SB_RDONLY) {
error = gfs2_make_fs_ro(sdp);
if (error)
errorfc(fc, "unable to remount read-only");
gfs2_make_fs_ro(sdp);
} else {
error = gfs2_make_fs_rw(sdp);
if (error)
@@ -587,9 +587,8 @@ out:
* Returns: errno
*/

int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
int error = 0;
int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

gfs2_flush_delete_work(sdp);

@@ -624,8 +623,6 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)

if (!log_write_allowed)
sdp->sd_vfs->s_flags |= SB_RDONLY;

return error;
}

/**

@@ -637,7 +634,6 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
static void gfs2_put_super(struct super_block *sb)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
int error;
struct gfs2_jdesc *jd;

/* No more recovery requests */

@@ -658,9 +654,7 @@ restart:
spin_unlock(&sdp->sd_jindex_spin);

if (!sb_rdonly(sb)) {
error = gfs2_make_fs_ro(sdp);
if (error)
gfs2_io_error(sdp);
gfs2_make_fs_ro(sdp);
}
WARN_ON(gfs2_withdrawing(sdp));
@@ -34,7 +34,7 @@ extern int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
struct gfs2_inode **ipp);

extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
extern int gfs2_make_fs_ro(struct gfs2_sbd *sdp);
extern void gfs2_make_fs_ro(struct gfs2_sbd *sdp);
extern void gfs2_online_uevent(struct gfs2_sbd *sdp);
extern int gfs2_statfs_init(struct gfs2_sbd *sdp);
extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
@@ -169,6 +169,8 @@ static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,
bd->bd_bh = bh;
bd->bd_gl = gl;
INIT_LIST_HEAD(&bd->bd_list);
INIT_LIST_HEAD(&bd->bd_ail_st_list);
INIT_LIST_HEAD(&bd->bd_ail_gl_list);
bh->b_private = bd;
return bd;
}
@@ -119,17 +119,22 @@ void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh)
static void signal_our_withdraw(struct gfs2_sbd *sdp)
{
struct gfs2_glock *live_gl = sdp->sd_live_gh.gh_gl;
struct inode *inode = sdp->sd_jdesc->jd_inode;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_glock *i_gl = ip->i_gl;
u64 no_formal_ino = ip->i_no_formal_ino;
struct inode *inode;
struct gfs2_inode *ip;
struct gfs2_glock *i_gl;
u64 no_formal_ino;
int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
int ret = 0;
int tries;

if (test_bit(SDF_NORECOVERY, &sdp->sd_flags))
if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) || !sdp->sd_jdesc)
return;

inode = sdp->sd_jdesc->jd_inode;
ip = GFS2_I(inode);
i_gl = ip->i_gl;
no_formal_ino = ip->i_no_formal_ino;

/* Prevent any glock dq until withdraw recovery is complete */
set_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
/*

@@ -156,7 +161,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
ret = 0;
}
if (!ret)
ret = gfs2_make_fs_ro(sdp);
gfs2_make_fs_ro(sdp);
gfs2_freeze_unlock(&freeze_gh);
}
fs/io-wq.c
@@ -110,7 +110,6 @@ struct io_wq {
io_wq_work_fn *do_work;

struct task_struct *manager;
struct user_struct *user;

struct io_wq_hash *hash;

@@ -592,7 +591,7 @@ static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
tsk->pf_io_worker = worker;
worker->task = tsk;
set_cpus_allowed_ptr(tsk, cpumask_of_node(wqe->node));
tsk->flags |= PF_NOFREEZE | PF_NO_SETAFFINITY;
tsk->flags |= PF_NO_SETAFFINITY;

raw_spin_lock_irq(&wqe->lock);
hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);

@@ -710,7 +709,6 @@ static int io_wq_manager(void *data)
set_current_state(TASK_INTERRUPTIBLE);
io_wq_check_workers(wq);
schedule_timeout(HZ);
try_to_freeze();
if (fatal_signal_pending(current))
set_bit(IO_WQ_BIT_EXIT, &wq->state);
} while (!test_bit(IO_WQ_BIT_EXIT, &wq->state));

@@ -722,9 +720,9 @@ static int io_wq_manager(void *data)
io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
rcu_read_unlock();

/* we might not ever have created any workers */
if (atomic_read(&wq->worker_refs))
wait_for_completion(&wq->worker_done);
if (atomic_dec_and_test(&wq->worker_refs))
complete(&wq->worker_done);
wait_for_completion(&wq->worker_done);

spin_lock_irq(&wq->hash->wait.lock);
for_each_node(node)

@@ -774,7 +772,10 @@ static int io_wq_fork_manager(struct io_wq *wq)
if (wq->manager)
return 0;

reinit_completion(&wq->worker_done);
WARN_ON_ONCE(test_bit(IO_WQ_BIT_EXIT, &wq->state));

init_completion(&wq->worker_done);
atomic_set(&wq->worker_refs, 1);
tsk = create_io_thread(io_wq_manager, wq, NUMA_NO_NODE);
if (!IS_ERR(tsk)) {
wq->manager = get_task_struct(tsk);

@@ -782,6 +783,9 @@ static int io_wq_fork_manager(struct io_wq *wq)
return 0;
}

if (atomic_dec_and_test(&wq->worker_refs))
complete(&wq->worker_done);

return PTR_ERR(tsk);
}

@@ -794,8 +798,7 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
/* Can only happen if manager creation fails after exec */
if (io_wq_fork_manager(wqe->wq) ||
test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state)) {
work->flags |= IO_WQ_WORK_CANCEL;
wqe->wq->do_work(work);
io_run_cancel(work, wqe);
return;
}

@@ -1018,13 +1021,9 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
init_completion(&wq->exited);
refcount_set(&wq->refs, 1);

init_completion(&wq->worker_done);
atomic_set(&wq->worker_refs, 0);

ret = io_wq_fork_manager(wq);
if (!ret)
return wq;

err:
io_wq_put_hash(data->hash);
cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
@@ -79,8 +79,8 @@ static inline void wq_list_del(struct io_wq_work_list *list,

struct io_wq_work {
struct io_wq_work_node list;
const struct cred *creds;
unsigned flags;
unsigned short personality;
};

static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
fs/io_uring.c
File diff suppressed because it is too large
@@ -1221,7 +1221,7 @@ iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
struct iomap_ioend *ioend;
struct bio *bio;

bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &iomap_ioend_bioset);
bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &iomap_ioend_bioset);
bio_set_dev(bio, wpc->iomap.bdev);
bio->bi_iter.bi_sector = sector;
bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

@@ -1252,7 +1252,7 @@ iomap_chain_bio(struct bio *prev)
{
struct bio *new;

new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
new = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
bio_copy_dev(new, prev);/* also copies over blkcg information */
new->bi_iter.bi_sector = bio_end_sector(prev);
new->bi_opf = prev->bi_opf;
@@ -300,7 +300,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
*/
bio_opf = iomap_dio_bio_opflags(dio, iomap, use_fua);

nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_PAGES);
nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);
do {
size_t n;
if (dio->error) {

@@ -344,7 +344,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
copied += n;

nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter,
BIO_MAX_PAGES);
BIO_MAX_VECS);
iomap_dio_submit_bio(dio, iomap, bio, pos);
pos += n;
} while (nr_pages);
@@ -652,7 +652,7 @@ alloc_new:
goto out;
}
bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
BIO_MAX_VECS, GFP_NOFS|__GFP_HIGH);
if (bio == NULL)
goto confused;
@@ -127,7 +127,7 @@ config PNFS_BLOCK
config PNFS_FLEXFILE_LAYOUT
tristate
depends on NFS_V4_1 && NFS_V3
default m
default NFS_V4

config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN
string "NFSv4.1 Implementation ID Domain"
fs/nfs/dir.c
@@ -81,8 +81,9 @@ static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir
spin_lock(&dir->i_lock);
if (list_empty(&nfsi->open_files) &&
(nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER))
nfsi->cache_validity |= NFS_INO_INVALID_DATA |
NFS_INO_REVAL_FORCED;
nfs_set_cache_invalid(dir,
NFS_INO_INVALID_DATA |
NFS_INO_REVAL_FORCED);
list_add(&ctx->list, &nfsi->open_files);
spin_unlock(&dir->i_lock);
return ctx;

@@ -1401,6 +1402,13 @@ out_force:
goto out;
}

static void nfs_mark_dir_for_revalidate(struct inode *inode)
{
spin_lock(&inode->i_lock);
nfs_set_cache_invalid(inode, NFS_INO_REVAL_PAGECACHE);
spin_unlock(&inode->i_lock);
}

/*
* We judge how long we want to trust negative
* dentries by looking at the parent inode mtime.

@@ -1435,19 +1443,14 @@ nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
__func__, dentry);
return 1;
case 0:
nfs_mark_for_revalidate(dir);
if (inode && S_ISDIR(inode->i_mode)) {
/* Purge readdir caches. */
nfs_zap_caches(inode);
/*
* We can't d_drop the root of a disconnected tree:
* its d_hash is on the s_anon list and d_drop() would hide
* it from shrink_dcache_for_unmount(), leading to busy
* inodes on unmount and further oopses.
*/
if (IS_ROOT(dentry))
return 1;
}
/*
* We can't d_drop the root of a disconnected tree:
* its d_hash is on the s_anon list and d_drop() would hide
* it from shrink_dcache_for_unmount(), leading to busy
* inodes on unmount and further oopses.
*/
if (inode && IS_ROOT(dentry))
return 1;
dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
__func__, dentry);
return 0;

@@ -1525,6 +1528,13 @@ out:
nfs_free_fattr(fattr);
nfs_free_fhandle(fhandle);
nfs4_label_free(label);

/*
* If the lookup failed despite the dentry change attribute being
* a match, then we should revalidate the directory cache.
*/
if (!ret && nfs_verify_change_attribute(dir, dentry->d_time))
nfs_mark_dir_for_revalidate(dir);
return nfs_lookup_revalidate_done(dir, dentry, inode, ret);
}

@@ -1567,7 +1577,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
error = nfs_lookup_verify_inode(inode, flags);
if (error) {
if (error == -ESTALE)
nfs_zap_caches(dir);
nfs_mark_dir_for_revalidate(dir);
goto out_bad;
}
nfs_advise_use_readdirplus(dir);

@@ -1691,10 +1701,9 @@ static void nfs_drop_nlink(struct inode *inode)
if (inode->i_nlink > 0)
drop_nlink(inode);
NFS_I(inode)->attr_gencount = nfs_inc_attr_generation_counter();
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_CHANGE
| NFS_INO_INVALID_CTIME
| NFS_INO_INVALID_OTHER
| NFS_INO_REVAL_FORCED;
nfs_set_cache_invalid(
inode, NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME |
NFS_INO_INVALID_OTHER | NFS_INO_REVAL_FORCED);
spin_unlock(&inode->i_lock);
}

@@ -1706,7 +1715,7 @@ static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
{
if (S_ISDIR(inode->i_mode))
/* drop any readdir cache as it could easily be old */
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA;
nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);

if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
nfs_complete_unlink(dentry, inode);

@@ -2064,7 +2073,6 @@ out:
dput(parent);
return d;
out_error:
nfs_mark_for_revalidate(dir);
d = ERR_PTR(error);
goto out;
}

@@ -2473,9 +2481,9 @@ int nfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
if (error == 0) {
spin_lock(&old_inode->i_lock);
NFS_I(old_inode)->attr_gencount = nfs_inc_attr_generation_counter();
NFS_I(old_inode)->cache_validity |= NFS_INO_INVALID_CHANGE
| NFS_INO_INVALID_CTIME
| NFS_INO_REVAL_FORCED;
nfs_set_cache_invalid(old_inode, NFS_INO_INVALID_CHANGE |
NFS_INO_INVALID_CTIME |
NFS_INO_REVAL_FORCED);
spin_unlock(&old_inode->i_lock);
}
out:
@@ -207,7 +207,7 @@ static bool nfs_has_xattr_cache(const struct nfs_inode *nfsi)
}
#endif

static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
{
struct nfs_inode *nfsi = NFS_I(inode);
bool have_delegation = NFS_PROTO(inode)->have_delegation(inode, FMODE_READ);

@@ -229,6 +229,7 @@ static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
if (flags & NFS_INO_INVALID_DATA)
nfs_fscache_invalidate(inode);
}
EXPORT_SYMBOL_GPL(nfs_set_cache_invalid);

/*
* Invalidate the local caches

@@ -1067,8 +1068,8 @@ void nfs_inode_attach_open_context(struct nfs_open_context *ctx)
spin_lock(&inode->i_lock);
if (list_empty(&nfsi->open_files) &&
(nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER))
nfsi->cache_validity |= NFS_INO_INVALID_DATA |
NFS_INO_REVAL_FORCED;
nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA |
NFS_INO_REVAL_FORCED);
list_add_tail_rcu(&ctx->list, &nfsi->open_files);
spin_unlock(&inode->i_lock);
}
@@ -411,7 +411,8 @@ extern int nfs_write_inode(struct inode *, struct writeback_control *);
extern int nfs_drop_inode(struct inode *);
extern void nfs_clear_inode(struct inode *);
extern void nfs_evict_inode(struct inode *);
void nfs_zap_acl_cache(struct inode *inode);
extern void nfs_zap_acl_cache(struct inode *inode);
extern void nfs_set_cache_invalid(struct inode *inode, unsigned long flags);
extern bool nfs_check_cache_invalid(struct inode *, unsigned long);
extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode);
extern int nfs_wait_atomic_killable(atomic_t *p, unsigned int mode);
@@ -36,6 +36,7 @@
#define NFS3_pagepad_sz (1) /* Page padding */
#define NFS3_fhandle_sz (1+16)
#define NFS3_fh_sz (NFS3_fhandle_sz) /* shorthand */
#define NFS3_post_op_fh_sz (1+NFS3_fh_sz)
#define NFS3_sattr_sz (15)
#define NFS3_filename_sz (1+(NFS3_MAXNAMLEN>>2))
#define NFS3_path_sz (1+(NFS3_MAXPATHLEN>>2))

@@ -73,7 +74,7 @@
#define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1+NFS3_pagepad_sz)
#define NFS3_readres_sz (1+NFS3_post_op_attr_sz+3+NFS3_pagepad_sz)
#define NFS3_writeres_sz (1+NFS3_wcc_data_sz+4)
#define NFS3_createres_sz (1+NFS3_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
#define NFS3_createres_sz (1+NFS3_post_op_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
#define NFS3_renameres_sz (1+(2 * NFS3_wcc_data_sz))
#define NFS3_linkres_sz (1+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
#define NFS3_readdirres_sz (1+NFS3_post_op_attr_sz+2+NFS3_pagepad_sz)
@@ -357,13 +357,15 @@ static ssize_t _nfs42_proc_copy(struct file *src,
truncate_pagecache_range(dst_inode, pos_dst,
pos_dst + res->write_res.count);
spin_lock(&dst_inode->i_lock);
NFS_I(dst_inode)->cache_validity |= (NFS_INO_REVAL_PAGECACHE |
NFS_INO_REVAL_FORCED | NFS_INO_INVALID_SIZE |
NFS_INO_INVALID_ATTR | NFS_INO_INVALID_DATA);
nfs_set_cache_invalid(
dst_inode, NFS_INO_REVAL_PAGECACHE | NFS_INO_REVAL_FORCED |
NFS_INO_INVALID_SIZE | NFS_INO_INVALID_ATTR |
NFS_INO_INVALID_DATA);
spin_unlock(&dst_inode->i_lock);
spin_lock(&src_inode->i_lock);
NFS_I(src_inode)->cache_validity |= (NFS_INO_REVAL_PAGECACHE |
NFS_INO_REVAL_FORCED | NFS_INO_INVALID_ATIME);
nfs_set_cache_invalid(src_inode, NFS_INO_REVAL_PAGECACHE |
NFS_INO_REVAL_FORCED |
NFS_INO_INVALID_ATIME);
spin_unlock(&src_inode->i_lock);
status = res->write_res.count;
out:
@@ -1169,14 +1169,14 @@ int nfs4_call_sync(struct rpc_clnt *clnt,
static void
nfs4_inc_nlink_locked(struct inode *inode)
{
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER);
inc_nlink(inode);
}

static void
nfs4_dec_nlink_locked(struct inode *inode)
{
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER);
drop_nlink(inode);
}

@@ -1187,35 +1187,31 @@ nfs4_update_changeattr_locked(struct inode *inode,
{
struct nfs_inode *nfsi = NFS_I(inode);

nfsi->cache_validity |= NFS_INO_INVALID_CTIME
| NFS_INO_INVALID_MTIME
| cache_validity;
cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME;

if (cinfo->atomic && cinfo->before == inode_peek_iversion_raw(inode)) {
nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
nfsi->attrtimeo_timestamp = jiffies;
} else {
if (S_ISDIR(inode->i_mode)) {
nfsi->cache_validity |= NFS_INO_INVALID_DATA;
cache_validity |= NFS_INO_INVALID_DATA;
nfs_force_lookup_revalidate(inode);
} else {
if (!NFS_PROTO(inode)->have_delegation(inode,
FMODE_READ))
nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE;
cache_validity |= NFS_INO_REVAL_PAGECACHE;
}

if (cinfo->before != inode_peek_iversion_raw(inode))
nfsi->cache_validity |= NFS_INO_INVALID_ACCESS |
NFS_INO_INVALID_ACL |
NFS_INO_INVALID_XATTR;
cache_validity |= NFS_INO_INVALID_ACCESS |
NFS_INO_INVALID_ACL |
NFS_INO_INVALID_XATTR;
}
inode_set_iversion_raw(inode, cinfo->after);
nfsi->read_cache_jiffies = timestamp;
nfsi->attr_gencount = nfs_inc_attr_generation_counter();
nfs_set_cache_invalid(inode, cache_validity);
nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE;

if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
nfs_fscache_invalidate(inode);
}

void

@@ -5893,6 +5889,9 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
int ret, i;

/* You can't remove system.nfs4_acl: */
if (buflen == 0)
return -EINVAL;
if (!nfs4_server_supports_acls(server))
return -EOPNOTSUPP;
if (npages > ARRAY_SIZE(pages))

@@ -5915,9 +5914,9 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
* so mark the attribute cache invalid.
*/
spin_lock(&inode->i_lock);
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_CHANGE
| NFS_INO_INVALID_CTIME
| NFS_INO_REVAL_FORCED;
nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
NFS_INO_INVALID_CTIME |
NFS_INO_REVAL_FORCED);
spin_unlock(&inode->i_lock);
nfs_access_zap_cache(inode);
nfs_zap_acl_cache(inode);

@@ -5969,7 +5968,7 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
return ret;
if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
return -ENOENT;
return 0;
return label.len;
}

static int nfs4_get_security_label(struct inode *inode, void *buf,
@@ -500,9 +500,9 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
spin_lock(&inode->i_lock);
NFS_I(inode)->attr_gencount = nfs_inc_attr_generation_counter();
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_CHANGE
| NFS_INO_INVALID_CTIME
| NFS_INO_REVAL_FORCED;
nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
NFS_INO_INVALID_CTIME |
NFS_INO_REVAL_FORCED);
spin_unlock(&inode->i_lock);
d_move(dentry, sdentry);
break;
@@ -303,9 +303,9 @@ static void nfs_set_pageerror(struct address_space *mapping)
nfs_zap_mapping(mapping->host, mapping);
/* Force file size revalidation */
spin_lock(&inode->i_lock);
NFS_I(inode)->cache_validity |= NFS_INO_REVAL_FORCED |
NFS_INO_REVAL_PAGECACHE |
NFS_INO_INVALID_SIZE;
nfs_set_cache_invalid(inode, NFS_INO_REVAL_FORCED |
NFS_INO_REVAL_PAGECACHE |
NFS_INO_INVALID_SIZE);
spin_unlock(&inode->i_lock);
}

@@ -1604,7 +1604,7 @@ static int nfs_writeback_done(struct rpc_task *task,
/* Deal with the suid/sgid bit corner case */
if (nfs_should_remove_suid(inode)) {
spin_lock(&inode->i_lock);
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER);
spin_unlock(&inode->i_lock);
}
return 0;
@@ -399,7 +399,7 @@ static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
{
wi->bio = NULL;
wi->rest_blocks = segbuf->sb_sum.nblocks;
wi->max_pages = BIO_MAX_PAGES;
wi->max_pages = BIO_MAX_VECS;
wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
wi->start = wi->end = 0;
wi->blocknr = segbuf->sb_pseg_start;
@@ -87,7 +87,7 @@ static int squashfs_bio_read(struct super_block *sb, u64 index, int length,
int error, i;
struct bio *bio;

if (page_count <= BIO_MAX_PAGES)
if (page_count <= BIO_MAX_VECS)
bio = bio_alloc(GFP_NOIO, page_count);
else
bio = bio_kmalloc(GFP_NOIO, page_count);
@@ -684,7 +684,7 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
iov_iter_truncate(from, max);

nr_pages = iov_iter_npages(from, BIO_MAX_PAGES);
nr_pages = iov_iter_npages(from, BIO_MAX_VECS);
if (!nr_pages)
return 0;
Some files were not shown because too many files have changed in this diff