Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acnames.h | 4
-rw-r--r--  include/acpi/acpixf.h | 3
-rw-r--r--  include/acpi/actbl1.h | 19
-rw-r--r--  include/acpi/actbl3.h | 9
-rw-r--r--  include/asm-generic/dma-mapping-common.h | 17
-rw-r--r--  include/asm-generic/gpio.h | 2
-rw-r--r--  include/asm-generic/io.h | 2
-rw-r--r--  include/asm-generic/irq_work.h | 10
-rw-r--r--  include/asm-generic/pgtable.h | 31
-rw-r--r--  include/asm-generic/sections.h | 4
-rw-r--r--  include/dt-bindings/sound/cs35l32.h | 26
-rw-r--r--  include/linux/acpi.h | 2
-rw-r--r--  include/linux/aer.h | 2
-rw-r--r--  include/linux/ahci_platform.h | 13
-rw-r--r--  include/linux/ata_platform.h | 5
-rw-r--r--  include/linux/atmel-mci.h | 2
-rw-r--r--  include/linux/balloon_compaction.h | 169
-rw-r--r--  include/linux/blk-mq.h | 1
-rw-r--r--  include/linux/blkdev.h | 2
-rw-r--r--  include/linux/cgroup.h | 26
-rw-r--r--  include/linux/compaction.h | 24
-rw-r--r--  include/linux/cpufreq.h | 3
-rw-r--r--  include/linux/cpuset.h | 3
-rw-r--r--  include/linux/dma-mapping.h | 26
-rw-r--r--  include/linux/flex_proportions.h | 5
-rw-r--r--  include/linux/fs.h | 59
-rw-r--r--  include/linux/fsl_ifc.h | 6
-rw-r--r--  include/linux/genalloc.h | 7
-rw-r--r--  include/linux/gfp.h | 2
-rw-r--r--  include/linux/gpio/driver.h | 8
-rw-r--r--  include/linux/huge_mm.h | 2
-rw-r--r--  include/linux/interrupt.h | 5
-rw-r--r--  include/linux/iommu.h | 1
-rw-r--r--  include/linux/ioport.h | 5
-rw-r--r--  include/linux/irq.h | 8
-rw-r--r--  include/linux/irq_work.h | 3
-rw-r--r--  include/linux/irqchip/arm-gic.h | 16
-rw-r--r--  include/linux/irqdesc.h | 29
-rw-r--r--  include/linux/kernel.h | 56
-rw-r--r--  include/linux/libata.h | 12
-rw-r--r--  include/linux/lockd/lockd.h | 1
-rw-r--r--  include/linux/memcontrol.h | 15
-rw-r--r--  include/linux/memory_hotplug.h | 1
-rw-r--r--  include/linux/mempolicy.h | 7
-rw-r--r--  include/linux/mfd/tmio.h | 25
-rw-r--r--  include/linux/migrate.h | 14
-rw-r--r--  include/linux/mm.h | 38
-rw-r--r--  include/linux/mmc/card.h | 10
-rw-r--r--  include/linux/mmc/dw_mmc.h | 4
-rw-r--r--  include/linux/mmc/host.h | 12
-rw-r--r--  include/linux/mmc/mmc.h | 7
-rw-r--r--  include/linux/mmc/sdhci.h | 3
-rw-r--r--  include/linux/mmc/slot-gpio.h | 5
-rw-r--r--  include/linux/mmdebug.h | 20
-rw-r--r--  include/linux/mmzone.h | 51
-rw-r--r--  include/linux/msi.h | 6
-rw-r--r--  include/linux/of.h | 3
-rw-r--r--  include/linux/of_address.h | 27
-rw-r--r--  include/linux/of_pci.h | 13
-rw-r--r--  include/linux/pagemap.h | 18
-rw-r--r--  include/linux/pci.h | 60
-rw-r--r--  include/linux/pci_hotplug.h | 2
-rw-r--r--  include/linux/pci_ids.h | 17
-rw-r--r--  include/linux/percpu-refcount.h | 122
-rw-r--r--  include/linux/percpu.h | 13
-rw-r--r--  include/linux/percpu_counter.h | 10
-rw-r--r--  include/linux/platform_data/gpio-dwapb.h | 32
-rw-r--r--  include/linux/pm.h | 5
-rw-r--r--  include/linux/pm_domain.h | 130
-rw-r--r--  include/linux/proportions.h | 5
-rw-r--r--  include/linux/reboot.h | 3
-rw-r--r--  include/linux/rmap.h | 2
-rw-r--r--  include/linux/sched.h | 6
-rw-r--r--  include/linux/screen_info.h | 8
-rw-r--r--  include/linux/security.h | 8
-rw-r--r--  include/linux/slab.h | 64
-rw-r--r--  include/linux/slab_def.h | 20
-rw-r--r--  include/linux/spi/mcp23s08.h | 18
-rw-r--r--  include/linux/suspend.h | 6
-rw-r--r--  include/linux/swap.h | 22
-rw-r--r--  include/linux/tick.h | 2
-rw-r--r--  include/linux/topology.h | 17
-rw-r--r--  include/linux/vm_event_item.h | 7
-rw-r--r--  include/linux/zsmalloc.h | 2
-rw-r--r--  include/media/davinci/dm644x_ccdc.h | 2
-rw-r--r--  include/media/omap3isp.h | 3
-rw-r--r--  include/media/rc-map.h | 1
-rw-r--r--  include/media/videobuf2-core.h | 15
-rw-r--r--  include/misc/cxl.h | 48
-rw-r--r--  include/net/dst_ops.h | 2
-rw-r--r--  include/net/inet_frag.h | 2
-rw-r--r--  include/ras/ras_event.h | 48
-rw-r--r--  include/sound/pcm.h | 52
-rw-r--r--  include/sound/rt5645.h | 3
-rw-r--r--  include/sound/rt5677.h | 13
-rw-r--r--  include/sound/soc-dapm.h | 5
-rw-r--r--  include/sound/soc.h | 101
-rw-r--r--  include/sound/vx_core.h | 7
-rw-r--r--  include/trace/events/asoc.h | 6
-rw-r--r--  include/trace/events/btrfs.h | 85
-rw-r--r--  include/trace/events/filelock.h | 14
-rw-r--r--  include/uapi/Kbuild | 1
-rw-r--r--  include/uapi/linux/Kbuild | 1
-rw-r--r--  include/uapi/linux/kernel-page-flags.h | 1
-rw-r--r--  include/uapi/linux/pci_regs.h | 3
-rw-r--r--  include/uapi/linux/prctl.h | 27
-rw-r--r--  include/uapi/linux/smiapp.h | 29
-rw-r--r--  include/uapi/linux/v4l2-controls.h | 6
-rw-r--r--  include/uapi/linux/v4l2-dv-timings.h | 9
-rw-r--r--  include/uapi/linux/vfio.h | 3
-rw-r--r--  include/uapi/linux/videodev2.h | 13
-rw-r--r--  include/uapi/misc/Kbuild | 2
-rw-r--r--  include/uapi/misc/cxl.h | 88
-rw-r--r--  include/uapi/sound/asound.h | 3
-rw-r--r--  include/xen/events.h | 2
-rw-r--r--  include/xen/interface/elfnote.h | 48
-rw-r--r--  include/xen/interface/io/vscsiif.h | 229
-rw-r--r--  include/xen/interface/xen.h | 272
-rw-r--r--  include/xen/xenbus.h | 21
119 files changed, 1790 insertions(+), 865 deletions(-)
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index c728113374f5..f97804bdf1ff 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -59,6 +59,10 @@
#define METHOD_NAME__PRS "_PRS"
#define METHOD_NAME__PRT "_PRT"
#define METHOD_NAME__PRW "_PRW"
+#define METHOD_NAME__PS0 "_PS0"
+#define METHOD_NAME__PS1 "_PS1"
+#define METHOD_NAME__PS2 "_PS2"
+#define METHOD_NAME__PS3 "_PS3"
#define METHOD_NAME__REG "_REG"
#define METHOD_NAME__SB_ "_SB_"
#define METHOD_NAME__SEG "_SEG"
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index b7c89d47efbe..9fc1d71c82bc 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20140724
+#define ACPI_CA_VERSION 0x20140828
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -692,6 +692,7 @@ ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
*event_status))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_get_gpe_device(u32 gpe_index,
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 7626bfeac2cb..29e79370641d 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -952,7 +952,8 @@ enum acpi_srat_type {
ACPI_SRAT_TYPE_CPU_AFFINITY = 0,
ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1,
ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2,
- ACPI_SRAT_TYPE_RESERVED = 3 /* 3 and greater are reserved */
+ ACPI_SRAT_TYPE_GICC_AFFINITY = 3,
+ ACPI_SRAT_TYPE_RESERVED = 4 /* 4 and greater are reserved */
};
/*
@@ -968,7 +969,7 @@ struct acpi_srat_cpu_affinity {
u32 flags;
u8 local_sapic_eid;
u8 proximity_domain_hi[3];
- u32 reserved; /* Reserved, must be zero */
+ u32 clock_domain;
};
/* Flags */
@@ -1010,6 +1011,20 @@ struct acpi_srat_x2apic_cpu_affinity {
#define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */
+/* 3: GICC Affinity (ACPI 5.1) */
+
+struct acpi_srat_gicc_affinity {
+ struct acpi_subtable_header header;
+ u32 proximity_domain;
+ u32 acpi_processor_uid;
+ u32 flags;
+ u32 clock_domain;
+};
+
+/* Flags for struct acpi_srat_gicc_affinity */
+
+#define ACPI_SRAT_GICC_ENABLED (1) /* 00: Use affinity structure */
+
/* Reset to default packing */
#pragma pack()
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 787bcc814463..5480cb2236bf 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -310,10 +310,15 @@ struct acpi_gtdt_timer_entry {
u32 common_flags;
};
+/* Flag Definitions: timer_flags and virtual_timer_flags above */
+
+#define ACPI_GTDT_GT_IRQ_MODE (1)
+#define ACPI_GTDT_GT_IRQ_POLARITY (1<<1)
+
/* Flag Definitions: common_flags above */
-#define ACPI_GTDT_GT_IS_SECURE_TIMER (1)
-#define ACPI_GTDT_GT_ALWAYS_ON (1<<1)
+#define ACPI_GTDT_GT_IS_SECURE_TIMER (1)
+#define ACPI_GTDT_GT_ALWAYS_ON (1<<1)
/* 1: SBSA Generic Watchdog Structure */
diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
index de8bf89940f8..3378dcf4c31e 100644
--- a/include/asm-generic/dma-mapping-common.h
+++ b/include/asm-generic/dma-mapping-common.h
@@ -179,6 +179,15 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
+void *dma_common_contiguous_remap(struct page *page, size_t size,
+ unsigned long vm_flags,
+ pgprot_t prot, const void *caller);
+
+void *dma_common_pages_remap(struct page **pages, size_t size,
+ unsigned long vm_flags, pgprot_t prot,
+ const void *caller);
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
+
/**
* dma_mmap_attrs - map a coherent DMA allocation into user space
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -205,14 +214,6 @@ dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
-static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
-}
-
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index c1d4105e1c1d..383ade1a211b 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -27,7 +27,7 @@
*/
#ifndef ARCH_NR_GPIOS
-#define ARCH_NR_GPIOS 256
+#define ARCH_NR_GPIOS 512
#endif
/*
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 975e1cc75edb..b8fdc57a7335 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -331,7 +331,7 @@ static inline void iounmap(void __iomem *addr)
#ifndef CONFIG_GENERIC_IOMAP
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
- return (void __iomem *) port;
+ return PCI_IOBASE + (port & IO_SPACE_LIMIT);
}
static inline void ioport_unmap(void __iomem *p)
diff --git a/include/asm-generic/irq_work.h b/include/asm-generic/irq_work.h
new file mode 100644
index 000000000000..a44f452c6590
--- /dev/null
+++ b/include/asm-generic/irq_work.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_IRQ_WORK_H
+#define __ASM_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return false;
+}
+
+#endif /* __ASM_IRQ_WORK_H */
+
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 53b2acc38213..081ff8826bf6 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -249,6 +249,10 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
#define pgprot_writecombine pgprot_noncached
#endif
+#ifndef pgprot_device
+#define pgprot_device pgprot_noncached
+#endif
+
/*
* When walking page tables, get the address of the next boundary,
* or the end address of the range if that comes earlier. Although no
@@ -660,11 +664,12 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
}
#ifdef CONFIG_NUMA_BALANCING
-#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
/*
- * _PAGE_NUMA works identical to _PAGE_PROTNONE (it's actually the
- * same bit too). It's set only when _PAGE_PRESET is not set and it's
- * never set if _PAGE_PRESENT is set.
+ * _PAGE_NUMA distinguishes between an unmapped page table entry, an entry that
+ * is protected for PROT_NONE and a NUMA hinting fault entry. If the
+ * architecture defines __PAGE_PROTNONE then it should take that into account
+ * but those that do not can rely on the fact that the NUMA hinting scanner
+ * skips inaccessible VMAs.
*
* pte/pmd_present() returns true if pte/pmd_numa returns true. Page
* fault triggers on those regions if pte/pmd_numa returns true
@@ -673,16 +678,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
#ifndef pte_numa
static inline int pte_numa(pte_t pte)
{
- return (pte_flags(pte) &
- (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA;
+ return ptenuma_flags(pte) == _PAGE_NUMA;
}
#endif
#ifndef pmd_numa
static inline int pmd_numa(pmd_t pmd)
{
- return (pmd_flags(pmd) &
- (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA;
+ return pmdnuma_flags(pmd) == _PAGE_NUMA;
}
#endif
@@ -722,6 +725,8 @@ static inline pte_t pte_mknuma(pte_t pte)
{
pteval_t val = pte_val(pte);
+ VM_BUG_ON(!(val & _PAGE_PRESENT));
+
val &= ~_PAGE_PRESENT;
val |= _PAGE_NUMA;
@@ -765,16 +770,6 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
}
#endif
#else
-extern int pte_numa(pte_t pte);
-extern int pmd_numa(pmd_t pmd);
-extern pte_t pte_mknonnuma(pte_t pte);
-extern pmd_t pmd_mknonnuma(pmd_t pmd);
-extern pte_t pte_mknuma(pte_t pte);
-extern pmd_t pmd_mknuma(pmd_t pmd);
-extern void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-extern void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp);
-#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
-#else
static inline int pmd_numa(pmd_t pmd)
{
return 0;
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index f1a24b5c3b90..b58fd667f87b 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -3,6 +3,8 @@
/* References to section boundaries */
+#include <linux/compiler.h>
+
/*
* Usage guidelines:
* _text, _data: architecture specific, don't use them in arch-independent code
@@ -37,6 +39,8 @@ extern char __start_rodata[], __end_rodata[];
/* Start and end of .ctors section - used for constructor calls. */
extern char __ctors_start[], __ctors_end[];
+extern __visible const void __nosave_begin, __nosave_end;
+
/* function descriptor handling (if any). Override
* in asm/sections.h */
#ifndef dereference_function_descriptor
diff --git a/include/dt-bindings/sound/cs35l32.h b/include/dt-bindings/sound/cs35l32.h
new file mode 100644
index 000000000000..0c6d6a3c15a2
--- /dev/null
+++ b/include/dt-bindings/sound/cs35l32.h
@@ -0,0 +1,26 @@
+#ifndef __DT_CS35L32_H
+#define __DT_CS35L32_H
+
+#define CS35L32_BOOST_MGR_AUTO 0
+#define CS35L32_BOOST_MGR_AUTO_AUDIO 1
+#define CS35L32_BOOST_MGR_BYPASS 2
+#define CS35L32_BOOST_MGR_FIXED 3
+
+#define CS35L32_DATA_CFG_LR_VP 0
+#define CS35L32_DATA_CFG_LR_STAT 1
+#define CS35L32_DATA_CFG_LR 2
+#define CS35L32_DATA_CFG_LR_VPSTAT 3
+
+#define CS35L32_BATT_THRESH_3_1V 0
+#define CS35L32_BATT_THRESH_3_2V 1
+#define CS35L32_BATT_THRESH_3_3V 2
+#define CS35L32_BATT_THRESH_3_4V 3
+
+#define CS35L32_BATT_RECOV_3_1V 0
+#define CS35L32_BATT_RECOV_3_2V 1
+#define CS35L32_BATT_RECOV_3_3V 2
+#define CS35L32_BATT_RECOV_3_4V 3
+#define CS35L32_BATT_RECOV_3_5V 4
+#define CS35L32_BATT_RECOV_3_6V 5
+
+#endif /* __DT_CS35L32_H */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 807cbc46d73e..b7926bb9b444 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -587,7 +587,6 @@ static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
#if defined(CONFIG_ACPI) && defined(CONFIG_PM)
struct acpi_device *acpi_dev_pm_get_node(struct device *dev);
int acpi_dev_pm_attach(struct device *dev, bool power_on);
-void acpi_dev_pm_detach(struct device *dev, bool power_off);
#else
static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
{
@@ -597,7 +596,6 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
return -ENODEV;
}
-static inline void acpi_dev_pm_detach(struct device *dev, bool power_off) {}
#endif
#ifdef CONFIG_ACPI
diff --git a/include/linux/aer.h b/include/linux/aer.h
index c826d1c28f9c..4fef65e57023 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -7,6 +7,8 @@
#ifndef _AER_H_
#define _AER_H_
+#include <linux/types.h>
+
#define AER_NONFATAL 0
#define AER_FATAL 1
#define AER_CORRECTABLE 2
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index 09a947e8bc87..642d6ae4030c 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -22,19 +22,6 @@ struct ata_port_info;
struct ahci_host_priv;
struct platform_device;
-/*
- * Note ahci_platform_data is deprecated, it is only kept around for use
- * by the old da850 and spear13xx ahci code.
- * New drivers should instead declare their own platform_driver struct, and
- * use ahci_platform* functions in their own probe, suspend and resume methods.
- */
-struct ahci_platform_data {
- int (*init)(struct device *dev, void __iomem *addr);
- void (*exit)(struct device *dev);
- int (*suspend)(struct device *dev);
- int (*resume)(struct device *dev);
-};
-
int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h
index b9fde17f767c..5c618a084225 100644
--- a/include/linux/ata_platform.h
+++ b/include/linux/ata_platform.h
@@ -8,11 +8,6 @@ struct pata_platform_info {
* spacing used by ata_std_ports().
*/
unsigned int ioport_shift;
- /*
- * Indicate platform specific irq types and initial
- * IRQ flags when call request_irq()
- */
- unsigned int irq_flags;
};
extern int __pata_platform_probe(struct device *dev,
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 4c7a4b2104bf..91b77f8d495d 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -1,6 +1,8 @@
#ifndef __LINUX_ATMEL_MCI_H
#define __LINUX_ATMEL_MCI_H
+#include <linux/types.h>
+
#define ATMCI_MAX_NR_SLOTS 2
/**
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 089743ade734..9b0a15d06a4f 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -27,10 +27,13 @@
* counter raised only while it is under our special handling;
*
* iii. after the lockless scan step have selected a potential balloon page for
- * isolation, re-test the page->mapping flags and the page ref counter
+ * isolation, re-test the PageBalloon mark and the PagePrivate flag
* under the proper page lock, to ensure isolating a valid balloon page
* (not yet isolated, nor under release procedure)
*
+ * iv. isolation or dequeueing procedure must clear PagePrivate flag under
+ * page lock together with removing page from balloon device page list.
+ *
* The functions provided by this interface are placed to help on coping with
* the aforementioned balloon page corner case, as well as to ensure the simple
* set of exposed rules are satisfied while we are dealing with balloon pages
@@ -54,43 +57,22 @@
* balloon driver as a page book-keeper for its registered balloon devices.
*/
struct balloon_dev_info {
- void *balloon_device; /* balloon device descriptor */
- struct address_space *mapping; /* balloon special page->mapping */
unsigned long isolated_pages; /* # of isolated pages for migration */
spinlock_t pages_lock; /* Protection to pages list */
struct list_head pages; /* Pages enqueued & handled to Host */
+ int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
+ struct page *page, enum migrate_mode mode);
};
extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
-extern struct balloon_dev_info *balloon_devinfo_alloc(
- void *balloon_dev_descriptor);
-static inline void balloon_devinfo_free(struct balloon_dev_info *b_dev_info)
-{
- kfree(b_dev_info);
-}
-
-/*
- * balloon_page_free - release a balloon page back to the page free lists
- * @page: ballooned page to be set free
- *
- * This function must be used to properly set free an isolated/dequeued balloon
- * page at the end of a sucessful page migration, or at the balloon driver's
- * page release procedure.
- */
-static inline void balloon_page_free(struct page *page)
+static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
{
- /*
- * Balloon pages always get an extra refcount before being isolated
- * and before being dequeued to help on sorting out fortuite colisions
- * between a thread attempting to isolate and another thread attempting
- * to release the very same balloon page.
- *
- * Before we handle the page back to Buddy, lets drop its extra refcnt.
- */
- put_page(page);
- __free_page(page);
+ balloon->isolated_pages = 0;
+ spin_lock_init(&balloon->pages_lock);
+ INIT_LIST_HEAD(&balloon->pages);
+ balloon->migratepage = NULL;
}
#ifdef CONFIG_BALLOON_COMPACTION
@@ -98,107 +80,58 @@ extern bool balloon_page_isolate(struct page *page);
extern void balloon_page_putback(struct page *page);
extern int balloon_page_migrate(struct page *newpage,
struct page *page, enum migrate_mode mode);
-extern struct address_space
-*balloon_mapping_alloc(struct balloon_dev_info *b_dev_info,
- const struct address_space_operations *a_ops);
-
-static inline void balloon_mapping_free(struct address_space *balloon_mapping)
-{
- kfree(balloon_mapping);
-}
/*
- * page_flags_cleared - helper to perform balloon @page ->flags tests.
- *
- * As balloon pages are obtained from buddy and we do not play with page->flags
- * at driver level (exception made when we get the page lock for compaction),
- * we can safely identify a ballooned page by checking if the
- * PAGE_FLAGS_CHECK_AT_PREP page->flags are all cleared. This approach also
- * helps us skip ballooned pages that are locked for compaction or release, thus
- * mitigating their racy check at balloon_page_movable()
- */
-static inline bool page_flags_cleared(struct page *page)
-{
- return !(page->flags & PAGE_FLAGS_CHECK_AT_PREP);
-}
-
-/*
- * __is_movable_balloon_page - helper to perform @page mapping->flags tests
+ * __is_movable_balloon_page - helper to perform @page PageBalloon tests
*/
static inline bool __is_movable_balloon_page(struct page *page)
{
- struct address_space *mapping = page->mapping;
- return mapping_balloon(mapping);
+ return PageBalloon(page);
}
/*
- * balloon_page_movable - test page->mapping->flags to identify balloon pages
- * that can be moved by compaction/migration.
- *
- * This function is used at core compaction's page isolation scheme, therefore
- * most pages exposed to it are not enlisted as balloon pages and so, to avoid
- * undesired side effects like racing against __free_pages(), we cannot afford
- * holding the page locked while testing page->mapping->flags here.
+ * balloon_page_movable - test PageBalloon to identify balloon pages
+ * and PagePrivate to check that the page is not
+ * isolated and can be moved by compaction/migration.
*
* As we might return false positives in the case of a balloon page being just
- * released under us, the page->mapping->flags need to be re-tested later,
- * under the proper page lock, at the functions that will be coping with the
- * balloon page case.
+ * released under us, this need to be re-tested later, under the page lock.
*/
static inline bool balloon_page_movable(struct page *page)
{
- /*
- * Before dereferencing and testing mapping->flags, let's make sure
- * this is not a page that uses ->mapping in a different way
- */
- if (page_flags_cleared(page) && !page_mapped(page) &&
- page_count(page) == 1)
- return __is_movable_balloon_page(page);
-
- return false;
+ return PageBalloon(page) && PagePrivate(page);
}
/*
* isolated_balloon_page - identify an isolated balloon page on private
* compaction/migration page lists.
- *
- * After a compaction thread isolates a balloon page for migration, it raises
- * the page refcount to prevent concurrent compaction threads from re-isolating
- * the same page. For that reason putback_movable_pages(), or other routines
- * that need to identify isolated balloon pages on private pagelists, cannot
- * rely on balloon_page_movable() to accomplish the task.
*/
static inline bool isolated_balloon_page(struct page *page)
{
- /* Already isolated balloon pages, by default, have a raised refcount */
- if (page_flags_cleared(page) && !page_mapped(page) &&
- page_count(page) >= 2)
- return __is_movable_balloon_page(page);
-
- return false;
+ return PageBalloon(page);
}
/*
* balloon_page_insert - insert a page into the balloon's page list and make
- * the page->mapping assignment accordingly.
+ * the page->private assignment accordingly.
+ * @balloon : pointer to balloon device
* @page : page to be assigned as a 'balloon page'
- * @mapping : allocated special 'balloon_mapping'
- * @head : balloon's device page list head
*
* Caller must ensure the page is locked and the spin_lock protecting balloon
* pages list is held before inserting a page into the balloon device.
*/
-static inline void balloon_page_insert(struct page *page,
- struct address_space *mapping,
- struct list_head *head)
+static inline void balloon_page_insert(struct balloon_dev_info *balloon,
+ struct page *page)
{
- page->mapping = mapping;
- list_add(&page->lru, head);
+ __SetPageBalloon(page);
+ SetPagePrivate(page);
+ set_page_private(page, (unsigned long)balloon);
+ list_add(&page->lru, &balloon->pages);
}
/*
* balloon_page_delete - delete a page from balloon's page list and clear
- * the page->mapping assignement accordingly.
+ * the page->private assignement accordingly.
* @page : page to be released from balloon's page list
*
* Caller must ensure the page is locked and the spin_lock protecting balloon
@@ -206,8 +139,12 @@ static inline void balloon_page_insert(struct page *page,
*/
static inline void balloon_page_delete(struct page *page)
{
- page->mapping = NULL;
- list_del(&page->lru);
+ __ClearPageBalloon(page);
+ set_page_private(page, 0);
+ if (PagePrivate(page)) {
+ ClearPagePrivate(page);
+ list_del(&page->lru);
+ }
}
/*
@@ -216,11 +153,7 @@ static inline void balloon_page_delete(struct page *page)
*/
static inline struct balloon_dev_info *balloon_page_device(struct page *page)
{
- struct address_space *mapping = page->mapping;
- if (likely(mapping))
- return mapping->private_data;
-
- return NULL;
+ return (struct balloon_dev_info *)page_private(page);
}
static inline gfp_t balloon_mapping_gfp_mask(void)
@@ -228,34 +161,24 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
return GFP_HIGHUSER_MOVABLE;
}
-static inline bool balloon_compaction_check(void)
-{
- return true;
-}
-
#else /* !CONFIG_BALLOON_COMPACTION */
-static inline void *balloon_mapping_alloc(void *balloon_device,
- const struct address_space_operations *a_ops)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void balloon_mapping_free(struct address_space *balloon_mapping)
+static inline void balloon_page_insert(struct balloon_dev_info *balloon,
+ struct page *page)
{
- return;
+ __SetPageBalloon(page);
+ list_add(&page->lru, &balloon->pages);
}
-static inline void balloon_page_insert(struct page *page,
- struct address_space *mapping,
- struct list_head *head)
+static inline void balloon_page_delete(struct page *page)
{
- list_add(&page->lru, head);
+ __ClearPageBalloon(page);
+ list_del(&page->lru);
}
-static inline void balloon_page_delete(struct page *page)
+static inline bool __is_movable_balloon_page(struct page *page)
{
- list_del(&page->lru);
+ return false;
}
static inline bool balloon_page_movable(struct page *page)
@@ -289,9 +212,5 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
return GFP_HIGHUSER;
}
-static inline bool balloon_compaction_check(void)
-{
- return false;
-}
#endif /* CONFIG_BALLOON_COMPACTION */
#endif /* _LINUX_BALLOON_COMPACTION_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index a1e31f274fcd..c13a0c09faea 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -140,6 +140,7 @@ enum {
};
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+void blk_mq_finish_init(struct request_queue *q);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 518b46555b80..87be398166d3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1564,7 +1564,7 @@ static inline int blk_rq_map_integrity_sg(struct request_queue *q,
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
- return 0;
+ return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index b5223c570eba..1d5196889048 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -27,7 +27,6 @@
struct cgroup_root;
struct cgroup_subsys;
-struct inode;
struct cgroup;
extern int cgroup_init_early(void);
@@ -38,7 +37,8 @@ extern void cgroup_exit(struct task_struct *p);
extern int cgroupstats_build(struct cgroupstats *stats,
struct dentry *dentry);
-extern int proc_cgroup_show(struct seq_file *, void *);
+extern int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *tsk);
/* define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _cgrp_id,
@@ -161,11 +161,6 @@ static inline void css_put(struct cgroup_subsys_state *css)
/* bits in struct cgroup flags field */
enum {
- /*
- * Control Group has previously had a child cgroup or a task,
- * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
- */
- CGRP_RELEASABLE,
/* Control Group requires release notifications to userspace */
CGRP_NOTIFY_ON_RELEASE,
/*
@@ -235,13 +230,6 @@ struct cgroup {
struct list_head e_csets[CGROUP_SUBSYS_COUNT];
/*
- * Linked list running through all cgroups that can
- * potentially be reaped by the release agent. Protected by
- * release_list_lock
- */
- struct list_head release_list;
-
- /*
* list of pidlists, up to two for each namespace (one for procs, one
* for tasks); created on demand.
*/
@@ -250,6 +238,9 @@ struct cgroup {
/* used to wait for offlining of csses */
wait_queue_head_t offline_waitq;
+
+ /* used to schedule release agent */
+ struct work_struct release_agent_work;
};
#define MAX_CGROUP_ROOT_NAMELEN 64
@@ -536,13 +527,10 @@ static inline bool cgroup_has_tasks(struct cgroup *cgrp)
return !list_empty(&cgrp->cset_links);
}
-/* returns ino associated with a cgroup, 0 indicates unmounted root */
+/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
- if (cgrp->kn)
- return cgrp->kn->ino;
- else
- return 0;
+ return cgrp->kn->ino;
}
/* cft/css accessors for cftype->write() operation */
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 01e3132820da..60bdf8dc02a3 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -2,14 +2,24 @@
#define _LINUX_COMPACTION_H
/* Return values for compact_zone() and try_to_compact_pages() */
+/* compaction didn't start as it was deferred due to past failures */
+#define COMPACT_DEFERRED 0
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
-#define COMPACT_SKIPPED 0
+#define COMPACT_SKIPPED 1
/* compaction should continue to another pageblock */
-#define COMPACT_CONTINUE 1
+#define COMPACT_CONTINUE 2
/* direct compaction partially compacted a zone and there are suitable pages */
-#define COMPACT_PARTIAL 2
+#define COMPACT_PARTIAL 3
/* The full zone was compacted */
-#define COMPACT_COMPLETE 3
+#define COMPACT_COMPLETE 4
+
+/* Used to signal whether compaction detected need_sched() or lock contention */
+/* No contention detected */
+#define COMPACT_CONTENDED_NONE 0
+/* Either need_sched() was true or fatal signal pending */
+#define COMPACT_CONTENDED_SCHED 1
+/* Zone lock or lru_lock was contended in async compaction */
+#define COMPACT_CONTENDED_LOCK 2
#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
@@ -22,7 +32,8 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *mask,
- enum migrate_mode mode, bool *contended);
+ enum migrate_mode mode, int *contended,
+ struct zone **candidate_zone);
extern void compact_pgdat(pg_data_t *pgdat, int order);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern unsigned long compaction_suitable(struct zone *zone, int order);
@@ -91,7 +102,8 @@ static inline bool compaction_restarting(struct zone *zone, int order)
#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *nodemask,
- enum migrate_mode mode, bool *contended)
+ enum migrate_mode mode, int *contended,
+ struct zone **candidate_zone)
{
return COMPACT_CONTINUE;
}
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 7d1955afa62c..138336b6bb04 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -112,6 +112,9 @@ struct cpufreq_policy {
spinlock_t transition_lock;
wait_queue_head_t transition_wait;
struct task_struct *transition_task; /* Task which is doing the transition */
+
+ /* For cpufreq driver's internal use */
+ void *driver_data;
};
/* Only for ACPI */
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 6e39c9bb0dae..2f073db7392e 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -86,7 +86,8 @@ extern void __cpuset_memory_pressure_bump(void);
extern void cpuset_task_status_allowed(struct seq_file *m,
struct task_struct *task);
-extern int proc_cpuset_show(struct seq_file *, void *);
+extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *tsk);
extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 931b70986272..d5d388160f42 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -263,6 +263,32 @@ struct dma_attrs;
#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
dma_unmap_sg(dev, sgl, nents, dir)
+#else
+static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t gfp)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
+}
+
+static inline void dma_free_writecombine(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_addr)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
+}
+
+static inline int dma_mmap_writecombine(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
#endif /* CONFIG_HAVE_DMA_ATTRS */
#ifdef CONFIG_NEED_DMA_MAP_STATE
diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h
index 4ebc49fae391..0d348e011a6e 100644
--- a/include/linux/flex_proportions.h
+++ b/include/linux/flex_proportions.h
@@ -10,6 +10,7 @@
#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/seqlock.h>
+#include <linux/gfp.h>
/*
* When maximum proportion of some event type is specified, this is the
@@ -32,7 +33,7 @@ struct fprop_global {
seqcount_t sequence;
};
-int fprop_global_init(struct fprop_global *p);
+int fprop_global_init(struct fprop_global *p, gfp_t gfp);
void fprop_global_destroy(struct fprop_global *p);
bool fprop_new_period(struct fprop_global *p, int periods);
@@ -79,7 +80,7 @@ struct fprop_local_percpu {
raw_spinlock_t lock; /* Protect period and numerator */
};
-int fprop_local_init_percpu(struct fprop_local_percpu *pl);
+int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 94187721ad41..2023306c620e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -851,13 +851,7 @@ static inline struct file *get_file(struct file *f)
*/
#define FILE_LOCK_DEFERRED 1
-/*
- * The POSIX file lock owner is determined by
- * the "struct files_struct" in the thread group
- * (or NULL for no owner - BSD locks).
- *
- * Lockd stuffs a "host" pointer into this.
- */
+/* legacy typedef, should eventually be removed */
typedef void *fl_owner_t;
struct file_lock_operations {
@@ -868,10 +862,13 @@ struct file_lock_operations {
struct lock_manager_operations {
int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
unsigned long (*lm_owner_key)(struct file_lock *);
+ void (*lm_get_owner)(struct file_lock *, struct file_lock *);
+ void (*lm_put_owner)(struct file_lock *);
void (*lm_notify)(struct file_lock *); /* unblock callback */
- int (*lm_grant)(struct file_lock *, struct file_lock *, int);
- void (*lm_break)(struct file_lock *);
- int (*lm_change)(struct file_lock **, int);
+ int (*lm_grant)(struct file_lock *, int);
+ bool (*lm_break)(struct file_lock *);
+ int (*lm_change)(struct file_lock **, int, struct list_head *);
+ void (*lm_setup)(struct file_lock *, void **);
};
struct lock_manager {
@@ -966,7 +963,7 @@ void locks_free_lock(struct file_lock *fl);
extern void locks_init_lock(struct file_lock *);
extern struct file_lock * locks_alloc_lock(void);
extern void locks_copy_lock(struct file_lock *, struct file_lock *);
-extern void __locks_copy_lock(struct file_lock *, const struct file_lock *);
+extern void locks_copy_conflock(struct file_lock *, struct file_lock *);
extern void locks_remove_posix(struct file *, fl_owner_t);
extern void locks_remove_file(struct file *);
extern void locks_release_private(struct file_lock *);
@@ -980,11 +977,9 @@ extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
extern void lease_get_mtime(struct inode *, struct timespec *time);
-extern int generic_setlease(struct file *, long, struct file_lock **);
-extern int vfs_setlease(struct file *, long, struct file_lock **);
-extern int lease_modify(struct file_lock **, int);
-extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
-extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
+extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
+extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
+extern int lease_modify(struct file_lock **, int, struct list_head *);
#else /* !CONFIG_FILE_LOCKING */
static inline int fcntl_getlk(struct file *file, unsigned int cmd,
struct flock __user *user)
@@ -1013,12 +1008,12 @@ static inline int fcntl_setlk64(unsigned int fd, struct file *file,
#endif
static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
- return 0;
+ return -EINVAL;
}
static inline int fcntl_getlease(struct file *filp)
{
- return 0;
+ return F_UNLCK;
}
static inline void locks_init_lock(struct file_lock *fl)
@@ -1026,7 +1021,7 @@ static inline void locks_init_lock(struct file_lock *fl)
return;
}
-static inline void __locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
{
return;
}
@@ -1100,33 +1095,22 @@ static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
}
static inline int generic_setlease(struct file *filp, long arg,
- struct file_lock **flp)
+ struct file_lock **flp, void **priv)
{
return -EINVAL;
}
static inline int vfs_setlease(struct file *filp, long arg,
- struct file_lock **lease)
+ struct file_lock **lease, void **priv)
{
return -EINVAL;
}
-static inline int lease_modify(struct file_lock **before, int arg)
+static inline int lease_modify(struct file_lock **before, int arg,
+ struct list_head *dispose)
{
return -EINVAL;
}
-
-static inline int lock_may_read(struct inode *inode, loff_t start,
- unsigned long len)
-{
- return 1;
-}
-
-static inline int lock_may_write(struct inode *inode, loff_t start,
- unsigned long len)
-{
- return 1;
-}
#endif /* !CONFIG_FILE_LOCKING */
@@ -1151,8 +1135,8 @@ extern void fasync_free(struct fasync_struct *);
/* can be called from interrupts */
extern void kill_fasync(struct fasync_struct **, int, int);
-extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
-extern int f_setown(struct file *filp, unsigned long arg, int force);
+extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
+extern void f_setown(struct file *filp, unsigned long arg, int force);
extern void f_delown(struct file *filp);
extern pid_t f_getown(struct file *filp);
extern int send_sigurg(struct fown_struct *fown);
@@ -1506,7 +1490,7 @@ struct file_operations {
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
- int (*setlease)(struct file *, long, struct file_lock **);
+ int (*setlease)(struct file *, long, struct file_lock **, void **);
long (*fallocate)(struct file *file, int mode, loff_t offset,
loff_t len);
int (*show_fdinfo)(struct seq_file *m, struct file *f);
@@ -2611,6 +2595,7 @@ extern int simple_write_end(struct file *file, struct address_space *mapping,
struct page *page, void *fsdata);
extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *);
+extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
extern const struct dentry_operations simple_dentry_operations;
extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index f49ddb1b2273..84d60cb841b1 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -781,13 +781,13 @@ struct fsl_ifc_regs {
__be32 amask;
u32 res4[0x2];
} amask_cs[FSL_IFC_BANK_COUNT];
- u32 res5[0x17];
+ u32 res5[0x18];
struct {
- __be32 csor_ext;
__be32 csor;
+ __be32 csor_ext;
u32 res6;
} csor_cs[FSL_IFC_BANK_COUNT];
- u32 res7[0x19];
+ u32 res7[0x18];
struct {
__be32 ftim[4];
u32 res8[0x8];
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 1c2fdaa2ffc3..1ccaab44abcc 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -110,6 +110,10 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data);
+extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
+ unsigned long size, unsigned long start, unsigned int nr,
+ void *data);
+
extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
unsigned long start, unsigned int nr, void *data);
@@ -117,6 +121,9 @@ extern struct gen_pool *devm_gen_pool_create(struct device *dev,
int min_alloc_order, int nid);
extern struct gen_pool *dev_get_gen_pool(struct device *dev);
+bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
+ size_t size);
+
#ifdef CONFIG_OF
extern struct gen_pool *of_get_named_gen_pool(struct device_node *np,
const char *propname, int index);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 5e7219dc0fae..41b30fd4d041 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -156,7 +156,7 @@ struct vm_area_struct;
#define GFP_DMA32 __GFP_DMA32
/* Convert GFP flags to their corresponding migrate type */
-static inline int allocflags_to_migratetype(gfp_t gfp_flags)
+static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
{
WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c5e41da20112..249db3057e4d 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -56,6 +56,8 @@ struct seq_file;
* as the chip access may sleep when e.g. reading out the IRQ status
* registers.
* @exported: flags if the gpiochip is exported for use from sysfs. Private.
+ * @irq_not_threaded: flag must be set if @can_sleep is set but the
+ * IRQs don't need to be threaded
*
* A gpio_chip can help platforms abstract various sources of GPIOs so
* they can all be accessed through a common programing interface.
@@ -101,6 +103,7 @@ struct gpio_chip {
struct gpio_desc *desc;
const char *const *names;
bool can_sleep;
+ bool irq_not_threaded;
bool exported;
#ifdef CONFIG_GPIOLIB_IRQCHIP
@@ -141,7 +144,7 @@ extern const char *gpiochip_is_requested(struct gpio_chip *chip,
/* add/remove chips */
extern int gpiochip_add(struct gpio_chip *chip);
-extern int gpiochip_remove(struct gpio_chip *chip);
+extern void gpiochip_remove(struct gpio_chip *chip);
extern struct gpio_chip *gpiochip_find(void *data,
int (*match)(struct gpio_chip *chip, void *data));
@@ -166,7 +169,8 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
#endif /* CONFIG_GPIOLIB_IRQCHIP */
-int gpiochip_request_own_desc(struct gpio_desc *desc, const char *label);
+struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
+ const char *label);
void gpiochip_free_own_desc(struct gpio_desc *desc);
#else /* CONFIG_GPIOLIB */
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 63579cb8d3dc..ad9051bab267 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -132,7 +132,7 @@ extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
spinlock_t **ptl)
{
- VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
+ VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
if (pmd_trans_huge(*pmd))
return __pmd_trans_huge_lock(pmd, vma, ptl);
else
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 698ad053d064..69517a24bc50 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -193,11 +193,6 @@ extern void irq_wake_thread(unsigned int irq, void *dev_id);
/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
-#ifdef CONFIG_PM_SLEEP
-extern int check_wakeup_irqs(void);
-#else
-static inline int check_wakeup_irqs(void) { return 0; }
-#endif
/**
* struct irq_affinity_notify - context for notification of IRQ affinity changes
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 20f9a527922a..7b02bcc85b9e 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -80,6 +80,7 @@ enum iommu_attr {
DOMAIN_ATTR_FSL_PAMU_STASH,
DOMAIN_ATTR_FSL_PAMU_ENABLE,
DOMAIN_ATTR_FSL_PAMUV1,
+ DOMAIN_ATTR_NESTING, /* two stages of translation */
DOMAIN_ATTR_MAX,
};
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 142ec544167c..2c5250222278 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -215,6 +215,11 @@ static inline int __deprecated check_region(resource_size_t s,
/* Wrappers for managed devices */
struct device;
+
+extern int devm_request_resource(struct device *dev, struct resource *root,
+ struct resource *new);
+extern void devm_release_resource(struct device *dev, struct resource *new);
+
#define devm_request_region(dev,start,n,name) \
__devm_request_region(dev, &ioport_resource, (start), (n), (name))
#define devm_request_mem_region(dev,start,n,name) \
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 62af59242ddc..03f48d936f66 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -173,6 +173,7 @@ struct irq_data {
* IRQD_IRQ_DISABLED - Disabled state of the interrupt
* IRQD_IRQ_MASKED - Masked state of the interrupt
* IRQD_IRQ_INPROGRESS - In progress state of the interrupt
+ * IRQD_WAKEUP_ARMED - Wakeup mode armed
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
@@ -186,6 +187,7 @@ enum {
IRQD_IRQ_DISABLED = (1 << 16),
IRQD_IRQ_MASKED = (1 << 17),
IRQD_IRQ_INPROGRESS = (1 << 18),
+ IRQD_WAKEUP_ARMED = (1 << 19),
};
static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
@@ -257,6 +259,12 @@ static inline bool irqd_irq_inprogress(struct irq_data *d)
return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
}
+static inline bool irqd_is_wakeup_armed(struct irq_data *d)
+{
+ return d->state_use_accessors & IRQD_WAKEUP_ARMED;
+}
+
+
/*
* Functions for chained handlers which can be enabled/disabled by the
* standard disable_irq/enable_irq calls. Must be called with
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index bf9422c3aefe..bf3fe719c7ce 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -39,9 +39,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu);
#endif
void irq_work_run(void);
+void irq_work_tick(void);
void irq_work_sync(struct irq_work *work);
#ifdef CONFIG_IRQ_WORK
+#include <asm/irq_work.h>
+
bool irq_work_needs_cpu(void);
#else
static inline bool irq_work_needs_cpu(void) { return false; }
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 45e2d8c15bd2..13eed92c7d24 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -21,7 +21,11 @@
#define GIC_CPU_ACTIVEPRIO 0xd0
#define GIC_CPU_IDENT 0xfc
+#define GICC_ENABLE 0x1
+#define GICC_INT_PRI_THRESHOLD 0xf0
#define GICC_IAR_INT_ID_MASK 0x3ff
+#define GICC_INT_SPURIOUS 1023
+#define GICC_DIS_BYPASS_MASK 0x1e0
#define GIC_DIST_CTRL 0x000
#define GIC_DIST_CTR 0x004
@@ -39,6 +43,18 @@
#define GIC_DIST_SGI_PENDING_CLEAR 0xf10
#define GIC_DIST_SGI_PENDING_SET 0xf20
+#define GICD_ENABLE 0x1
+#define GICD_DISABLE 0x0
+#define GICD_INT_ACTLOW_LVLTRIG 0x0
+#define GICD_INT_EN_CLR_X32 0xffffffff
+#define GICD_INT_EN_SET_SGI 0x0000ffff
+#define GICD_INT_EN_CLR_PPI 0xffff0000
+#define GICD_INT_DEF_PRI 0xa0
+#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
+ (GICD_INT_DEF_PRI << 16) |\
+ (GICD_INT_DEF_PRI << 8) |\
+ GICD_INT_DEF_PRI)
+
#define GICH_HCR 0x0
#define GICH_VTR 0x4
#define GICH_VMCR 0x8
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 472c021a2d4f..faf433af425e 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -12,6 +12,8 @@ struct irq_affinity_notify;
struct proc_dir_entry;
struct module;
struct irq_desc;
+struct irq_domain;
+struct pt_regs;
/**
* struct irq_desc - interrupt descriptor
@@ -36,6 +38,11 @@ struct irq_desc;
* @threads_oneshot: bitfield to handle shared oneshot threads
* @threads_active: number of irqaction threads currently running
* @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
+ * @nr_actions: number of installed actions on this descriptor
+ * @no_suspend_depth: number of irqactions on a irq descriptor with
+ * IRQF_NO_SUSPEND set
+ * @force_resume_depth: number of irqactions on a irq descriptor with
+ * IRQF_FORCE_RESUME set
* @dir: /proc/irq/ procfs entry
* @name: flow handler name for /proc/interrupts output
*/
@@ -68,6 +75,11 @@ struct irq_desc {
unsigned long threads_oneshot;
atomic_t threads_active;
wait_queue_head_t wait_for_threads;
+#ifdef CONFIG_PM_SLEEP
+ unsigned int nr_actions;
+ unsigned int no_suspend_depth;
+ unsigned int force_resume_depth;
+#endif
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *dir;
#endif
@@ -118,6 +130,23 @@ static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *de
int generic_handle_irq(unsigned int irq);
+#ifdef CONFIG_HANDLE_DOMAIN_IRQ
+/*
+ * Convert a HW interrupt number to a logical one using a IRQ domain,
+ * and handle the result interrupt number. Return -EINVAL if
+ * conversion failed. Providing a NULL domain indicates that the
+ * conversion has already been done.
+ */
+int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
+ bool lookup, struct pt_regs *regs);
+
+static inline int handle_domain_irq(struct irq_domain *domain,
+ unsigned int hwirq, struct pt_regs *regs)
+{
+ return __handle_domain_irq(domain, hwirq, true, regs);
+}
+#endif
+
/* Test to see if a driver has successfully requested an irq */
static inline int irq_has_action(unsigned int irq)
{
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 95624bed87ef..e9e420b6d931 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -715,23 +715,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
(void) (&_max1 == &_max2); \
_max1 > _max2 ? _max1 : _max2; })
-#define min3(x, y, z) ({ \
- typeof(x) _min1 = (x); \
- typeof(y) _min2 = (y); \
- typeof(z) _min3 = (z); \
- (void) (&_min1 == &_min2); \
- (void) (&_min1 == &_min3); \
- _min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \
- (_min2 < _min3 ? _min2 : _min3); })
-
-#define max3(x, y, z) ({ \
- typeof(x) _max1 = (x); \
- typeof(y) _max2 = (y); \
- typeof(z) _max3 = (z); \
- (void) (&_max1 == &_max2); \
- (void) (&_max1 == &_max3); \
- _max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \
- (_max2 > _max3 ? _max2 : _max3); })
+#define min3(x, y, z) min((typeof(x))min(x, y), z)
+#define max3(x, y, z) max((typeof(x))max(x, y), z)
/**
* min_not_zero - return the minimum that is _not_ zero, unless both are zero
@@ -746,20 +731,13 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
/**
* clamp - return a value clamped to a given range with strict typechecking
* @val: current value
- * @min: minimum allowable value
- * @max: maximum allowable value
+ * @lo: lowest allowable value
+ * @hi: highest allowable value
*
- * This macro does strict typechecking of min/max to make sure they are of the
+ * This macro does strict typechecking of lo/hi to make sure they are of the
* same type as val. See the unnecessary pointer comparisons.
*/
-#define clamp(val, min, max) ({ \
- typeof(val) __val = (val); \
- typeof(min) __min = (min); \
- typeof(max) __max = (max); \
- (void) (&__val == &__min); \
- (void) (&__val == &__max); \
- __val = __val < __min ? __min: __val; \
- __val > __max ? __max: __val; })
+#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
/*
* ..and if you can't take the strict
@@ -781,36 +759,26 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
* clamp_t - return a value clamped to a given range using a given type
* @type: the type of variable to use
* @val: current value
- * @min: minimum allowable value
- * @max: maximum allowable value
+ * @lo: minimum allowable value
+ * @hi: maximum allowable value
*
* This macro does no typechecking and uses temporary variables of type
* 'type' to make all the comparisons.
*/
-#define clamp_t(type, val, min, max) ({ \
- type __val = (val); \
- type __min = (min); \
- type __max = (max); \
- __val = __val < __min ? __min: __val; \
- __val > __max ? __max: __val; })
+#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
/**
* clamp_val - return a value clamped to a given range using val's type
* @val: current value
- * @min: minimum allowable value
- * @max: maximum allowable value
+ * @lo: minimum allowable value
+ * @hi: maximum allowable value
*
* This macro does no typechecking and uses temporary variables of whatever
* type the input argument 'val' is. This is useful when val is an unsigned
* type and min and max are literals that will otherwise be assigned a signed
* integer type.
*/
-#define clamp_val(val, min, max) ({ \
- typeof(val) __val = (val); \
- typeof(val) __min = (min); \
- typeof(val) __max = (max); \
- __val = __val < __min ? __min: __val; \
- __val > __max ? __max: __val; })
+#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
/*
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 92abb497ab14..bd5fefeaf548 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1404,14 +1404,14 @@ static inline int sata_srst_pmp(struct ata_link *link)
* printk helpers
*/
__printf(3, 4)
-int ata_port_printk(const struct ata_port *ap, const char *level,
- const char *fmt, ...);
+void ata_port_printk(const struct ata_port *ap, const char *level,
+ const char *fmt, ...);
__printf(3, 4)
-int ata_link_printk(const struct ata_link *link, const char *level,
- const char *fmt, ...);
+void ata_link_printk(const struct ata_link *link, const char *level,
+ const char *fmt, ...);
__printf(3, 4)
-int ata_dev_printk(const struct ata_device *dev, const char *level,
- const char *fmt, ...);
+void ata_dev_printk(const struct ata_device *dev, const char *level,
+ const char *fmt, ...);
#define ata_port_err(ap, fmt, ...) \
ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__)
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 219d79627c05..ff82a32871b5 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -178,7 +178,6 @@ struct nlm_block {
unsigned char b_granted; /* VFS granted lock */
struct nlm_file * b_file; /* file in question */
struct cache_req * b_cache_req; /* deferred request handling */
- struct file_lock * b_fl; /* set for GETLK */
struct cache_deferred_req * b_deferred_req;
unsigned int b_flags; /* block flags */
#define B_QUEUED 1 /* lock queued */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e0752d204d9e..19df5d857411 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -440,11 +440,6 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order);
int memcg_cache_id(struct mem_cgroup *memcg);
-int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
- struct kmem_cache *root_cache);
-void memcg_free_cache_params(struct kmem_cache *s);
-
-int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
void memcg_update_array_size(int num_groups);
struct kmem_cache *
@@ -574,16 +569,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
return -1;
}
-static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
- struct kmem_cache *s, struct kmem_cache *root_cache)
-{
- return 0;
-}
-
-static inline void memcg_free_cache_params(struct kmem_cache *s)
-{
-}
-
static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index d9524c49d767..8f1a41951df9 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -84,6 +84,7 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long, int);
+extern int test_pages_in_a_zone(unsigned long, unsigned long);
extern void __offline_isolated_pages(unsigned long, unsigned long);
typedef void (*online_page_callback_t)(struct page *page);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index f230a978e6ba..3d385c81c153 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -134,9 +134,10 @@ void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
unsigned long idx);
-struct mempolicy *get_vma_policy(struct task_struct *tsk,
- struct vm_area_struct *vma, unsigned long addr);
-bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma);
+struct mempolicy *get_task_policy(struct task_struct *p);
+struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
+ unsigned long addr);
+bool vma_policy_mof(struct vm_area_struct *vma);
extern void numa_default_policy(void);
extern void numa_policy_init(void);
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 8f6f2e91e7ae..57388171610d 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -5,6 +5,7 @@
#include <linux/fb.h>
#include <linux/io.h>
#include <linux/jiffies.h>
+#include <linux/mmc/card.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -83,6 +84,27 @@
*/
#define TMIO_MMC_HAVE_HIGH_REG (1 << 6)
+/*
+ * Some controllers have a register that controls whether CMD12 is
+ * issued automatically
+ */
+#define TMIO_MMC_HAVE_CMD12_CTRL (1 << 7)
+
+/*
+ * Some controllers need the SDIO status reserved bits set to 1
+ */
+#define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8)
+
+/*
+ * Some controllers have DMA enable/disable register
+ */
+#define TMIO_MMC_HAVE_CTL_DMA_REG (1 << 9)
+
+/*
+ * Some controllers allow setting the SDx actual clock
+ */
+#define TMIO_MMC_CLK_ACTUAL (1 << 10)
+
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
@@ -96,6 +118,7 @@ struct tmio_mmc_dma {
int slave_id_tx;
int slave_id_rx;
int alignment_shift;
+ dma_addr_t dma_rx_offset;
bool (*filter)(struct dma_chan *chan, void *arg);
};
@@ -120,6 +143,8 @@ struct tmio_mmc_data {
/* clock management callbacks */
int (*clk_enable)(struct platform_device *pdev, unsigned int *f);
void (*clk_disable)(struct platform_device *pdev);
+ int (*multi_io_quirk)(struct mmc_card *card,
+ unsigned int direction, int blk_size);
};
/*
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index a2901c414664..01aad3ed89ec 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -13,18 +13,9 @@ typedef void free_page_t(struct page *page, unsigned long private);
 * Return values from address_space_operations.migratepage():
* - negative errno on page migration failure;
* - zero on page migration success;
- *
- * The balloon page migration introduces this special case where a 'distinct'
- * return code is used to flag a successful page migration to unmap_and_move().
- * This approach is necessary because page migration can race against balloon
- * deflation procedure, and for such case we could introduce a nasty page leak
- * if a successfully migrated balloon page gets released concurrently with
- * migration's unmap_and_move() wrap-up steps.
*/
#define MIGRATEPAGE_SUCCESS 0
-#define MIGRATEPAGE_BALLOON_SUCCESS 1 /* special ret code for balloon page
- * sucessful migration case.
- */
+
enum migrate_reason {
MR_COMPACTION,
MR_MEMORY_FAILURE,
@@ -82,9 +73,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
return -ENOSYS;
}
-/* Possible settings for the migrate_page() method in address_operations */
-#define migrate_page NULL
-
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_NUMA_BALANCING
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0f4196a0bc20..fa0d74e06428 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -18,6 +18,7 @@
#include <linux/pfn.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
+#include <linux/resource.h>
struct mempolicy;
struct anon_vma;
@@ -553,6 +554,25 @@ static inline void __ClearPageBuddy(struct page *page)
atomic_set(&page->_mapcount, -1);
}
+#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
+
+static inline int PageBalloon(struct page *page)
+{
+ return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
+}
+
+static inline void __SetPageBalloon(struct page *page)
+{
+ VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
+ atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
+}
+
+static inline void __ClearPageBalloon(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageBalloon(page), page);
+ atomic_set(&page->_mapcount, -1);
+}
+
void put_page(struct page *page);
void put_pages_list(struct list_head *pages);
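
A rough sketch of how a balloon driver might use the PageBalloon helpers added above; the function names are hypothetical and list handling/locking are elided.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Grab a movable page and tag it as balloon-owned so the core VM can
 * recognise it without a special address_space mapping. */
static struct page *my_balloon_grab_page(void)
{
	struct page *page = alloc_page(GFP_HIGHUSER_MOVABLE);

	if (page)
		__SetPageBalloon(page);
	return page;
}

static void my_balloon_release_page(struct page *page)
{
	__ClearPageBalloon(page);
	__free_page(page);
}
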
@@ -1247,8 +1267,8 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
!vma_growsup(vma->vm_next, addr);
}
-extern pid_t
-vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
+extern struct task_struct *task_of_stack(struct task_struct *task,
+ struct vm_area_struct *vma, bool in_group);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
@@ -1780,6 +1800,20 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);
+static inline int check_data_rlimit(unsigned long rlim,
+ unsigned long new,
+ unsigned long start,
+ unsigned long end_data,
+ unsigned long start_data)
+{
+ if (rlim < RLIM_INFINITY) {
+ if (((new - start) + (end_data - start_data)) > rlim)
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
extern int mm_take_all_locks(struct mm_struct *mm);
extern void mm_drop_all_locks(struct mm_struct *mm);
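
A sketch of how a brk-style caller might use the new check_data_rlimit() helper; the wrapper name is invented and the real call site is not part of this diff.

#include <linux/mm.h>
#include <linux/sched.h>

/* Reject the request if growing the break to @newbrk would push the data
 * segment past RLIMIT_DATA. */
static int my_check_brk(struct mm_struct *mm, unsigned long newbrk)
{
	return check_data_rlimit(rlimit(RLIMIT_DATA), newbrk, mm->start_brk,
				 mm->end_data, mm->start_data);
}
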
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index d424b9de3aff..b0692d28f8e6 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -42,7 +42,8 @@ struct mmc_csd {
unsigned int read_partial:1,
read_misalign:1,
write_partial:1,
- write_misalign:1;
+ write_misalign:1,
+ dsr_imp:1;
};
struct mmc_ext_csd {
@@ -74,7 +75,7 @@ struct mmc_ext_csd {
unsigned int sec_trim_mult; /* Secure trim multiplier */
unsigned int sec_erase_mult; /* Secure erase multiplier */
unsigned int trim_timeout; /* In milliseconds */
- bool enhanced_area_en; /* enable bit */
+ bool partition_setting_completed; /* enable bit */
unsigned long long enhanced_area_offset; /* Units: Byte */
unsigned int enhanced_area_size; /* Units: KB */
unsigned int cache_size; /* Units: KB */
@@ -214,11 +215,12 @@ enum mmc_blk_status {
};
/* The number of MMC physical partitions. These consist of:
- * boot partitions (2), general purpose partitions (4) in MMC v4.4.
+ * boot partitions (2), general purpose partitions (4) and
+ * RPMB partition (1) in MMC v4.4.
*/
#define MMC_NUM_BOOT_PARTITION 2
#define MMC_NUM_GP_PARTITION 4
-#define MMC_NUM_PHY_PARTITION 6
+#define MMC_NUM_PHY_PARTITION 7
#define MAX_MMC_PART_NAME_LEN 20
/*
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 29ce014ab421..001366927cf4 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -26,6 +26,8 @@ enum dw_mci_state {
STATE_DATA_BUSY,
STATE_SENDING_STOP,
STATE_DATA_ERROR,
+ STATE_SENDING_CMD11,
+ STATE_WAITING_CMD11_DONE,
};
enum {
@@ -188,7 +190,7 @@ struct dw_mci {
/* Workaround flags */
u32 quirks;
- struct regulator *vmmc; /* Power regulator */
+ bool vqmmc_enabled;
unsigned long irq_flags; /* IRQ flags */
int irq;
};
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 7960424d0bc0..df0c15396bbf 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -42,6 +42,7 @@ struct mmc_ios {
#define MMC_POWER_OFF 0
#define MMC_POWER_UP 1
#define MMC_POWER_ON 2
+#define MMC_POWER_UNDEFINED 3
unsigned char bus_width; /* data bus width */
@@ -139,6 +140,13 @@ struct mmc_host_ops {
int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv);
void (*hw_reset)(struct mmc_host *host);
void (*card_event)(struct mmc_host *host);
+
+ /*
+ * Optional callback to support controllers with HW issues for multiple
+ * I/O. Returns the number of supported blocks for the request.
+ */
+ int (*multi_io_quirk)(struct mmc_card *card,
+ unsigned int direction, int blk_size);
};
struct mmc_card;
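
A hypothetical host-driver implementation of the new multi_io_quirk callback, capping writes at a single block because of an imagined controller erratum; the limit and names are made up.

#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

static int my_host_multi_io_quirk(struct mmc_card *card,
				  unsigned int direction, int blk_size)
{
	if (direction == MMC_DATA_WRITE)
		return 1;		/* fall back to single-block writes */
	return blk_size;		/* reads are unaffected */
}

/* Hooked up via the host driver's ops table:
 *	.multi_io_quirk	= my_host_multi_io_quirk,
 */
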
@@ -265,7 +273,6 @@ struct mmc_host {
#define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */
#define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */
-#define MMC_CAP2_NO_MULTI_READ (1 << 3) /* Multiblock reads don't work */
#define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */
#define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */
#define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \
@@ -365,6 +372,9 @@ struct mmc_host {
unsigned int slotno; /* used for sdio acpi binding */
+ int dsr_req; /* DSR value is valid */
+ u32 dsr; /* optional driver stage (DSR) value */
+
unsigned long private[0] ____cacheline_aligned;
};
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 64ec963ed347..1cd00b3a75b9 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -53,6 +53,11 @@
#define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */
#define MMC_SEND_TUNING_BLOCK_HS200 21 /* adtc R1 */
+#define MMC_TUNING_BLK_PATTERN_4BIT_SIZE 64
+#define MMC_TUNING_BLK_PATTERN_8BIT_SIZE 128
+extern const u8 tuning_blk_pattern_4bit[MMC_TUNING_BLK_PATTERN_4BIT_SIZE];
+extern const u8 tuning_blk_pattern_8bit[MMC_TUNING_BLK_PATTERN_8BIT_SIZE];
+
/* class 3 */
#define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */
@@ -281,6 +286,7 @@ struct _mmc_csd {
#define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */
#define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */
#define EXT_CSD_GP_SIZE_MULT 143 /* R/W */
+#define EXT_CSD_PARTITION_SETTING_COMPLETED 155 /* R/W */
#define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */
#define EXT_CSD_PARTITION_SUPPORT 160 /* RO */
#define EXT_CSD_HPI_MGMT 161 /* R/W */
@@ -349,6 +355,7 @@ struct _mmc_csd {
#define EXT_CSD_PART_CONFIG_ACC_RPMB (0x3)
#define EXT_CSD_PART_CONFIG_ACC_GP0 (0x4)
+#define EXT_CSD_PART_SETTING_COMPLETED (0x1)
#define EXT_CSD_PART_SUPPORT_PART_EN (0x1)
#define EXT_CSD_CMD_SET_NORMAL (1<<0)
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 09ebe57d5ce9..dba793e3a331 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -98,6 +98,8 @@ struct sdhci_host {
#define SDHCI_QUIRK2_BROKEN_HS200 (1<<6)
/* Controller does not support DDR50 */
#define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7)
+/* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */
+#define SDHCI_QUIRK2_STOP_WITH_TC (1<<8)
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
@@ -146,6 +148,7 @@ struct sdhci_host {
struct mmc_command *cmd; /* Current command */
struct mmc_data *data; /* Current data request */
unsigned int data_early:1; /* Data finished before cmd */
+ unsigned int busy_handle:1; /* Handling the order of Busy-end */
struct sg_mapping_iter sg_miter; /* SG state for PIO */
unsigned int blocks; /* remaining PIO blocks */
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index d2433381e828..e56fa24c9322 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -24,7 +24,10 @@ void mmc_gpio_free_cd(struct mmc_host *host);
int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
- unsigned int debounce);
+ unsigned int debounce, bool *gpio_invert);
+int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
+ unsigned int idx, bool override_active_level,
+ unsigned int debounce, bool *gpio_invert);
void mmc_gpiod_free_cd(struct mmc_host *host);
void mmc_gpiod_request_cd_irq(struct mmc_host *host);
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 2f348d02f640..877ef226f90f 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -4,10 +4,14 @@
#include <linux/stringify.h>
struct page;
+struct vm_area_struct;
+struct mm_struct;
extern void dump_page(struct page *page, const char *reason);
extern void dump_page_badflags(struct page *page, const char *reason,
unsigned long badflags);
+void dump_vma(const struct vm_area_struct *vma);
+void dump_mm(const struct mm_struct *mm);
#ifdef CONFIG_DEBUG_VM
#define VM_BUG_ON(cond) BUG_ON(cond)
@@ -18,12 +22,28 @@ extern void dump_page_badflags(struct page *page, const char *reason,
BUG(); \
} \
} while (0)
+#define VM_BUG_ON_VMA(cond, vma) \
+ do { \
+ if (unlikely(cond)) { \
+ dump_vma(vma); \
+ BUG(); \
+ } \
+ } while (0)
+#define VM_BUG_ON_MM(cond, mm) \
+ do { \
+ if (unlikely(cond)) { \
+ dump_mm(mm); \
+ BUG(); \
+ } \
+ } while (0)
#define VM_WARN_ON(cond) WARN_ON(cond)
#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond)
#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
#else
#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
+#define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond)
+#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
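
An illustrative use of the new asserts (the checks themselves are arbitrary examples): with CONFIG_DEBUG_VM they dump the offending vma or mm before BUG(), which is far more useful in a crash report than the bare condition.

#include <linux/mm.h>
#include <linux/mmdebug.h>
#include <linux/rwsem.h>

static void my_sanity_check(struct mm_struct *mm, struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(vma->vm_start >= vma->vm_end, vma);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
}
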
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 318df7051850..48bf12ef6620 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -521,13 +521,13 @@ struct zone {
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;
-typedef enum {
+enum zone_flags {
ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */
ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */
ZONE_CONGESTED, /* zone has many dirty pages backed by
* a congested BDI
*/
- ZONE_TAIL_LRU_DIRTY, /* reclaim scanning has recently found
+ ZONE_DIRTY, /* reclaim scanning has recently found
* many dirty file pages at the tail
* of the LRU.
*/
@@ -535,52 +535,7 @@ typedef enum {
* many pages under writeback
*/
ZONE_FAIR_DEPLETED, /* fair zone policy batch depleted */
-} zone_flags_t;
-
-static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
-{
- set_bit(flag, &zone->flags);
-}
-
-static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
-{
- return test_and_set_bit(flag, &zone->flags);
-}
-
-static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
-{
- clear_bit(flag, &zone->flags);
-}
-
-static inline int zone_is_reclaim_congested(const struct zone *zone)
-{
- return test_bit(ZONE_CONGESTED, &zone->flags);
-}
-
-static inline int zone_is_reclaim_dirty(const struct zone *zone)
-{
- return test_bit(ZONE_TAIL_LRU_DIRTY, &zone->flags);
-}
-
-static inline int zone_is_reclaim_writeback(const struct zone *zone)
-{
- return test_bit(ZONE_WRITEBACK, &zone->flags);
-}
-
-static inline int zone_is_reclaim_locked(const struct zone *zone)
-{
- return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
-}
-
-static inline int zone_is_fair_depleted(const struct zone *zone)
-{
- return test_bit(ZONE_FAIR_DEPLETED, &zone->flags);
-}
-
-static inline int zone_is_oom_locked(const struct zone *zone)
-{
- return test_bit(ZONE_OOM_LOCKED, &zone->flags);
-}
+};
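
With the zone_*_flag() wrappers removed, callers operate on zone->flags with the generic bitops directly; a sketch with the surrounding reclaim logic elided.

#include <linux/bitops.h>
#include <linux/mmzone.h>

static bool my_zone_is_dirty(struct zone *zone)
{
	return test_bit(ZONE_DIRTY, &zone->flags);
}

static void my_zone_mark_congested(struct zone *zone)
{
	set_bit(ZONE_CONGESTED, &zone->flags);
}
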
static inline unsigned long zone_end_pfn(const struct zone *zone)
{
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8103f32f6d87..44f4746d033b 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -29,7 +29,6 @@ struct msi_desc {
__u8 multi_cap : 3; /* log2 num of messages supported */
__u8 maskbit : 1; /* mask-pending bit supported ? */
__u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
- __u8 pos; /* Location of the msi capability */
__u16 entry_nr; /* specific enabled entry */
unsigned default_irq; /* default pre-assigned irq */
} msi_attrib;
@@ -47,8 +46,6 @@ struct msi_desc {
/* Last set MSI message */
struct msi_msg msg;
-
- struct kobject kobj;
};
/*
@@ -60,7 +57,6 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
-int arch_msi_check_device(struct pci_dev* dev, int nvec, int type);
void arch_restore_msi_irqs(struct pci_dev *dev);
void default_teardown_msi_irqs(struct pci_dev *dev);
@@ -77,8 +73,6 @@ struct msi_chip {
int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev,
struct msi_desc *desc);
void (*teardown_irq)(struct msi_chip *chip, unsigned int irq);
- int (*check_device)(struct msi_chip *chip, struct pci_dev *dev,
- int nvec, int type);
};
#endif /* LINUX_MSI_H */
diff --git a/include/linux/of.h b/include/linux/of.h
index 6c4363b8ddc3..6545e7aec7bb 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -863,4 +863,7 @@ static inline int of_changeset_update_property(struct of_changeset *ocs,
}
#endif
+/* CONFIG_OF_RESOLVE api */
+extern int of_resolve_phandles(struct device_node *tree);
+
#endif /* _LINUX_OF_H */
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index fb7b7221e063..8cb14eb393d6 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -23,17 +23,6 @@ struct of_pci_range {
#define for_each_of_pci_range(parser, range) \
for (; of_pci_range_parser_one(parser, range);)
-static inline void of_pci_range_to_resource(struct of_pci_range *range,
- struct device_node *np,
- struct resource *res)
-{
- res->flags = range->flags;
- res->start = range->cpu_addr;
- res->end = range->cpu_addr + range->size - 1;
- res->parent = res->child = res->sibling = NULL;
- res->name = np->full_name;
-}
-
/* Translate a DMA address from device space to CPU space */
extern u64 of_translate_dma_address(struct device_node *dev,
const __be32 *in_addr);
@@ -55,7 +44,9 @@ extern void __iomem *of_iomap(struct device_node *device, int index);
extern const __be32 *of_get_address(struct device_node *dev, int index,
u64 *size, unsigned int *flags);
+extern int pci_register_io_range(phys_addr_t addr, resource_size_t size);
extern unsigned long pci_address_to_pio(phys_addr_t addr);
+extern phys_addr_t pci_pio_to_address(unsigned long pio);
extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node);
@@ -80,6 +71,11 @@ static inline const __be32 *of_get_address(struct device_node *dev, int index,
return NULL;
}
+static inline phys_addr_t pci_pio_to_address(unsigned long pio)
+{
+ return 0;
+}
+
static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser,
struct device_node *node)
{
@@ -138,6 +134,9 @@ extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
u64 *size, unsigned int *flags);
extern int of_pci_address_to_resource(struct device_node *dev, int bar,
struct resource *r);
+extern int of_pci_range_to_resource(struct of_pci_range *range,
+ struct device_node *np,
+ struct resource *res);
#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */
static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
struct resource *r)
@@ -150,6 +149,12 @@ static inline const __be32 *of_get_pci_address(struct device_node *dev,
{
return NULL;
}
+static inline int of_pci_range_to_resource(struct of_pci_range *range,
+ struct device_node *np,
+ struct resource *res)
+{
+ return -ENOSYS;
+}
#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */
#endif /* __OF_ADDRESS_H */
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index dde3a4a0fa5d..1fd207e7a847 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -15,6 +15,7 @@ struct device_node *of_pci_find_child_device(struct device_node *parent,
int of_pci_get_devfn(struct device_node *np);
int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
+int of_get_pci_domain_nr(struct device_node *node);
#else
static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
@@ -43,6 +44,18 @@ of_pci_parse_bus_range(struct device_node *node, struct resource *res)
{
return -EINVAL;
}
+
+static inline int
+of_get_pci_domain_nr(struct device_node *node)
+{
+ return -1;
+}
+#endif
+
+#if defined(CONFIG_OF_ADDRESS)
+int of_pci_get_host_bridge_resources(struct device_node *dev,
+ unsigned char busno, unsigned char bus_max,
+ struct list_head *resources, resource_size_t *io_base);
#endif
#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 19191d39c4f3..7ea069cd3257 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -24,8 +24,7 @@ enum mapping_flags {
AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
- AS_BALLOON_MAP = __GFP_BITS_SHIFT + 4, /* balloon page special map */
- AS_EXITING = __GFP_BITS_SHIFT + 5, /* final truncate in progress */
+ AS_EXITING = __GFP_BITS_SHIFT + 4, /* final truncate in progress */
};
static inline void mapping_set_error(struct address_space *mapping, int error)
@@ -55,21 +54,6 @@ static inline int mapping_unevictable(struct address_space *mapping)
return !!mapping;
}
-static inline void mapping_set_balloon(struct address_space *mapping)
-{
- set_bit(AS_BALLOON_MAP, &mapping->flags);
-}
-
-static inline void mapping_clear_balloon(struct address_space *mapping)
-{
- clear_bit(AS_BALLOON_MAP, &mapping->flags);
-}
-
-static inline int mapping_balloon(struct address_space *mapping)
-{
- return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags);
-}
-
static inline void mapping_set_exiting(struct address_space *mapping)
{
set_bit(AS_EXITING, &mapping->flags);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 96453f9bc8ba..5be8db45e368 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -45,7 +45,7 @@
* In the interest of not exposing interfaces to user-space unnecessarily,
* the following kernel-only defines are being added here.
*/
-#define PCI_DEVID(bus, devfn) ((((u16)bus) << 8) | devfn)
+#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
@@ -457,6 +457,9 @@ struct pci_bus {
unsigned char primary; /* number of primary bridge */
unsigned char max_bus_speed; /* enum pci_bus_speed */
unsigned char cur_bus_speed; /* enum pci_bus_speed */
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+ int domain_nr;
+#endif
char name[48];
@@ -1103,6 +1106,9 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
resource_size_t),
void *alignf_data);
+
+int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
+
static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
struct pci_bus_region region;
@@ -1288,12 +1294,32 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
*/
#ifdef CONFIG_PCI_DOMAINS
extern int pci_domains_supported;
+int pci_get_new_domain_nr(void);
#else
enum { pci_domains_supported = 0 };
static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
+static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#endif /* CONFIG_PCI_DOMAINS */
+/*
+ * Generic implementation for PCI domain support. If your
+ * architecture does not need custom management of PCI
+ * domains then this implementation will be used.
+ */
+#ifdef CONFIG_PCI_DOMAINS_GENERIC
+static inline int pci_domain_nr(struct pci_bus *bus)
+{
+ return bus->domain_nr;
+}
+void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent);
+#else
+static inline void pci_bus_assign_domain_nr(struct pci_bus *bus,
+ struct device *parent)
+{
+}
+#endif
+
/* some architectures require additional setup to direct VGA traffic */
typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
unsigned int command_bits, u32 flags);
@@ -1402,6 +1428,7 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
+static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#define dev_is_pci(d) (false)
#define dev_is_pf(d) (false)
@@ -1563,16 +1590,11 @@ enum pci_fixup_pass {
#ifdef CONFIG_PCI_QUIRKS
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
-struct pci_dev *pci_get_dma_source(struct pci_dev *dev);
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
void pci_dev_specific_enable_acs(struct pci_dev *dev);
#else
static inline void pci_fixup_device(enum pci_fixup_pass pass,
struct pci_dev *dev) { }
-static inline struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
-{
- return pci_dev_get(dev);
-}
static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
u16 acs_flags)
{
@@ -1707,7 +1729,7 @@ bool pci_acs_path_enabled(struct pci_dev *start,
struct pci_dev *end, u16 acs_flags);
#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
-#define PCI_VPD_LRDT_ID(x) (x | PCI_VPD_LRDT)
+#define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
/* Large Resource Data Type Tag Item Names */
#define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */
@@ -1834,15 +1856,17 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
int (*fn)(struct pci_dev *pdev,
u16 alias, void *data), void *data);
-/**
- * pci_find_upstream_pcie_bridge - find upstream PCIe-to-PCI bridge of a device
- * @pdev: the PCI device
- *
- * if the device is PCIE, return NULL
- * if the device isn't connected to a PCIe bridge (that is its parent is a
- * legacy PCI bridge and the bridge is directly connected to bus 0), return its
- * parent
- */
-struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
-
+/* helper functions for operation of device flag */
+static inline void pci_set_dev_assigned(struct pci_dev *pdev)
+{
+ pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
+}
+static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
+{
+ pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+}
+static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
+{
+ return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
+}
#endif /* LINUX_PCI_H */
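
A sketch of a device-assignment path (KVM/VFIO style) using the new helpers instead of open-coding dev_flags; the function names are invented.

#include <linux/pci.h>

static void my_assign_device(struct pci_dev *pdev)
{
	pci_set_dev_assigned(pdev);	/* mark the function as handed to a guest */
}

static void my_unassign_device(struct pci_dev *pdev)
{
	if (pci_is_dev_assigned(pdev))
		pci_clear_dev_assigned(pdev);
}
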
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 5f2e559af6b0..2706ee9a4327 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -187,6 +187,4 @@ static inline int pci_get_hp_params(struct pci_dev *dev,
return -ENODEV;
}
#endif
-
-void pci_configure_slot(struct pci_dev *dev);
#endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 6ed0bb73a864..2338e68398cb 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2245,6 +2245,8 @@
#define PCI_VENDOR_ID_MORETON 0x15aa
#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000
+#define PCI_VENDOR_ID_VMWARE 0x15ad
+
#define PCI_VENDOR_ID_ZOLTRIX 0x15b0
#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0
@@ -2818,7 +2820,22 @@
#define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43
#define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44
#define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2 0x3c76 /* 16.6 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0 0x3ca0 /* 14.0 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA 0x3ca8 /* 15.0 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 0x3caa /* 15.2 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */
#define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */
+#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */
#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 68a64f11ce02..d5c89e0dd0e6 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -13,7 +13,7 @@
*
* The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
* than an atomic_t - this is because of the way shutdown works, see
- * percpu_ref_kill()/PCPU_COUNT_BIAS.
+ * percpu_ref_kill()/PERCPU_COUNT_BIAS.
*
* Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
* refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
@@ -49,29 +49,60 @@
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
+#include <linux/gfp.h>
struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);
+/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
+enum {
+ __PERCPU_REF_ATOMIC = 1LU << 0, /* operating in atomic mode */
+ __PERCPU_REF_DEAD = 1LU << 1, /* (being) killed */
+ __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,
+
+ __PERCPU_REF_FLAG_BITS = 2,
+};
+
+/* @flags for percpu_ref_init() */
+enum {
+ /*
+ * Start w/ ref == 1 in atomic mode. Can be switched to percpu
+ * operation using percpu_ref_switch_to_percpu(). If initialized
+ * with this flag, the ref will stay in atomic mode until
+ * percpu_ref_switch_to_percpu() is invoked on it.
+ */
+ PERCPU_REF_INIT_ATOMIC = 1 << 0,
+
+ /*
+ * Start dead w/ ref == 0 in atomic mode. Must be revived with
+ * percpu_ref_reinit() before used. Implies INIT_ATOMIC.
+ */
+ PERCPU_REF_INIT_DEAD = 1 << 1,
+};
+
struct percpu_ref {
- atomic_t count;
+ atomic_long_t count;
/*
* The low bit of the pointer indicates whether the ref is in percpu
* mode; if set, then get/put will manipulate the atomic_t.
*/
- unsigned long pcpu_count_ptr;
+ unsigned long percpu_count_ptr;
percpu_ref_func_t *release;
- percpu_ref_func_t *confirm_kill;
+ percpu_ref_func_t *confirm_switch;
+ bool force_atomic:1;
struct rcu_head rcu;
};
int __must_check percpu_ref_init(struct percpu_ref *ref,
- percpu_ref_func_t *release);
-void percpu_ref_reinit(struct percpu_ref *ref);
+ percpu_ref_func_t *release, unsigned int flags,
+ gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
+void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch);
+void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill);
-void __percpu_ref_kill_expedited(struct percpu_ref *ref);
+void percpu_ref_reinit(struct percpu_ref *ref);
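
A sketch of the updated init API with the new @flags and @gfp arguments; the release callback and the choice of atomic mode here are illustrative.

#include <linux/gfp.h>
#include <linux/percpu-refcount.h>

static void my_release(struct percpu_ref *ref)
{
	/* free the object that embeds @ref here */
}

static int my_setup(struct percpu_ref *ref)
{
	/* Start in atomic mode; percpu_ref_switch_to_percpu() can be called
	 * later once switching is known to be safe. */
	return percpu_ref_init(ref, my_release, PERCPU_REF_INIT_ATOMIC,
			       GFP_KERNEL);
}
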
/**
* percpu_ref_kill - drop the initial ref
@@ -88,26 +119,24 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
return percpu_ref_kill_and_confirm(ref, NULL);
}
-#define PCPU_REF_DEAD 1
-
/*
* Internal helper. Don't use outside percpu-refcount proper. The
* function doesn't return the pointer and let the caller test it for NULL
* because doing so forces the compiler to generate two conditional
- * branches as it can't assume that @ref->pcpu_count is not NULL.
+ * branches as it can't assume that @ref->percpu_count is not NULL.
*/
-static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
- unsigned __percpu **pcpu_countp)
+static inline bool __ref_is_percpu(struct percpu_ref *ref,
+ unsigned long __percpu **percpu_countp)
{
- unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);
+ unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
/* paired with smp_store_release() in percpu_ref_reinit() */
smp_read_barrier_depends();
- if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
+ if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
return false;
- *pcpu_countp = (unsigned __percpu *)pcpu_ptr;
+ *percpu_countp = (unsigned long __percpu *)percpu_ptr;
return true;
}
@@ -115,18 +144,20 @@ static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
* percpu_ref_get - increment a percpu refcount
* @ref: percpu_ref to get
*
- * Analagous to atomic_inc().
- */
+ * Analogous to atomic_long_inc().
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count;
+ unsigned long __percpu *percpu_count;
rcu_read_lock_sched();
- if (__pcpu_ref_alive(ref, &pcpu_count))
- this_cpu_inc(*pcpu_count);
+ if (__ref_is_percpu(ref, &percpu_count))
+ this_cpu_inc(*percpu_count);
else
- atomic_inc(&ref->count);
+ atomic_long_inc(&ref->count);
rcu_read_unlock_sched();
}
@@ -138,20 +169,20 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
* Increment a percpu refcount unless its count already reached zero.
* Returns %true on success; %false on failure.
*
- * The caller is responsible for ensuring that @ref stays accessible.
+ * This function is safe to call as long as @ref is between init and exit.
*/
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count;
- int ret = false;
+ unsigned long __percpu *percpu_count;
+ int ret;
rcu_read_lock_sched();
- if (__pcpu_ref_alive(ref, &pcpu_count)) {
- this_cpu_inc(*pcpu_count);
+ if (__ref_is_percpu(ref, &percpu_count)) {
+ this_cpu_inc(*percpu_count);
ret = true;
} else {
- ret = atomic_inc_not_zero(&ref->count);
+ ret = atomic_long_inc_not_zero(&ref->count);
}
rcu_read_unlock_sched();
@@ -166,23 +197,26 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
* Increment a percpu refcount unless it has already been killed. Returns
* %true on success; %false on failure.
*
- * Completion of percpu_ref_kill() in itself doesn't guarantee that tryget
- * will fail. For such guarantee, percpu_ref_kill_and_confirm() should be
- * used. After the confirm_kill callback is invoked, it's guaranteed that
- * no new reference will be given out by percpu_ref_tryget().
+ * Completion of percpu_ref_kill() in itself doesn't guarantee that this
+ * function will fail. For such guarantee, percpu_ref_kill_and_confirm()
+ * should be used. After the confirm_kill callback is invoked, it's
+ * guaranteed that no new reference will be given out by
+ * percpu_ref_tryget_live().
*
- * The caller is responsible for ensuring that @ref stays accessible.
+ * This function is safe to call as long as @ref is between init and exit.
*/
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count;
+ unsigned long __percpu *percpu_count;
int ret = false;
rcu_read_lock_sched();
- if (__pcpu_ref_alive(ref, &pcpu_count)) {
- this_cpu_inc(*pcpu_count);
+ if (__ref_is_percpu(ref, &percpu_count)) {
+ this_cpu_inc(*percpu_count);
ret = true;
+ } else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) {
+ ret = atomic_long_inc_not_zero(&ref->count);
}
rcu_read_unlock_sched();
@@ -196,16 +230,18 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
*
* Decrement the refcount, and if 0, call the release function (which was passed
* to percpu_ref_init())
+ *
+ * This function is safe to call as long as @ref is between init and exit.
*/
static inline void percpu_ref_put(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count;
+ unsigned long __percpu *percpu_count;
rcu_read_lock_sched();
- if (__pcpu_ref_alive(ref, &pcpu_count))
- this_cpu_dec(*pcpu_count);
- else if (unlikely(atomic_dec_and_test(&ref->count)))
+ if (__ref_is_percpu(ref, &percpu_count))
+ this_cpu_dec(*percpu_count);
+ else if (unlikely(atomic_long_dec_and_test(&ref->count)))
ref->release(ref);
rcu_read_unlock_sched();
@@ -216,14 +252,16 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
* @ref: percpu_ref to test
*
* Returns %true if @ref reached zero.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
*/
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
- unsigned __percpu *pcpu_count;
+ unsigned long __percpu *percpu_count;
- if (__pcpu_ref_alive(ref, &pcpu_count))
+ if (__ref_is_percpu(ref, &percpu_count))
return false;
- return !atomic_read(&ref->count);
+ return !atomic_long_read(&ref->count);
}
#endif
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 6f61b61b7996..a3aa63e47637 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -48,9 +48,9 @@
* intelligent way to determine this would be nice.
*/
#if BITS_PER_LONG > 32
-#define PERCPU_DYNAMIC_RESERVE (20 << 10)
+#define PERCPU_DYNAMIC_RESERVE (28 << 10)
#else
-#define PERCPU_DYNAMIC_RESERVE (12 << 10)
+#define PERCPU_DYNAMIC_RESERVE (20 << 10)
#endif
extern void *pcpu_base_addr;
@@ -122,11 +122,16 @@ extern void __init setup_per_cpu_areas(void);
#endif
extern void __init percpu_init_late(void);
+extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
extern void __percpu *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
-#define alloc_percpu(type) \
- (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
+#define alloc_percpu_gfp(type, gfp) \
+ (typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type), \
+ __alignof__(type), gfp)
+#define alloc_percpu(type) \
+ (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \
+ __alignof__(type))
#endif /* __LINUX_PERCPU_H */
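
A sketch of the new gfp-aware allocator, e.g. from a context that must not sleep; the counter type is illustrative.

#include <linux/gfp.h>
#include <linux/percpu.h>

static u64 __percpu *my_alloc_counters_atomic(void)
{
	/* Same as alloc_percpu(u64), but usable where GFP_KERNEL would sleep. */
	return alloc_percpu_gfp(u64, GFP_NOWAIT);
}
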
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index d5dd4657c8d6..50e50095c8d1 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -12,6 +12,7 @@
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
+#include <linux/gfp.h>
#ifdef CONFIG_SMP
@@ -26,14 +27,14 @@ struct percpu_counter {
extern int percpu_counter_batch;
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
struct lock_class_key *key);
-#define percpu_counter_init(fbc, value) \
+#define percpu_counter_init(fbc, value, gfp) \
({ \
static struct lock_class_key __key; \
\
- __percpu_counter_init(fbc, value, &__key); \
+ __percpu_counter_init(fbc, value, gfp, &__key); \
})
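
Callers now pick the gfp mask at init time; a sketch of the updated call, where GFP_KERNEL is merely the common case.

#include <linux/gfp.h>
#include <linux/percpu_counter.h>

static int my_counter_setup(struct percpu_counter *fbc)
{
	return percpu_counter_init(fbc, 0, GFP_KERNEL);
}
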
void percpu_counter_destroy(struct percpu_counter *fbc);
@@ -89,7 +90,8 @@ struct percpu_counter {
s64 count;
};
-static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
+static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+ gfp_t gfp)
{
fbc->count = amount;
return 0;
diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h
new file mode 100644
index 000000000000..28702c849af1
--- /dev/null
+++ b/include/linux/platform_data/gpio-dwapb.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef GPIO_DW_APB_H
+#define GPIO_DW_APB_H
+
+struct dwapb_port_property {
+ struct device_node *node;
+ const char *name;
+ unsigned int idx;
+ unsigned int ngpio;
+ unsigned int gpio_base;
+ unsigned int irq;
+ bool irq_shared;
+};
+
+struct dwapb_platform_data {
+ struct dwapb_port_property *properties;
+ unsigned int nports;
+};
+
+#endif
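
A hypothetical board file describing one 32-line port behind a shared IRQ; every value below is made up and the data would be handed to the "gpio-dwapb" platform device as platform_data.

#include <linux/kernel.h>
#include <linux/platform_data/gpio-dwapb.h>

static struct dwapb_port_property my_dwapb_ports[] = {
	{
		.name		= "porta",
		.idx		= 0,
		.ngpio		= 32,
		.irq		= 17,
		.irq_shared	= true,
	},
};

static struct dwapb_platform_data my_dwapb_pdata = {
	.properties	= my_dwapb_ports,
	.nports		= ARRAY_SIZE(my_dwapb_ports),
};
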
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 72c0fe098a27..383fd68aaee1 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -619,6 +619,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
*/
struct dev_pm_domain {
struct dev_pm_ops ops;
+ void (*detach)(struct device *dev, bool power_off);
};
/*
@@ -679,12 +680,16 @@ struct dev_pm_domain {
extern void device_pm_lock(void);
extern void dpm_resume_start(pm_message_t state);
extern void dpm_resume_end(pm_message_t state);
+extern void dpm_resume_noirq(pm_message_t state);
+extern void dpm_resume_early(pm_message_t state);
extern void dpm_resume(pm_message_t state);
extern void dpm_complete(pm_message_t state);
extern void device_pm_unlock(void);
extern int dpm_suspend_end(pm_message_t state);
extern int dpm_suspend_start(pm_message_t state);
+extern int dpm_suspend_noirq(pm_message_t state);
+extern int dpm_suspend_late(pm_message_t state);
extern int dpm_suspend(pm_message_t state);
extern int dpm_prepare(pm_message_t state);
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index ebc4c76ffb73..73e938b7e937 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -35,18 +35,10 @@ struct gpd_dev_ops {
int (*stop)(struct device *dev);
int (*save_state)(struct device *dev);
int (*restore_state)(struct device *dev);
- int (*suspend)(struct device *dev);
- int (*suspend_late)(struct device *dev);
- int (*resume_early)(struct device *dev);
- int (*resume)(struct device *dev);
- int (*freeze)(struct device *dev);
- int (*freeze_late)(struct device *dev);
- int (*thaw_early)(struct device *dev);
- int (*thaw)(struct device *dev);
bool (*active_wakeup)(struct device *dev);
};
-struct gpd_cpu_data {
+struct gpd_cpuidle_data {
unsigned int saved_exit_latency;
struct cpuidle_state *idle_state;
};
@@ -71,7 +63,6 @@ struct generic_pm_domain {
unsigned int suspended_count; /* System suspend device counter */
unsigned int prepared_count; /* Suspend counter of prepared devices */
bool suspend_power_off; /* Power status before system suspend */
- bool dev_irq_safe; /* Device callbacks are IRQ-safe */
int (*power_off)(struct generic_pm_domain *domain);
s64 power_off_latency_ns;
int (*power_on)(struct generic_pm_domain *domain);
@@ -80,8 +71,9 @@ struct generic_pm_domain {
s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
bool max_off_time_changed;
bool cached_power_down_ok;
- struct device_node *of_node; /* Node in device tree */
- struct gpd_cpu_data *cpu_data;
+ struct gpd_cpuidle_data *cpuidle_data;
+ void (*attach_dev)(struct device *dev);
+ void (*detach_dev)(struct device *dev);
};
static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -108,7 +100,6 @@ struct gpd_timing_data {
struct generic_pm_domain_data {
struct pm_domain_data base;
- struct gpd_dev_ops ops;
struct gpd_timing_data td;
struct notifier_block nb;
struct mutex lock;
@@ -127,17 +118,11 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
return to_gpd_data(dev->power.subsys_data->domain_data);
}
-extern struct dev_power_governor simple_qos_governor;
-
extern struct generic_pm_domain *dev_to_genpd(struct device *dev);
extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
struct device *dev,
struct gpd_timing_data *td);
-extern int __pm_genpd_of_add_device(struct device_node *genpd_node,
- struct device *dev,
- struct gpd_timing_data *td);
-
extern int __pm_genpd_name_add_device(const char *domain_name,
struct device *dev,
struct gpd_timing_data *td);
@@ -151,10 +136,6 @@ extern int pm_genpd_add_subdomain_names(const char *master_name,
const char *subdomain_name);
extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *target);
-extern int pm_genpd_add_callbacks(struct device *dev,
- struct gpd_dev_ops *ops,
- struct gpd_timing_data *td);
-extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state);
extern int pm_genpd_name_attach_cpuidle(const char *name, int state);
extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd);
@@ -165,8 +146,7 @@ extern void pm_genpd_init(struct generic_pm_domain *genpd,
extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
extern int pm_genpd_name_poweron(const char *domain_name);
-extern bool default_stop_ok(struct device *dev);
-
+extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
#else
@@ -184,12 +164,6 @@ static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
{
return -ENOSYS;
}
-static inline int __pm_genpd_of_add_device(struct device_node *genpd_node,
- struct device *dev,
- struct gpd_timing_data *td)
-{
- return -ENOSYS;
-}
static inline int __pm_genpd_name_add_device(const char *domain_name,
struct device *dev,
struct gpd_timing_data *td)
@@ -217,16 +191,6 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
{
return -ENOSYS;
}
-static inline int pm_genpd_add_callbacks(struct device *dev,
- struct gpd_dev_ops *ops,
- struct gpd_timing_data *td)
-{
- return -ENOSYS;
-}
-static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
-{
- return -ENOSYS;
-}
static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st)
{
return -ENOSYS;
@@ -255,10 +219,6 @@ static inline int pm_genpd_name_poweron(const char *domain_name)
{
return -ENOSYS;
}
-static inline bool default_stop_ok(struct device *dev)
-{
- return false;
-}
#define simple_qos_governor NULL
#define pm_domain_always_on_gov NULL
#endif
@@ -269,45 +229,87 @@ static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
return __pm_genpd_add_device(genpd, dev, NULL);
}
-static inline int pm_genpd_of_add_device(struct device_node *genpd_node,
- struct device *dev)
-{
- return __pm_genpd_of_add_device(genpd_node, dev, NULL);
-}
-
static inline int pm_genpd_name_add_device(const char *domain_name,
struct device *dev)
{
return __pm_genpd_name_add_device(domain_name, dev, NULL);
}
-static inline int pm_genpd_remove_callbacks(struct device *dev)
-{
- return __pm_genpd_remove_callbacks(dev, true);
-}
-
#ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME
-extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
extern void pm_genpd_poweroff_unused(void);
#else
-static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {}
static inline void pm_genpd_poweroff_unused(void) {}
#endif
#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP
-extern void pm_genpd_syscore_switch(struct device *dev, bool suspend);
+extern void pm_genpd_syscore_poweroff(struct device *dev);
+extern void pm_genpd_syscore_poweron(struct device *dev);
#else
-static inline void pm_genpd_syscore_switch(struct device *dev, bool suspend) {}
+static inline void pm_genpd_syscore_poweroff(struct device *dev) {}
+static inline void pm_genpd_syscore_poweron(struct device *dev) {}
#endif
-static inline void pm_genpd_syscore_poweroff(struct device *dev)
+/* OF PM domain providers */
+struct of_device_id;
+
+struct genpd_onecell_data {
+ struct generic_pm_domain **domains;
+ unsigned int num_domains;
+};
+
+typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
+ void *data);
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
+ void *data);
+void of_genpd_del_provider(struct device_node *np);
+
+struct generic_pm_domain *__of_genpd_xlate_simple(
+ struct of_phandle_args *genpdspec,
+ void *data);
+struct generic_pm_domain *__of_genpd_xlate_onecell(
+ struct of_phandle_args *genpdspec,
+ void *data);
+
+int genpd_dev_pm_attach(struct device *dev);
+#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
+static inline int __of_genpd_add_provider(struct device_node *np,
+ genpd_xlate_t xlate, void *data)
+{
+ return 0;
+}
+static inline void of_genpd_del_provider(struct device_node *np) {}
+
+#define __of_genpd_xlate_simple NULL
+#define __of_genpd_xlate_onecell NULL
+
+static inline int genpd_dev_pm_attach(struct device *dev)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
+
+static inline int of_genpd_add_provider_simple(struct device_node *np,
+ struct generic_pm_domain *genpd)
+{
+ return __of_genpd_add_provider(np, __of_genpd_xlate_simple, genpd);
+}
+static inline int of_genpd_add_provider_onecell(struct device_node *np,
+ struct genpd_onecell_data *data)
{
- pm_genpd_syscore_switch(dev, true);
+ return __of_genpd_add_provider(np, __of_genpd_xlate_onecell, data);
}
-static inline void pm_genpd_syscore_poweron(struct device *dev)
+#ifdef CONFIG_PM
+extern int dev_pm_domain_attach(struct device *dev, bool power_on);
+extern void dev_pm_domain_detach(struct device *dev, bool power_off);
+#else
+static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
{
- pm_genpd_syscore_switch(dev, false);
+ return -ENODEV;
}
+static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
+#endif
#endif /* _LINUX_PM_DOMAIN_H */
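
A sketch of a platform driver using the new generic attach/detach helpers in probe/remove; the error-handling convention shown (deferring only on -EPROBE_DEFER) is an assumption of this sketch, not something mandated by the header.

#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static int my_probe(struct platform_device *pdev)
{
	/* Attach to the PM domain described for this device (if any) and
	 * power it on. */
	int ret = dev_pm_domain_attach(&pdev->dev, true);

	if (ret == -EPROBE_DEFER)
		return ret;	/* assumption: retry once the domain exists */

	/* normal driver setup goes here */
	return 0;
}

static int my_remove(struct platform_device *pdev)
{
	dev_pm_domain_detach(&pdev->dev, true);
	return 0;
}
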
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 26a8a4ed9b07..00e8e8fa7358 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -12,6 +12,7 @@
#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <linux/gfp.h>
struct prop_global {
/*
@@ -40,7 +41,7 @@ struct prop_descriptor {
struct mutex mutex; /* serialize the prop_global switch */
};
-int prop_descriptor_init(struct prop_descriptor *pd, int shift);
+int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp);
void prop_change_shift(struct prop_descriptor *pd, int new_shift);
/*
@@ -61,7 +62,7 @@ struct prop_local_percpu {
raw_spinlock_t lock; /* protect the snapshot state */
};
-int prop_local_init_percpu(struct prop_local_percpu *pl);
+int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp);
void prop_local_destroy_percpu(struct prop_local_percpu *pl);
void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 48bf152761c7..67fc8fcdc4b0 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -38,6 +38,9 @@ extern int reboot_force;
extern int register_reboot_notifier(struct notifier_block *);
extern int unregister_reboot_notifier(struct notifier_block *);
+extern int register_restart_handler(struct notifier_block *);
+extern int unregister_restart_handler(struct notifier_block *);
+extern void do_kernel_restart(char *cmd);
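
A sketch of the new restart-handler API: a notifier that would kick a (made-up) reset register, registered at what this sketch assumes to be the default priority.

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int my_restart_handler(struct notifier_block *nb, unsigned long mode,
			      void *cmd)
{
	/* poke the SoC/watchdog reset register here */
	return NOTIFY_DONE;
}

static struct notifier_block my_restart_nb = {
	.notifier_call	= my_restart_handler,
	.priority	= 128,	/* assumed default priority for this sketch */
};

static int __init my_restart_init(void)
{
	return register_restart_handler(&my_restart_nb);
}
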
/*
* Architecture-specific implementations of sys_reboot commands.
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index be574506e6a9..c0c2bce6b0b7 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -150,7 +150,7 @@ int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
static inline void anon_vma_merge(struct vm_area_struct *vma,
struct vm_area_struct *next)
{
- VM_BUG_ON(vma->anon_vma != next->anon_vma);
+ VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
unlink_anon_vmas(next);
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9c6353d9e63a..5e63ba59258c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1935,11 +1935,13 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
-/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
+/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags.
+ * __GFP_FS is also cleared as it implies __GFP_IO.
+ */
static inline gfp_t memalloc_noio_flags(gfp_t flags)
{
if (unlikely(current->flags & PF_MEMALLOC_NOIO))
- flags &= ~__GFP_IO;
+ flags &= ~(__GFP_IO | __GFP_FS);
return flags;
}
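
For context, a sketch of the call sites this change affects: code running under memalloc_noio_save() now has both __GFP_IO and __GFP_FS stripped from its allocations; the surrounding resume logic is omitted.

#include <linux/sched.h>

static int my_runtime_resume(void)
{
	unsigned int noio_flags = memalloc_noio_save();
	int ret = 0;

	/* allocations made here cannot recurse into I/O or filesystem reclaim */

	memalloc_noio_restore(noio_flags);
	return ret;
}
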
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
index 005bf3e38db5..f0f8bad54be9 100644
--- a/include/linux/screen_info.h
+++ b/include/linux/screen_info.h
@@ -5,12 +5,4 @@
extern struct screen_info screen_info;
-#define ORIG_X (screen_info.orig_x)
-#define ORIG_Y (screen_info.orig_y)
-#define ORIG_VIDEO_MODE (screen_info.orig_video_mode)
-#define ORIG_VIDEO_COLS (screen_info.orig_video_cols)
-#define ORIG_VIDEO_EGA_BX (screen_info.orig_video_ega_bx)
-#define ORIG_VIDEO_LINES (screen_info.orig_video_lines)
-#define ORIG_VIDEO_ISVGA (screen_info.orig_video_isVGA)
-#define ORIG_VIDEO_POINTS (screen_info.orig_video_points)
#endif /* _SCREEN_INFO_H */
diff --git a/include/linux/security.h b/include/linux/security.h
index 623f90e5f38d..b10e7af95d3b 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1559,7 +1559,7 @@ struct security_operations {
int (*file_lock) (struct file *file, unsigned int cmd);
int (*file_fcntl) (struct file *file, unsigned int cmd,
unsigned long arg);
- int (*file_set_fowner) (struct file *file);
+ void (*file_set_fowner) (struct file *file);
int (*file_send_sigiotask) (struct task_struct *tsk,
struct fown_struct *fown, int sig);
int (*file_receive) (struct file *file);
@@ -1834,7 +1834,7 @@ int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
unsigned long prot);
int security_file_lock(struct file *file, unsigned int cmd);
int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
-int security_file_set_fowner(struct file *file);
+void security_file_set_fowner(struct file *file);
int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int sig);
int security_file_receive(struct file *file);
@@ -2312,9 +2312,9 @@ static inline int security_file_fcntl(struct file *file, unsigned int cmd,
return 0;
}
-static inline int security_file_set_fowner(struct file *file)
+static inline void security_file_set_fowner(struct file *file)
{
- return 0;
+ return;
}
static inline int security_file_send_sigiotask(struct task_struct *tsk,
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1d9abb7d22a0..c265bec6a57d 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -158,31 +158,6 @@ size_t ksize(const void *);
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
-#ifdef CONFIG_SLOB
-/*
- * Common fields provided in kmem_cache by all slab allocators
- * This struct is either used directly by the allocator (SLOB)
- * or the allocator must include definitions for all fields
- * provided in kmem_cache_common in their definition of kmem_cache.
- *
- * Once we can do anonymous structs (C11 standard) we could put a
- * anonymous struct definition in these allocators so that the
- * separate allocations in the kmem_cache structure of SLAB and
- * SLUB is no longer needed.
- */
-struct kmem_cache {
- unsigned int object_size;/* The original size of the object */
- unsigned int size; /* The aligned/padded/added on size */
- unsigned int align; /* Alignment as calculated */
- unsigned long flags; /* Active flags on the slab */
- const char *name; /* Slab name for sysfs */
- int refcount; /* Use counter */
- void (*ctor)(void *); /* Called on object slot creation */
- struct list_head list; /* List of all slab caches on the system */
-};
-
-#endif /* CONFIG_SLOB */
-
/*
* Kmalloc array related definitions
*/
@@ -363,14 +338,6 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
}
#endif /* CONFIG_TRACING */
-#ifdef CONFIG_SLAB
-#include <linux/slab_def.h>
-#endif
-
-#ifdef CONFIG_SLUB
-#include <linux/slub_def.h>
-#endif
-
extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
#ifdef CONFIG_TRACING
@@ -582,37 +549,15 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
* allocator where we care about the real place the memory allocation
* request comes from.
*/
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
- (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
- (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
__kmalloc_track_caller(size, flags, _RET_IP_)
-#else
-#define kmalloc_track_caller(size, flags) \
- __kmalloc(size, flags)
-#endif /* DEBUG_SLAB */
#ifdef CONFIG_NUMA
-/*
- * kmalloc_node_track_caller is a special version of kmalloc_node that
- * records the calling function of the routine calling it for slab leak
- * tracking instead of just the calling function (confusing, eh?).
- * It's useful when the call to kmalloc_node comes from a widely-used
- * standard allocator where we care about the real place the memory
- * allocation request comes from.
- */
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
- (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
- (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
__kmalloc_node_track_caller(size, flags, node, \
_RET_IP_)
-#else
-#define kmalloc_node_track_caller(size, flags, node) \
- __kmalloc_node(size, flags, node)
-#endif
#else /* CONFIG_NUMA */
@@ -650,14 +595,7 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
return kmalloc_node(size, flags | __GFP_ZERO, node);
}
-/*
- * Determine the size of a slab object
- */
-static inline unsigned int kmem_cache_size(struct kmem_cache *s)
-{
- return s->object_size;
-}
-
+unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);
#endif /* _LINUX_SLAB_H */
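A hedged usage sketch: with the config maze removed, kmalloc_track_caller() always resolves to __kmalloc_track_caller() regardless of allocator, and kmem_cache_size() becomes an out-of-line accessor. The wrapper example_alloc is hypothetical.

static inline void *example_alloc(size_t len, gfp_t gfp)
{
        /* The slab allocator records the real call site via _RET_IP_. */
        return kmalloc_track_caller(len, gfp);
}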
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 8235dfbb3b05..b869d1662ba3 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -8,6 +8,8 @@
*/
struct kmem_cache {
+ struct array_cache __percpu *cpu_cache;
+
/* 1) Cache tunables. Protected by slab_mutex */
unsigned int batchcount;
unsigned int limit;
@@ -71,23 +73,7 @@ struct kmem_cache {
struct memcg_cache_params *memcg_params;
#endif
-/* 6) per-cpu/per-node data, touched during every alloc/free */
- /*
- * We put array[] at the end of kmem_cache, because we want to size
- * this array to nr_cpu_ids slots instead of NR_CPUS
- * (see kmem_cache_init())
- * We still use [NR_CPUS] and not [1] or [0] because cache_cache
- * is statically defined, so we reserve the max number of cpus.
- *
- * We also need to guarantee that the list is able to accomodate a
- * pointer for each node since "nodelists" uses the remainder of
- * available pointers.
- */
- struct kmem_cache_node **node;
- struct array_cache *array[NR_CPUS + MAX_NUMNODES];
- /*
- * Do not add fields after array[]
- */
+ struct kmem_cache_node *node[MAX_NUMNODES];
};
#endif /* _LINUX_SLAB_DEF_H */
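Illustrative only: the per-CPU array_cache now hangs off a __percpu pointer, so lookups go through this_cpu_ptr() instead of indexing a fixed array[] by CPU id. The helper name example_cpu_cache is an assumption, not from this header.

static inline struct array_cache *example_cpu_cache(struct kmem_cache *cachep)
{
        return this_cpu_ptr(cachep->cpu_cache);
}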
diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h
index 2d676d5aaa89..aa07d7b32568 100644
--- a/include/linux/spi/mcp23s08.h
+++ b/include/linux/spi/mcp23s08.h
@@ -22,4 +22,22 @@ struct mcp23s08_platform_data {
* base to base+15 (or base+31 for s17 variant).
*/
unsigned base;
+ /* Marks the device as an interrupt controller.
+ * NOTE: The interrupt functionality is only supported for the i2c
+ * versions of the chips. The spi chips can also generate interrupts,
+ * but this is not yet supported by the Linux driver.
+ */
+ bool irq_controller;
+
+ /* Sets the mirror flag in the IOCON register. Devices
+ * with two interrupt outputs (these are the devices ending with 17 and
+ * those that have 16 IOs) have two IO banks: IO 0-7 form bank 1 and
+ * IO 8-15 are bank 2. These chips have two different interrupt outputs:
+ * One for bank 1 and another for bank 2. If irq-mirror is set, both
+ * interrupts are generated regardless of the bank that an input change
+ occurred on. If it is not set, interrupts are only generated for
+ * the bank they belong to.
+ * On devices with only one interrupt output this property is useless.
+ */
+ bool mirror;
};
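A board-file sketch of the new fields; the variable name and values are made up for illustration.

static const struct mcp23s08_platform_data example_mcp23017_pdata = {
        .base           = 200,
        .irq_controller = true,   /* expose the expander as an IRQ chip */
        .mirror         = true,   /* raise both interrupt outputs together */
};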
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 519064e0c943..3388c1b6f7d8 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -189,6 +189,8 @@ struct platform_suspend_ops {
struct platform_freeze_ops {
int (*begin)(void);
+ int (*prepare)(void);
+ void (*restore)(void);
void (*end)(void);
};
@@ -371,6 +373,8 @@ extern int unregister_pm_notifier(struct notifier_block *nb);
extern bool events_check_enabled;
extern bool pm_wakeup_pending(void);
+extern void pm_system_wakeup(void);
+extern void pm_wakeup_clear(void);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
@@ -418,6 +422,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
static inline bool pm_wakeup_pending(void) { return false; }
+static inline void pm_system_wakeup(void) {}
+static inline void pm_wakeup_clear(void) {}
static inline void lock_system_sleep(void) {}
static inline void unlock_system_sleep(void) {}
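For context, a hedged sketch of how a wakeup interrupt handler might use the new helper during suspend-to-idle; example_wake_irq is hypothetical.

static irqreturn_t example_wake_irq(int irq, void *dev_id)
{
        /* Reported as a wakeup event so a freeze in progress is aborted. */
        pm_system_wakeup();
        return IRQ_HANDLED;
}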
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 1b72060f093a..37a585beef5c 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -327,8 +327,10 @@ extern void lru_cache_add_active_or_unevictable(struct page *page,
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
-extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
- gfp_t gfp_mask, bool noswap);
+extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
+ unsigned long nr_pages,
+ gfp_t gfp_mask,
+ bool may_swap);
extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap,
struct zone *zone,
@@ -354,22 +356,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
extern int page_evictable(struct page *page);
extern void check_move_unevictable_pages(struct page **, int nr_pages);
-extern unsigned long scan_unevictable_pages;
-extern int scan_unevictable_handler(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
-#ifdef CONFIG_NUMA
-extern int scan_unevictable_register_node(struct node *node);
-extern void scan_unevictable_unregister_node(struct node *node);
-#else
-static inline int scan_unevictable_register_node(struct node *node)
-{
- return 0;
-}
-static inline void scan_unevictable_unregister_node(struct node *node)
-{
-}
-#endif
-
extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
#ifdef CONFIG_MEMCG
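An illustrative caller of the reworked prototype: the reclaim target is now explicit and the last argument says whether swap may be used. Everything except try_to_free_mem_cgroup_pages() itself is hypothetical.

static unsigned long example_reclaim_excess(struct mem_cgroup *memcg,
                                            unsigned long excess)
{
        /* reclaim roughly the overage, allowing swap */
        return try_to_free_mem_cgroup_pages(memcg, excess, GFP_KERNEL, true);
}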
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 9a82c7dc3fdd..595ee86f5e0d 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -181,14 +181,12 @@ static inline bool tick_nohz_full_cpu(int cpu)
return cpumask_test_cpu(cpu, tick_nohz_full_mask);
}
-extern void tick_nohz_init(void);
extern void __tick_nohz_full_check(void);
extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_cpu(int cpu);
extern void tick_nohz_full_kick_all(void);
extern void __tick_nohz_task_switch(struct task_struct *tsk);
#else
-static inline void tick_nohz_init(void) { }
static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void __tick_nohz_full_check(void) { }
diff --git a/include/linux/topology.h b/include/linux/topology.h
index dda6ee521e74..909b6e43b694 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -119,11 +119,20 @@ static inline int numa_node_id(void)
* Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
*/
DECLARE_PER_CPU(int, _numa_mem_);
+extern int _node_numa_mem_[MAX_NUMNODES];
#ifndef set_numa_mem
static inline void set_numa_mem(int node)
{
this_cpu_write(_numa_mem_, node);
+ _node_numa_mem_[numa_node_id()] = node;
+}
+#endif
+
+#ifndef node_to_mem_node
+static inline int node_to_mem_node(int node)
+{
+ return _node_numa_mem_[node];
}
#endif
@@ -146,6 +155,7 @@ static inline int cpu_to_mem(int cpu)
static inline void set_cpu_numa_mem(int cpu, int node)
{
per_cpu(_numa_mem_, cpu) = node;
+ _node_numa_mem_[cpu_to_node(cpu)] = node;
}
#endif
@@ -159,6 +169,13 @@ static inline int numa_mem_id(void)
}
#endif
+#ifndef node_to_mem_node
+static inline int node_to_mem_node(int node)
+{
+ return node;
+}
+#endif
+
#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
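A hedged sketch of the intended use on memoryless nodes: redirect to the nearest node that actually has memory before taking a slower fallback path. example_pick_node is hypothetical.

static int example_pick_node(int preferred_node)
{
        int searchnode = preferred_node;

        if (!node_present_pages(preferred_node))
                searchnode = node_to_mem_node(preferred_node);
        return searchnode;
}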
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index ced92345c963..730334cdf037 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -72,6 +72,13 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_ZERO_PAGE_ALLOC,
THP_ZERO_PAGE_ALLOC_FAILED,
#endif
+#ifdef CONFIG_MEMORY_BALLOON
+ BALLOON_INFLATE,
+ BALLOON_DEFLATE,
+#ifdef CONFIG_BALLOON_COMPACTION
+ BALLOON_MIGRATE,
+#endif
+#endif
#ifdef CONFIG_DEBUG_TLBFLUSH
#ifdef CONFIG_SMP
NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index e44d634e7fb7..05c214760977 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -46,6 +46,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
enum zs_mapmode mm);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
-u64 zs_get_total_size_bytes(struct zs_pool *pool);
+unsigned long zs_get_total_pages(struct zs_pool *pool);
#endif
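Illustrative only: callers that previously used zs_get_total_size_bytes() can derive the byte count from the page count. example_zs_total_bytes is a hypothetical helper.

static inline u64 example_zs_total_bytes(struct zs_pool *pool)
{
        return (u64)zs_get_total_pages(pool) << PAGE_SHIFT;
}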
diff --git a/include/media/davinci/dm644x_ccdc.h b/include/media/davinci/dm644x_ccdc.h
index 852e96c4bb46..984fb79031de 100644
--- a/include/media/davinci/dm644x_ccdc.h
+++ b/include/media/davinci/dm644x_ccdc.h
@@ -114,7 +114,7 @@ struct ccdc_fault_pixel {
/* Number of fault pixel */
unsigned short fp_num;
/* Address of fault pixel table */
- unsigned int fpc_table_addr;
+ unsigned long fpc_table_addr;
};
/* Structure for CCDC configuration parameters for raw capture mode passed
diff --git a/include/media/omap3isp.h b/include/media/omap3isp.h
index c9d06d9f7e6e..398279dd1922 100644
--- a/include/media/omap3isp.h
+++ b/include/media/omap3isp.h
@@ -57,6 +57,8 @@ enum {
* 0 - Active high, 1 - Active low
* @vs_pol: Vertical synchronization polarity
* 0 - Active high, 1 - Active low
+ * @fld_pol: Field signal polarity
+ * 0 - Positive, 1 - Negative
* @data_pol: Data polarity
* 0 - Normal, 1 - One's complement
*/
@@ -65,6 +67,7 @@ struct isp_parallel_platform_data {
unsigned int clk_pol:1;
unsigned int hs_pol:1;
unsigned int vs_pol:1;
+ unsigned int fld_pol:1;
unsigned int data_pol:1;
};
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index 80f951890b4c..e7a1514075ec 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -135,6 +135,7 @@ void rc_map_init(void);
#define RC_MAP_DM1105_NEC "rc-dm1105-nec"
#define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro"
#define RC_MAP_DNTV_LIVE_DVB_T "rc-dntv-live-dvb-t"
+#define RC_MAP_DVBSKY "rc-dvbsky"
#define RC_MAP_EMPTY "rc-empty"
#define RC_MAP_EM_TERRATEC "rc-em-terratec"
#define RC_MAP_ENCORE_ENLTV2 "rc-encore-enltv2"
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 2fefcf491aa8..6ef2d01197da 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -356,8 +356,8 @@ struct v4l2_fh;
* @buf_struct_size: size of the driver-specific buffer structure;
* "0" indicates the driver doesn't want to use a custom buffer
* structure type, so sizeof(struct vb2_buffer) will be used
- * @timestamp_flags: Timestamp flags; V4L2_BUF_FLAGS_TIMESTAMP_* and
- * V4L2_BUF_FLAGS_TSTAMP_SRC_*
+ * @timestamp_flags: Timestamp flags; V4L2_BUF_FLAG_TIMESTAMP_* and
+ * V4L2_BUF_FLAG_TSTAMP_SRC_*
* @gfp_flags: additional gfp flags used when allocating the buffers.
* Typically this is 0, but it may be e.g. GFP_DMA or __GFP_DMA32
* to force the buffer allocation to a specific memory zone.
@@ -366,6 +366,7 @@ struct v4l2_fh;
* cannot be started unless at least this number of buffers
* have been queued into the driver.
*
+ * @mmap_lock: private mutex used when buffers are allocated/freed/mmapped
* @memory: current memory type used
* @bufs: videobuf buffer structures
* @num_buffers: number of allocated/used buffers
@@ -402,6 +403,7 @@ struct vb2_queue {
u32 min_buffers_needed;
/* private: internal use only */
+ struct mutex mmap_lock;
enum v4l2_memory memory;
struct vb2_buffer *bufs[VIDEO_MAX_FRAME];
unsigned int num_buffers;
@@ -592,6 +594,15 @@ vb2_plane_size(struct vb2_buffer *vb, unsigned int plane_no)
return 0;
}
+/**
+ * vb2_start_streaming_called() - return streaming status of driver
+ * @q: videobuf queue
+ */
+static inline bool vb2_start_streaming_called(struct vb2_queue *q)
+{
+ return q->start_streaming_called;
+}
+
/*
* The following functions are not part of the vb2 core API, but are simple
* helper functions that you can use in your struct v4l2_file_operations,
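A driver-side sketch of the new accessor (the function name and the pr_debug are illustrative): buffers queued before start_streaming() has run are typically parked on a driver-private list rather than handed to the hardware.

static void example_buf_queue(struct vb2_buffer *vb)
{
        if (vb2_start_streaming_called(vb->vb2_queue))
                pr_debug("hand buffer %p to the hardware\n", vb);
        /* else: park it until start_streaming() is called */
}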
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
new file mode 100644
index 000000000000..975cc7861f18
--- /dev/null
+++ b/include/misc/cxl.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _MISC_CXL_H
+#define _MISC_CXL_H
+
+#ifdef CONFIG_CXL_BASE
+
+#define CXL_IRQ_RANGES 4
+
+struct cxl_irq_ranges {
+ irq_hw_number_t offset[CXL_IRQ_RANGES];
+ irq_hw_number_t range[CXL_IRQ_RANGES];
+};
+
+extern atomic_t cxl_use_count;
+
+static inline bool cxl_ctx_in_use(void)
+{
+ return (atomic_read(&cxl_use_count) != 0);
+}
+
+static inline void cxl_ctx_get(void)
+{
+ atomic_inc(&cxl_use_count);
+}
+
+static inline void cxl_ctx_put(void)
+{
+ atomic_dec(&cxl_use_count);
+}
+
+void cxl_slbia(struct mm_struct *mm);
+
+#else /* CONFIG_CXL_BASE */
+
+static inline bool cxl_ctx_in_use(void) { return false; }
+static inline void cxl_slbia(struct mm_struct *mm) {}
+
+#endif /* CONFIG_CXL_BASE */
+
+#endif
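A minimal sketch (function names hypothetical) of how context creation and teardown would bump the use count so that cxl_ctx_in_use() can gate MMU/SLB handling elsewhere.

static void example_ctx_create(void)
{
        cxl_ctx_get();
        /* ...initialise the AFU context... */
}

static void example_ctx_free(void)
{
        /* ...tear the AFU context down... */
        cxl_ctx_put();
}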
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 2f26dfb8450e..1f99a1de0e4f 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -63,7 +63,7 @@ static inline void dst_entries_add(struct dst_ops *dst, int val)
static inline int dst_entries_init(struct dst_ops *dst)
{
- return percpu_counter_init(&dst->pcpuc_entries, 0);
+ return percpu_counter_init(&dst->pcpuc_entries, 0, GFP_KERNEL);
}
static inline void dst_entries_destroy(struct dst_ops *dst)
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 65a8855e99fe..8d1765577acc 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -151,7 +151,7 @@ static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i)
static inline void init_frag_mem_limit(struct netns_frags *nf)
{
- percpu_counter_init(&nf->mem, 0);
+ percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
}
static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
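Illustrative only: percpu_counter_init() now takes the GFP mask used for its internal per-CPU storage and may fail accordingly, so callers in atomic or reclaim contexts can pass something other than GFP_KERNEL. example_counter_setup is hypothetical.

static int example_counter_setup(struct percpu_counter *c)
{
        return percpu_counter_init(c, 0, GFP_KERNEL);
}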
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 47da53c27ffa..79abb9c71772 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -8,6 +8,7 @@
#include <linux/tracepoint.h>
#include <linux/edac.h>
#include <linux/ktime.h>
+#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/cper.h>
@@ -173,25 +174,34 @@ TRACE_EVENT(mc_event,
* u8 severity - error severity 0:NONFATAL 1:FATAL 2:CORRECTED
*/
-#define aer_correctable_errors \
- {BIT(0), "Receiver Error"}, \
- {BIT(6), "Bad TLP"}, \
- {BIT(7), "Bad DLLP"}, \
- {BIT(8), "RELAY_NUM Rollover"}, \
- {BIT(12), "Replay Timer Timeout"}, \
- {BIT(13), "Advisory Non-Fatal"}
-
-#define aer_uncorrectable_errors \
- {BIT(4), "Data Link Protocol"}, \
- {BIT(12), "Poisoned TLP"}, \
- {BIT(13), "Flow Control Protocol"}, \
- {BIT(14), "Completion Timeout"}, \
- {BIT(15), "Completer Abort"}, \
- {BIT(16), "Unexpected Completion"}, \
- {BIT(17), "Receiver Overflow"}, \
- {BIT(18), "Malformed TLP"}, \
- {BIT(19), "ECRC"}, \
- {BIT(20), "Unsupported Request"}
+#define aer_correctable_errors \
+ {PCI_ERR_COR_RCVR, "Receiver Error"}, \
+ {PCI_ERR_COR_BAD_TLP, "Bad TLP"}, \
+ {PCI_ERR_COR_BAD_DLLP, "Bad DLLP"}, \
+ {PCI_ERR_COR_REP_ROLL, "RELAY_NUM Rollover"}, \
+ {PCI_ERR_COR_REP_TIMER, "Replay Timer Timeout"}, \
+ {PCI_ERR_COR_ADV_NFAT, "Advisory Non-Fatal Error"}, \
+ {PCI_ERR_COR_INTERNAL, "Corrected Internal Error"}, \
+ {PCI_ERR_COR_LOG_OVER, "Header Log Overflow"}
+
+#define aer_uncorrectable_errors \
+ {PCI_ERR_UNC_UND, "Undefined"}, \
+ {PCI_ERR_UNC_DLP, "Data Link Protocol Error"}, \
+ {PCI_ERR_UNC_SURPDN, "Surprise Down Error"}, \
+ {PCI_ERR_UNC_POISON_TLP,"Poisoned TLP"}, \
+ {PCI_ERR_UNC_FCP, "Flow Control Protocol Error"}, \
+ {PCI_ERR_UNC_COMP_TIME, "Completion Timeout"}, \
+ {PCI_ERR_UNC_COMP_ABORT,"Completer Abort"}, \
+ {PCI_ERR_UNC_UNX_COMP, "Unexpected Completion"}, \
+ {PCI_ERR_UNC_RX_OVER, "Receiver Overflow"}, \
+ {PCI_ERR_UNC_MALF_TLP, "Malformed TLP"}, \
+ {PCI_ERR_UNC_ECRC, "ECRC Error"}, \
+ {PCI_ERR_UNC_UNSUP, "Unsupported Request Error"}, \
+ {PCI_ERR_UNC_ACSV, "ACS Violation"}, \
+ {PCI_ERR_UNC_INTN, "Uncorrectable Internal Error"},\
+ {PCI_ERR_UNC_MCBTLP, "MC Blocked TLP"}, \
+ {PCI_ERR_UNC_ATOMEG, "AtomicOp Egress Blocked"}, \
+ {PCI_ERR_UNC_TLPPRE, "TLP Prefix Blocked Error"}
TRACE_EVENT(aer_event,
TP_PROTO(const char *dev_name,
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 6f3e10ca0e32..e862497f7556 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -183,6 +183,7 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_G723_40_1B _SNDRV_PCM_FMTBIT(G723_40_1B)
#define SNDRV_PCM_FMTBIT_DSD_U8 _SNDRV_PCM_FMTBIT(DSD_U8)
#define SNDRV_PCM_FMTBIT_DSD_U16_LE _SNDRV_PCM_FMTBIT(DSD_U16_LE)
+#define SNDRV_PCM_FMTBIT_DSD_U32_LE _SNDRV_PCM_FMTBIT(DSD_U32_LE)
#ifdef SNDRV_LITTLE_ENDIAN
#define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_LE
@@ -365,6 +366,7 @@ struct snd_pcm_runtime {
struct snd_pcm_group { /* keep linked substreams */
spinlock_t lock;
+ struct mutex mutex;
struct list_head substreams;
int count;
};
@@ -460,6 +462,7 @@ struct snd_pcm {
void (*private_free) (struct snd_pcm *pcm);
struct device *dev; /* actual hw device this belongs to */
bool internal; /* pcm is for internal use only */
+ bool nonatomic; /* whole PCM operations are in non-atomic context */
#if defined(CONFIG_SND_PCM_OSS) || defined(CONFIG_SND_PCM_OSS_MODULE)
struct snd_pcm_oss oss;
#endif
@@ -492,8 +495,6 @@ int snd_pcm_notify(struct snd_pcm_notify *notify, int nfree);
* Native I/O
*/
-extern rwlock_t snd_pcm_link_rwlock;
-
int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info);
int snd_pcm_info_user(struct snd_pcm_substream *substream,
struct snd_pcm_info __user *info);
@@ -537,41 +538,18 @@ static inline int snd_pcm_stream_linked(struct snd_pcm_substream *substream)
return substream->group != &substream->self_group;
}
-static inline void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
-{
- read_lock(&snd_pcm_link_rwlock);
- spin_lock(&substream->self_group.lock);
-}
-
-static inline void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
-{
- spin_unlock(&substream->self_group.lock);
- read_unlock(&snd_pcm_link_rwlock);
-}
-
-static inline void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
-{
- read_lock_irq(&snd_pcm_link_rwlock);
- spin_lock(&substream->self_group.lock);
-}
-
-static inline void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
-{
- spin_unlock(&substream->self_group.lock);
- read_unlock_irq(&snd_pcm_link_rwlock);
-}
-
-#define snd_pcm_stream_lock_irqsave(substream, flags) \
-do { \
- read_lock_irqsave(&snd_pcm_link_rwlock, (flags)); \
- spin_lock(&substream->self_group.lock); \
-} while (0)
-
-#define snd_pcm_stream_unlock_irqrestore(substream, flags) \
-do { \
- spin_unlock(&substream->self_group.lock); \
- read_unlock_irqrestore(&snd_pcm_link_rwlock, (flags)); \
-} while (0)
+void snd_pcm_stream_lock(struct snd_pcm_substream *substream);
+void snd_pcm_stream_unlock(struct snd_pcm_substream *substream);
+void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream);
+void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream);
+unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream);
+#define snd_pcm_stream_lock_irqsave(substream, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _snd_pcm_stream_lock_irqsave(substream); \
+ } while (0)
+void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
+ unsigned long flags);
#define snd_pcm_group_for_each_entry(s, substream) \
list_for_each_entry(s, &substream->group->substreams, link_list)
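The caller pattern is unchanged even though the helpers moved out of line; a hedged sketch follows (example_update is hypothetical), with typecheck() keeping "flags" an unsigned long as before.

static void example_update(struct snd_pcm_substream *substream)
{
        unsigned long flags;

        snd_pcm_stream_lock_irqsave(substream, flags);
        /* ...update runtime state... */
        snd_pcm_stream_unlock_irqrestore(substream, flags);
}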
diff --git a/include/sound/rt5645.h b/include/sound/rt5645.h
index 1de744c242f6..a5352712194b 100644
--- a/include/sound/rt5645.h
+++ b/include/sound/rt5645.h
@@ -20,6 +20,9 @@ struct rt5645_platform_data {
/* 0 = IN2N; 1 = GPIO5; 2 = GPIO11 */
unsigned int dmic2_data_pin;
/* 0 = IN2P; 1 = GPIO6; 2 = GPIO10; 3 = GPIO12 */
+
+ unsigned int hp_det_gpio;
+ bool gpio_hp_det_active_high;
};
#endif
diff --git a/include/sound/rt5677.h b/include/sound/rt5677.h
index 3da14313bcfc..082670e3a353 100644
--- a/include/sound/rt5677.h
+++ b/include/sound/rt5677.h
@@ -12,10 +12,21 @@
#ifndef __LINUX_SND_RT5677_H
#define __LINUX_SND_RT5677_H
+enum rt5677_dmic2_clk {
+ RT5677_DMIC_CLK1 = 0,
+ RT5677_DMIC_CLK2 = 1,
+};
+
+
struct rt5677_platform_data {
- /* IN1 IN2 can optionally be differential */
+ /* IN1/IN2/LOUT1/LOUT2/LOUT3 can optionally be differential */
bool in1_diff;
bool in2_diff;
+ bool lout1_diff;
+ bool lout2_diff;
+ bool lout3_diff;
+ /* DMIC2 clock source selection */
+ enum rt5677_dmic2_clk dmic2_clk_pin;
};
#endif
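A board-file sketch using the new fields; the variable name and values are made up for illustration.

static const struct rt5677_platform_data example_rt5677_pdata = {
        .in1_diff      = true,
        .lout1_diff    = true,
        .dmic2_clk_pin = RT5677_DMIC_CLK2,
};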
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index aac04ff84eea..3a4d7da67b8d 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -432,6 +432,7 @@ int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
const char *pin);
void snd_soc_dapm_auto_nc_pins(struct snd_soc_card *card);
+unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol);
/* Mostly internal - should not normally be used */
void dapm_mark_io_dirty(struct snd_soc_dapm_context *dapm);
@@ -587,13 +588,13 @@ struct snd_soc_dapm_context {
enum snd_soc_bias_level suspend_bias_level;
struct delayed_work delayed_work;
unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */
-
+ /* Go to BIAS_OFF in suspend if the DAPM context is idle */
+ unsigned int suspend_bias_off:1;
void (*seq_notifier)(struct snd_soc_dapm_context *,
enum snd_soc_dapm_type, int);
struct device *dev; /* from parent - for debug */
struct snd_soc_component *component; /* parent component */
- struct snd_soc_codec *codec; /* parent codec */
struct snd_soc_card *card; /* parent card */
/* used during DAPM updates */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index c83a334dd00f..7ba7130037a0 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -690,6 +690,17 @@ struct snd_soc_compr_ops {
struct snd_soc_component_driver {
const char *name;
+ /* Default control and setup, added after probe() is run */
+ const struct snd_kcontrol_new *controls;
+ unsigned int num_controls;
+ const struct snd_soc_dapm_widget *dapm_widgets;
+ unsigned int num_dapm_widgets;
+ const struct snd_soc_dapm_route *dapm_routes;
+ unsigned int num_dapm_routes;
+
+ int (*probe)(struct snd_soc_component *);
+ void (*remove)(struct snd_soc_component *);
+
/* DT */
int (*of_xlate_dai_name)(struct snd_soc_component *component,
struct of_phandle_args *args,
@@ -697,6 +708,10 @@ struct snd_soc_component_driver {
void (*seq_notifier)(struct snd_soc_component *, enum snd_soc_dapm_type,
int subseq);
int (*stream_event)(struct snd_soc_component *, int event);
+
+ /* probe ordering - for components with runtime dependencies */
+ int probe_order;
+ int remove_order;
};
struct snd_soc_component {
@@ -710,6 +725,7 @@ struct snd_soc_component {
unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */
unsigned int registered_as_component:1;
+ unsigned int probed:1;
struct list_head list;
@@ -728,9 +744,35 @@ struct snd_soc_component {
struct mutex io_mutex;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root;
+#endif
+
+ /*
+ * DO NOT use any of the fields below in drivers; they are temporary and
+ * are going to be removed again soon. If you use them in driver code, the
+ * driver will be marked as BROKEN when these fields are removed.
+ */
+
/* Don't use these, use snd_soc_component_get_dapm() */
struct snd_soc_dapm_context dapm;
struct snd_soc_dapm_context *dapm_ptr;
+
+ const struct snd_kcontrol_new *controls;
+ unsigned int num_controls;
+ const struct snd_soc_dapm_widget *dapm_widgets;
+ unsigned int num_dapm_widgets;
+ const struct snd_soc_dapm_route *dapm_routes;
+ unsigned int num_dapm_routes;
+ struct snd_soc_codec *codec;
+
+ int (*probe)(struct snd_soc_component *);
+ void (*remove)(struct snd_soc_component *);
+
+#ifdef CONFIG_DEBUG_FS
+ void (*init_debugfs)(struct snd_soc_component *component);
+ const char *debugfs_prefix;
+#endif
};
/* SoC Audio Codec device */
@@ -746,11 +788,9 @@ struct snd_soc_codec {
struct snd_ac97 *ac97; /* for ad-hoc ac97 devices */
unsigned int cache_bypass:1; /* Suppress access to the cache */
unsigned int suspended:1; /* Codec is in suspend PM state */
- unsigned int probed:1; /* Codec has been probed */
unsigned int ac97_registered:1; /* Codec has been AC97 registered */
unsigned int ac97_created:1; /* Codec has been created by SoC */
unsigned int cache_init:1; /* codec cache has been initialized */
- u32 cache_only; /* Suppress writes to hardware */
u32 cache_sync; /* Cache needs to be synced to hardware */
/* codec IO */
@@ -766,7 +806,6 @@ struct snd_soc_codec {
struct snd_soc_dapm_context dapm;
#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs_codec_root;
struct dentry *debugfs_reg;
#endif
};
@@ -808,15 +847,12 @@ struct snd_soc_codec_driver {
int (*set_bias_level)(struct snd_soc_codec *,
enum snd_soc_bias_level level);
bool idle_bias_off;
+ bool suspend_bias_off;
void (*seq_notifier)(struct snd_soc_dapm_context *,
enum snd_soc_dapm_type, int);
bool ignore_pmdown_time; /* Doesn't benefit from pmdown delay */
-
- /* probe ordering - for components with runtime dependencies */
- int probe_order;
- int remove_order;
};
/* SoC platform interface */
@@ -832,14 +868,6 @@ struct snd_soc_platform_driver {
int (*pcm_new)(struct snd_soc_pcm_runtime *);
void (*pcm_free)(struct snd_pcm *);
- /* Default control and setup, added after probe() is run */
- const struct snd_kcontrol_new *controls;
- int num_controls;
- const struct snd_soc_dapm_widget *dapm_widgets;
- int num_dapm_widgets;
- const struct snd_soc_dapm_route *dapm_routes;
- int num_dapm_routes;
-
/*
* For platform caused delay reporting.
* Optional.
@@ -853,13 +881,6 @@ struct snd_soc_platform_driver {
/* platform stream compress ops */
const struct snd_compr_ops *compr_ops;
- /* probe ordering - for components with runtime dependencies */
- int probe_order;
- int remove_order;
-
- /* platform IO - used for platform DAPM */
- unsigned int (*read)(struct snd_soc_platform *, unsigned int);
- int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
int (*bespoke_trigger)(struct snd_pcm_substream *, int);
};
@@ -874,15 +895,10 @@ struct snd_soc_platform {
const struct snd_soc_platform_driver *driver;
unsigned int suspended:1; /* platform is suspended */
- unsigned int probed:1;
struct list_head list;
struct snd_soc_component component;
-
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs_platform_root;
-#endif
};
struct snd_soc_dai_link {
@@ -897,7 +913,7 @@ struct snd_soc_dai_link {
* only for codec to codec links, or systems using device tree.
*/
const char *cpu_name;
- const struct device_node *cpu_of_node;
+ struct device_node *cpu_of_node;
/*
* You MAY specify the DAI name of the CPU DAI. If this information is
* omitted, the CPU-side DAI is matched using .cpu_name/.cpu_of_node
@@ -909,7 +925,7 @@ struct snd_soc_dai_link {
* DT/OF node, but not both.
*/
const char *codec_name;
- const struct device_node *codec_of_node;
+ struct device_node *codec_of_node;
/* You MUST specify the DAI name within the codec */
const char *codec_dai_name;
@@ -922,7 +938,7 @@ struct snd_soc_dai_link {
* do not need a platform.
*/
const char *platform_name;
- const struct device_node *platform_of_node;
+ struct device_node *platform_of_node;
int be_id; /* optional ID for machine driver BE identification */
const struct snd_soc_pcm_stream *params;
@@ -994,7 +1010,7 @@ struct snd_soc_aux_dev {
const struct device_node *codec_of_node;
/* codec/machine specific init - e.g. add machine controls */
- int (*init)(struct snd_soc_dapm_context *dapm);
+ int (*init)(struct snd_soc_component *component);
};
/* SoC card */
@@ -1112,6 +1128,7 @@ struct snd_soc_pcm_runtime {
struct snd_soc_platform *platform;
struct snd_soc_dai *codec_dai;
struct snd_soc_dai *cpu_dai;
+ struct snd_soc_component *component; /* Only valid for AUX dev rtds */
struct snd_soc_dai **codec_dais;
unsigned int num_codecs;
@@ -1260,9 +1277,6 @@ void snd_soc_component_async_complete(struct snd_soc_component *component);
int snd_soc_component_test_bits(struct snd_soc_component *component,
unsigned int reg, unsigned int mask, unsigned int value);
-int snd_soc_component_init_io(struct snd_soc_component *component,
- struct regmap *regmap);
-
/* device driver data */
static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card,
@@ -1276,26 +1290,37 @@ static inline void *snd_soc_card_get_drvdata(struct snd_soc_card *card)
return card->drvdata;
}
+static inline void snd_soc_component_set_drvdata(struct snd_soc_component *c,
+ void *data)
+{
+ dev_set_drvdata(c->dev, data);
+}
+
+static inline void *snd_soc_component_get_drvdata(struct snd_soc_component *c)
+{
+ return dev_get_drvdata(c->dev);
+}
+
static inline void snd_soc_codec_set_drvdata(struct snd_soc_codec *codec,
void *data)
{
- dev_set_drvdata(codec->dev, data);
+ snd_soc_component_set_drvdata(&codec->component, data);
}
static inline void *snd_soc_codec_get_drvdata(struct snd_soc_codec *codec)
{
- return dev_get_drvdata(codec->dev);
+ return snd_soc_component_get_drvdata(&codec->component);
}
static inline void snd_soc_platform_set_drvdata(struct snd_soc_platform *platform,
void *data)
{
- dev_set_drvdata(platform->dev, data);
+ snd_soc_component_set_drvdata(&platform->component, data);
}
static inline void *snd_soc_platform_get_drvdata(struct snd_soc_platform *platform)
{
- return dev_get_drvdata(platform->dev);
+ return snd_soc_component_get_drvdata(&platform->component);
}
static inline void snd_soc_pcm_set_drvdata(struct snd_soc_pcm_runtime *rtd,
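Illustrative only: codec and platform drvdata now funnel through the component helpers, so code holding a struct snd_soc_component can use them directly. example_store_priv is hypothetical.

static void example_store_priv(struct snd_soc_component *component, void *priv)
{
        snd_soc_component_set_drvdata(component, priv);
}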
diff --git a/include/sound/vx_core.h b/include/sound/vx_core.h
index f634f8f85db5..cae9c9d4ef22 100644
--- a/include/sound/vx_core.h
+++ b/include/sound/vx_core.h
@@ -80,8 +80,6 @@ struct vx_pipe {
unsigned int references; /* an output pipe may be used for monitoring and/or playback */
struct vx_pipe *monitoring_pipe; /* pointer to the monitoring pipe (capture pipe only)*/
-
- struct tasklet_struct start_tq;
};
struct vx_core;
@@ -165,9 +163,7 @@ struct vx_core {
struct snd_vx_hardware *hw;
struct snd_vx_ops *ops;
- spinlock_t lock;
- spinlock_t irq_lock;
- struct tasklet_struct tq;
+ struct mutex lock;
unsigned int chip_status;
unsigned int pcm_running;
@@ -223,6 +219,7 @@ void snd_vx_free_firmware(struct vx_core *chip);
* interrupt handler; exported for pcmcia
*/
irqreturn_t snd_vx_irq_handler(int irq, void *dev);
+irqreturn_t snd_vx_threaded_irq_handler(int irq, void *dev);
/*
* lowlevel functions
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index 0194a641e4e2..b04ee7e5a466 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -175,7 +175,7 @@ TRACE_EVENT(snd_soc_dapm_output_path,
__entry->path_sink = (long)path->sink;
),
- TP_printk("%c%s -> %s -> %s\n",
+ TP_printk("%c%s -> %s -> %s",
(int) __entry->path_sink &&
(int) __entry->path_connect ? '*' : ' ',
__get_str(wname), __get_str(pname), __get_str(psname))
@@ -204,7 +204,7 @@ TRACE_EVENT(snd_soc_dapm_input_path,
__entry->path_source = (long)path->source;
),
- TP_printk("%c%s <- %s <- %s\n",
+ TP_printk("%c%s <- %s <- %s",
(int) __entry->path_source &&
(int) __entry->path_connect ? '*' : ' ',
__get_str(wname), __get_str(pname), __get_str(psname))
@@ -226,7 +226,7 @@ TRACE_EVENT(snd_soc_dapm_connected,
__entry->stream = stream;
),
- TP_printk("%s: found %d paths\n",
+ TP_printk("%s: found %d paths",
__entry->stream ? "capture" : "playback", __entry->paths)
);
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 4ee4e30d26d9..1faecea101f3 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -23,6 +23,7 @@ struct map_lookup;
struct extent_buffer;
struct btrfs_work;
struct __btrfs_workqueue;
+struct btrfs_qgroup_operation;
#define show_ref_type(type) \
__print_symbolic(type, \
@@ -157,12 +158,13 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
#define show_map_flags(flag) \
__print_flags(flag, "|", \
- { EXTENT_FLAG_PINNED, "PINNED" }, \
- { EXTENT_FLAG_COMPRESSED, "COMPRESSED" }, \
- { EXTENT_FLAG_VACANCY, "VACANCY" }, \
- { EXTENT_FLAG_PREALLOC, "PREALLOC" }, \
- { EXTENT_FLAG_LOGGING, "LOGGING" }, \
- { EXTENT_FLAG_FILLING, "FILLING" })
+ { (1 << EXTENT_FLAG_PINNED), "PINNED" },\
+ { (1 << EXTENT_FLAG_COMPRESSED), "COMPRESSED" },\
+ { (1 << EXTENT_FLAG_VACANCY), "VACANCY" },\
+ { (1 << EXTENT_FLAG_PREALLOC), "PREALLOC" },\
+ { (1 << EXTENT_FLAG_LOGGING), "LOGGING" },\
+ { (1 << EXTENT_FLAG_FILLING), "FILLING" },\
+ { (1 << EXTENT_FLAG_FS_MAPPING), "FS_MAPPING" })
TRACE_EVENT_CONDITION(btrfs_get_extent,
@@ -996,6 +998,7 @@ DECLARE_EVENT_CLASS(btrfs__work,
__field( void *, func )
__field( void *, ordered_func )
__field( void *, ordered_free )
+ __field( void *, normal_work )
),
TP_fast_assign(
@@ -1004,11 +1007,13 @@ DECLARE_EVENT_CLASS(btrfs__work,
__entry->func = work->func;
__entry->ordered_func = work->ordered_func;
__entry->ordered_free = work->ordered_free;
+ __entry->normal_work = &work->normal_work;
),
- TP_printk("work=%p, wq=%p, func=%p, ordered_func=%p, ordered_free=%p",
- __entry->work, __entry->wq, __entry->func,
- __entry->ordered_func, __entry->ordered_free)
+ TP_printk("work=%p (normal_work=%p), wq=%p, func=%pf, ordered_func=%p,"
+ " ordered_free=%p",
+ __entry->work, __entry->normal_work, __entry->wq,
+ __entry->func, __entry->ordered_func, __entry->ordered_free)
);
/* For situations in which the work is freed */
@@ -1043,13 +1048,6 @@ DEFINE_EVENT(btrfs__work, btrfs_work_sched,
TP_ARGS(work)
);
-DEFINE_EVENT(btrfs__work, btrfs_normal_work_done,
-
- TP_PROTO(struct btrfs_work *work),
-
- TP_ARGS(work)
-);
-
DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,
TP_PROTO(struct btrfs_work *work),
@@ -1119,6 +1117,61 @@ DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy,
TP_ARGS(wq)
);
+#define show_oper_type(type) \
+ __print_symbolic(type, \
+ { BTRFS_QGROUP_OPER_ADD_EXCL, "OPER_ADD_EXCL" }, \
+ { BTRFS_QGROUP_OPER_ADD_SHARED, "OPER_ADD_SHARED" }, \
+ { BTRFS_QGROUP_OPER_SUB_EXCL, "OPER_SUB_EXCL" }, \
+ { BTRFS_QGROUP_OPER_SUB_SHARED, "OPER_SUB_SHARED" })
+
+DECLARE_EVENT_CLASS(btrfs_qgroup_oper,
+
+ TP_PROTO(struct btrfs_qgroup_operation *oper),
+
+ TP_ARGS(oper),
+
+ TP_STRUCT__entry(
+ __field( u64, ref_root )
+ __field( u64, bytenr )
+ __field( u64, num_bytes )
+ __field( u64, seq )
+ __field( int, type )
+ __field( u64, elem_seq )
+ ),
+
+ TP_fast_assign(
+ __entry->ref_root = oper->ref_root;
+ __entry->bytenr = oper->bytenr,
+ __entry->num_bytes = oper->num_bytes;
+ __entry->seq = oper->seq;
+ __entry->type = oper->type;
+ __entry->elem_seq = oper->elem.seq;
+ ),
+
+ TP_printk("ref_root = %llu, bytenr = %llu, num_bytes = %llu, "
+ "seq = %llu, elem.seq = %llu, type = %s",
+ (unsigned long long)__entry->ref_root,
+ (unsigned long long)__entry->bytenr,
+ (unsigned long long)__entry->num_bytes,
+ (unsigned long long)__entry->seq,
+ (unsigned long long)__entry->elem_seq,
+ show_oper_type(__entry->type))
+);
+
+DEFINE_EVENT(btrfs_qgroup_oper, btrfs_qgroup_account,
+
+ TP_PROTO(struct btrfs_qgroup_operation *oper),
+
+ TP_ARGS(oper)
+);
+
+DEFINE_EVENT(btrfs_qgroup_oper, btrfs_qgroup_record_ref,
+
+ TP_PROTO(struct btrfs_qgroup_operation *oper),
+
+ TP_ARGS(oper)
+);
+
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index 59d11c22f076..a0d008070962 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -53,15 +53,15 @@ DECLARE_EVENT_CLASS(filelock_lease,
),
TP_fast_assign(
- __entry->fl = fl;
+ __entry->fl = fl ? fl : NULL;
__entry->s_dev = inode->i_sb->s_dev;
__entry->i_ino = inode->i_ino;
- __entry->fl_next = fl->fl_next;
- __entry->fl_owner = fl->fl_owner;
- __entry->fl_flags = fl->fl_flags;
- __entry->fl_type = fl->fl_type;
- __entry->fl_break_time = fl->fl_break_time;
- __entry->fl_downgrade_time = fl->fl_downgrade_time;
+ __entry->fl_next = fl ? fl->fl_next : NULL;
+ __entry->fl_owner = fl ? fl->fl_owner : NULL;
+ __entry->fl_flags = fl ? fl->fl_flags : 0;
+ __entry->fl_type = fl ? fl->fl_type : 0;
+ __entry->fl_break_time = fl ? fl->fl_break_time : 0;
+ __entry->fl_downgrade_time = fl ? fl->fl_downgrade_time : 0;
),
TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
diff --git a/include/uapi/Kbuild b/include/uapi/Kbuild
index 81d2106287fe..245aa6e05e6a 100644
--- a/include/uapi/Kbuild
+++ b/include/uapi/Kbuild
@@ -12,3 +12,4 @@ header-y += video/
header-y += drm/
header-y += xen/
header-y += scsi/
+header-y += misc/
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 70e150ebc6c9..3cc8e1c2b996 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -355,6 +355,7 @@ header-y += serio.h
header-y += shm.h
header-y += signal.h
header-y += signalfd.h
+header-y += smiapp.h
header-y += snmp.h
header-y += sock_diag.h
header-y += socket.h
diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
index 5116a0e48172..2f96d233c980 100644
--- a/include/uapi/linux/kernel-page-flags.h
+++ b/include/uapi/linux/kernel-page-flags.h
@@ -31,6 +31,7 @@
#define KPF_KSM 21
#define KPF_THP 22
+#define KPF_BALLOON 23
#endif /* _UAPILINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 30db069bce62..4a1d0cc38ff2 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -552,6 +552,7 @@
#define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */
#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */
#define PCI_EXP_RTCAP 30 /* Root Capabilities */
+#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
#define PCI_EXP_RTSTA 32 /* Root Status */
#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */
@@ -630,7 +631,7 @@
/* Advanced Error Reporting */
#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
-#define PCI_ERR_UNC_TRAIN 0x00000001 /* Training */
+#define PCI_ERR_UNC_UND 0x00000001 /* Undefined */
#define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */
#define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */
#define PCI_ERR_UNC_POISON_TLP 0x00001000 /* Poisoned TLP */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 58afc04c107e..513df75d0fc9 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -1,6 +1,8 @@
#ifndef _LINUX_PRCTL_H
#define _LINUX_PRCTL_H
+#include <linux/types.h>
+
/* Values to pass as first argument to prctl() */
#define PR_SET_PDEATHSIG 1 /* Second arg is a signal */
@@ -119,6 +121,31 @@
# define PR_SET_MM_ENV_END 11
# define PR_SET_MM_AUXV 12
# define PR_SET_MM_EXE_FILE 13
+# define PR_SET_MM_MAP 14
+# define PR_SET_MM_MAP_SIZE 15
+
+/*
+ * This structure provides a new memory descriptor
+ * map which mostly modifies /proc/pid/stat[m]
+ * output for a task. This is mostly done for the
+ * sake of checkpoint/restore functionality.
+ */
+struct prctl_mm_map {
+ __u64 start_code; /* code section bounds */
+ __u64 end_code;
+ __u64 start_data; /* data section bounds */
+ __u64 end_data;
+ __u64 start_brk; /* heap for brk() syscall */
+ __u64 brk;
+ __u64 start_stack; /* stack starts at */
+ __u64 arg_start; /* command line arguments bounds */
+ __u64 arg_end;
+ __u64 env_start; /* environment variables bounds */
+ __u64 env_end;
+ __u64 *auxv; /* auxiliary vector */
+ __u32 auxv_size; /* vector size */
+ __u32 exe_fd; /* /proc/$pid/exe link file */
+};
/*
* Set specific pid that is allowed to ptrace the current task.
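A userspace sketch of the new interface (placeholder values, error handling omitted; in practice the remaining bounds would come from the restored image, and the kernel validates the whole map).

#include <sys/prctl.h>

static int example_set_mm_map(void)
{
        struct prctl_mm_map map = {
                .start_code = 0x400000,        /* placeholder bounds */
                .end_code   = 0x401000,
                .exe_fd     = (__u32)-1,       /* keep the current exe link */
        };

        return prctl(PR_SET_MM, PR_SET_MM_MAP,
                     (unsigned long)&map, sizeof(map), 0);
}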
diff --git a/include/uapi/linux/smiapp.h b/include/uapi/linux/smiapp.h
new file mode 100644
index 000000000000..53938f4412ee
--- /dev/null
+++ b/include/uapi/linux/smiapp.h
@@ -0,0 +1,29 @@
+/*
+ * include/uapi/linux/smiapp.h
+ *
+ * Generic driver for SMIA/SMIA++ compliant camera modules
+ *
+ * Copyright (C) 2014 Intel Corporation
+ * Contact: Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#ifndef __UAPI_LINUX_SMIAPP_H_
+#define __UAPI_LINUX_SMIAPP_H_
+
+#define V4L2_SMIAPP_TEST_PATTERN_MODE_DISABLED 0
+#define V4L2_SMIAPP_TEST_PATTERN_MODE_SOLID_COLOUR 1
+#define V4L2_SMIAPP_TEST_PATTERN_MODE_COLOUR_BARS 2
+#define V4L2_SMIAPP_TEST_PATTERN_MODE_COLOUR_BARS_GREY 3
+#define V4L2_SMIAPP_TEST_PATTERN_MODE_PN9 4
+
+#endif /* __UAPI_LINUX_SMIAPP_H_ */
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index e946e43fb8d5..661f119a51b8 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -746,6 +746,8 @@ enum v4l2_auto_focus_range {
V4L2_AUTO_FOCUS_RANGE_INFINITY = 3,
};
+#define V4L2_CID_PAN_SPEED (V4L2_CID_CAMERA_CLASS_BASE+32)
+#define V4L2_CID_TILT_SPEED (V4L2_CID_CAMERA_CLASS_BASE+33)
/* FM Modulator class control IDs */
@@ -865,6 +867,10 @@ enum v4l2_jpeg_chroma_subsampling {
#define V4L2_CID_VBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 1)
#define V4L2_CID_HBLANK (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 2)
#define V4L2_CID_ANALOGUE_GAIN (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 3)
+#define V4L2_CID_TEST_PATTERN_RED (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 4)
+#define V4L2_CID_TEST_PATTERN_GREENR (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 5)
+#define V4L2_CID_TEST_PATTERN_BLUE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 6)
+#define V4L2_CID_TEST_PATTERN_GREENB (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 7)
/* Image processing controls */
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index 6c8f159e416e..6a0764c89fcb 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -21,17 +21,8 @@
#ifndef _V4L2_DV_TIMINGS_H
#define _V4L2_DV_TIMINGS_H
-#if __GNUC__ < 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ < 6))
-/* Sadly gcc versions older than 4.6 have a bug in how they initialize
- anonymous unions where they require additional curly brackets.
- This violates the C1x standard. This workaround adds the curly brackets
- if needed. */
#define V4L2_INIT_BT_TIMINGS(_width, args...) \
{ .bt = { _width , ## args } }
-#else
-#define V4L2_INIT_BT_TIMINGS(_width, args...) \
- .bt = { _width , ## args }
-#endif
/* CEA-861-E timings (i.e. standard HDTV timings) */
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 6612974c64bf..29715d27548f 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -33,6 +33,9 @@
/* Check if EEH is supported */
#define VFIO_EEH 5
+/* Two-stage IOMMU */
+#define VFIO_TYPE1_NESTING_IOMMU 6 /* Implies v2 */
+
/*
* The IOCTL interface is designed for extensibility by embedding the
* structure length (argsz) and flags into structures passed between
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 778a3298fb34..1c2f84fd4d99 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -79,6 +79,7 @@
/* Four-character-code (FOURCC) */
#define v4l2_fourcc(a, b, c, d)\
((__u32)(a) | ((__u32)(b) << 8) | ((__u32)(c) << 16) | ((__u32)(d) << 24))
+#define v4l2_fourcc_be(a, b, c, d) (v4l2_fourcc(a, b, c, d) | (1 << 31))
/*
* E N U M S
@@ -307,6 +308,8 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_XRGB555 v4l2_fourcc('X', 'R', '1', '5') /* 16 XRGB-1-5-5-5 */
#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */
#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */
+#define V4L2_PIX_FMT_ARGB555X v4l2_fourcc_be('A', 'R', '1', '5') /* 16 ARGB-5-5-5 BE */
+#define V4L2_PIX_FMT_XRGB555X v4l2_fourcc_be('X', 'R', '1', '5') /* 16 XRGB-5-5-5 BE */
#define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */
#define V4L2_PIX_FMT_BGR666 v4l2_fourcc('B', 'G', 'R', 'H') /* 18 BGR-6-6-6 */
#define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */
@@ -1285,11 +1288,11 @@ struct v4l2_ext_control {
union {
__s32 value;
__s64 value64;
- char *string;
- __u8 *p_u8;
- __u16 *p_u16;
- __u32 *p_u32;
- void *ptr;
+ char __user *string;
+ __u8 __user *p_u8;
+ __u16 __user *p_u16;
+ __u32 __user *p_u32;
+ void __user *ptr;
};
} __attribute__ ((packed));
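Illustrative check only: v4l2_fourcc_be() is the plain fourcc with bit 31 set, so for example V4L2_PIX_FMT_XRGB555X equals V4L2_PIX_FMT_XRGB555 | (1U << 31). example_is_be_fourcc is hypothetical.

static inline int example_is_be_fourcc(__u32 fourcc)
{
        return (fourcc & (1U << 31)) != 0;
}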
diff --git a/include/uapi/misc/Kbuild b/include/uapi/misc/Kbuild
new file mode 100644
index 000000000000..e96cae7d58c9
--- /dev/null
+++ b/include/uapi/misc/Kbuild
@@ -0,0 +1,2 @@
+# misc Header export list
+header-y += cxl.h
diff --git a/include/uapi/misc/cxl.h b/include/uapi/misc/cxl.h
new file mode 100644
index 000000000000..cd6d789b73ec
--- /dev/null
+++ b/include/uapi/misc/cxl.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2014 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_MISC_CXL_H
+#define _UAPI_MISC_CXL_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+
+struct cxl_ioctl_start_work {
+ __u64 flags;
+ __u64 work_element_descriptor;
+ __u64 amr;
+ __s16 num_interrupts;
+ __s16 reserved1;
+ __s32 reserved2;
+ __u64 reserved3;
+ __u64 reserved4;
+ __u64 reserved5;
+ __u64 reserved6;
+};
+
+#define CXL_START_WORK_AMR 0x0000000000000001ULL
+#define CXL_START_WORK_NUM_IRQS 0x0000000000000002ULL
+#define CXL_START_WORK_ALL (CXL_START_WORK_AMR |\
+ CXL_START_WORK_NUM_IRQS)
+
+/* ioctl numbers */
+#define CXL_MAGIC 0xCA
+#define CXL_IOCTL_START_WORK _IOW(CXL_MAGIC, 0x00, struct cxl_ioctl_start_work)
+#define CXL_IOCTL_GET_PROCESS_ELEMENT _IOR(CXL_MAGIC, 0x01, __u32)
+
+#define CXL_READ_MIN_SIZE 0x1000 /* 4K */
+
+/* Events from read() */
+enum cxl_event_type {
+ CXL_EVENT_RESERVED = 0,
+ CXL_EVENT_AFU_INTERRUPT = 1,
+ CXL_EVENT_DATA_STORAGE = 2,
+ CXL_EVENT_AFU_ERROR = 3,
+};
+
+struct cxl_event_header {
+ __u16 type;
+ __u16 size;
+ __u16 process_element;
+ __u16 reserved1;
+};
+
+struct cxl_event_afu_interrupt {
+ __u16 flags;
+ __u16 irq; /* Raised AFU interrupt number */
+ __u32 reserved1;
+};
+
+struct cxl_event_data_storage {
+ __u16 flags;
+ __u16 reserved1;
+ __u32 reserved2;
+ __u64 addr;
+ __u64 dsisr;
+ __u64 reserved3;
+};
+
+struct cxl_event_afu_error {
+ __u16 flags;
+ __u16 reserved1;
+ __u32 reserved2;
+ __u64 error;
+};
+
+struct cxl_event {
+ struct cxl_event_header header;
+ union {
+ struct cxl_event_afu_interrupt irq;
+ struct cxl_event_data_storage fault;
+ struct cxl_event_afu_error afu_error;
+ };
+};
+
+#endif /* _UAPI_MISC_CXL_H */
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 32168f7ffce3..6ee586728df9 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -219,7 +219,8 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_G723_40_1B ((__force snd_pcm_format_t) 47) /* 1 sample in 1 byte */
#define SNDRV_PCM_FORMAT_DSD_U8 ((__force snd_pcm_format_t) 48) /* DSD, 1-byte samples DSD (x8) */
#define SNDRV_PCM_FORMAT_DSD_U16_LE ((__force snd_pcm_format_t) 49) /* DSD, 2-byte samples DSD (x16), little endian */
-#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U16_LE
+#define SNDRV_PCM_FORMAT_DSD_U32_LE ((__force snd_pcm_format_t) 50) /* DSD, 4-byte samples DSD (x32), little endian */
+#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_LE
#ifdef SNDRV_LITTLE_ENDIAN
#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE
diff --git a/include/xen/events.h b/include/xen/events.h
index 8bee7a75e850..5321cd9636e6 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -28,6 +28,8 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
unsigned long irqflags,
const char *devname,
void *dev_id);
+int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
+ unsigned int remote_port);
int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
unsigned int remote_port,
irq_handler_t handler,
diff --git a/include/xen/interface/elfnote.h b/include/xen/interface/elfnote.h
index 6f4eae328ca7..f90b03454659 100644
--- a/include/xen/interface/elfnote.h
+++ b/include/xen/interface/elfnote.h
@@ -3,6 +3,24 @@
*
* Definitions used for the Xen ELF notes.
*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
* Copyright (c) 2006, Ian Campbell, XenSource Ltd.
*/
@@ -18,12 +36,13 @@
*
* LEGACY indicated the fields in the legacy __xen_guest string which
* this note type replaces.
+ *
+ * String values (for non-legacy) are NULL terminated ASCII, also known
+ * as ASCIZ type.
*/
/*
* NAME=VALUE pair (string).
- *
- * LEGACY: FEATURES and PAE
*/
#define XEN_ELFNOTE_INFO 0
@@ -137,10 +156,30 @@
/*
* Whether or not the guest supports cooperative suspend cancellation.
+ * This is a numeric value.
+ *
+ * Default is 0
*/
#define XEN_ELFNOTE_SUSPEND_CANCEL 14
/*
+ * The (non-default) location the initial phys-to-machine map should be
+ * placed at by the hypervisor (Dom0) or the tools (DomU).
+ * The kernel must be prepared for this mapping to be established using
+ * large pages, despite such otherwise not being available to guests.
+ * The kernel must also be able to handle the page table pages used for
+ * this mapping not being accessible through the initial mapping.
+ * (Only x86-64 supports this at present.)
+ */
+#define XEN_ELFNOTE_INIT_P2M 15
+
+/*
+ * Whether or not the guest can deal with being passed an initrd not
+ * mapped through its initial page tables.
+ */
+#define XEN_ELFNOTE_MOD_START_PFN 16
+
+/*
* The features supported by this kernel (numeric).
*
* Other than XEN_ELFNOTE_FEATURES on pre-4.2 Xen, this note allows a
@@ -153,6 +192,11 @@
*/
#define XEN_ELFNOTE_SUPPORTED_FEATURES 17
+/*
+ * The number of the highest elfnote defined.
+ */
+#define XEN_ELFNOTE_MAX XEN_ELFNOTE_SUPPORTED_FEATURES
+
#endif /* __XEN_PUBLIC_ELFNOTE_H__ */
/*
diff --git a/include/xen/interface/io/vscsiif.h b/include/xen/interface/io/vscsiif.h
new file mode 100644
index 000000000000..d07d7aca8d1c
--- /dev/null
+++ b/include/xen/interface/io/vscsiif.h
@@ -0,0 +1,229 @@
+/******************************************************************************
+ * vscsiif.h
+ *
+ * Based on the blkif.h code.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright(c) FUJITSU Limited 2008.
+ */
+
+#ifndef __XEN__PUBLIC_IO_SCSI_H__
+#define __XEN__PUBLIC_IO_SCSI_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/*
+ * Feature and Parameter Negotiation
+ * =================================
+ * The two halves of a Xen pvSCSI driver utilize nodes within the XenStore to
+ * communicate capabilities and to negotiate operating parameters. This
+ * section enumerates these nodes which reside in the respective front and
+ * backend portions of the XenStore, following the XenBus convention.
+ *
+ * Any specified default value is in effect if the corresponding XenBus node
+ * is not present in the XenStore.
+ *
+ * XenStore nodes in sections marked "PRIVATE" are solely for use by the
+ * driver side whose XenBus tree contains them.
+ *
+ *****************************************************************************
+ * Backend XenBus Nodes
+ *****************************************************************************
+ *
+ *------------------ Backend Device Identification (PRIVATE) ------------------
+ *
+ * p-devname
+ * Values: string
+ *
+ * A free string used to identify the physical device (e.g. a disk name).
+ *
+ * p-dev
+ * Values: string
+ *
+ * A string specifying the backend device: either a 4-tuple "h:c:t:l"
+ * (host, controller, target, lun, all integers), or a WWN (e.g.
+ * "naa.60014054ac780582").
+ *
+ * v-dev
+ * Values: string
+ *
+ * A string specifying the frontend device in form of a 4-tuple "h:c:t:l"
+ * (host, controller, target, lun, all integers).
+ *
+ *--------------------------------- Features ---------------------------------
+ *
+ * feature-sg-grant
+ * Values: unsigned [VSCSIIF_SG_TABLESIZE...65535]
+ * Default Value: 0
+ *
+ * Specifies the maximum number of scatter/gather elements in grant pages
+ * supported. If not set, the backend supports up to VSCSIIF_SG_TABLESIZE
+ * SG elements specified directly in the request.
+ *
+ *****************************************************************************
+ * Frontend XenBus Nodes
+ *****************************************************************************
+ *
+ *----------------------- Request Transport Parameters -----------------------
+ *
+ * event-channel
+ * Values: unsigned
+ *
+ * The identifier of the Xen event channel used to signal activity
+ * in the ring buffer.
+ *
+ * ring-ref
+ * Values: unsigned
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * the sole page in a single page sized ring buffer.
+ *
+ * protocol
+ * Values: string (XEN_IO_PROTO_ABI_*)
+ * Default Value: XEN_IO_PROTO_ABI_NATIVE
+ *
+ * The machine ABI rules governing the format of all ring request and
+ * response structures.
+ */
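As an illustration of the frontend transport nodes above, the following is a minimal sketch (not part of this header) of a frontend publishing "ring-ref" and "event-channel" through the xenbus API. The function name scsifront_publish_info is made up, and ring_ref/evtchn are assumed to have been obtained beforehand (e.g. via gnttab_grant_foreign_access() and xenbus_alloc_evtchn()); most error handling is trimmed.

#include <linux/errno.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>

static int scsifront_publish_info(struct xenbus_device *dev,
				  grant_ref_t ring_ref, unsigned int evtchn)
{
	struct xenbus_transaction xbt;
	int err;

again:
	err = xenbus_transaction_start(&xbt);
	if (err)
		return err;

	err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u", ring_ref);
	if (!err)
		err = xenbus_printf(xbt, dev->nodename, "event-channel",
				    "%u", evtchn);
	if (err) {
		xenbus_transaction_end(xbt, 1);	/* abort the transaction */
		return err;
	}

	err = xenbus_transaction_end(xbt, 0);	/* commit; may return -EAGAIN */
	if (err == -EAGAIN)
		goto again;
	return err;
}

The backend typically watches these nodes and maps the ring page once both values are present.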
+
+/* Requests from the frontend to the backend */
+
+/*
+ * Request a SCSI operation specified via a CDB in vscsiif_request.cmnd.
+ * The target is specified via channel, id and lun.
+ *
+ * The operation to be performed is specified via a CDB in cmnd[], the length
+ * of the CDB is in cmd_len. sc_data_direction specifies the direction of data
+ * (to the device, from the device, or none at all).
+ *
+ * If data is to be transferred to or from the device, the buffer(s) in
+ * guest memory are specified via one or more scsiif_request_segment
+ * descriptors, each specifying a memory page via a grant_ref_t, an offset into
+ * the page and the length of the area in that page. All scsiif_request_segment
+ * areas concatenated form the resulting data buffer used by the operation.
+ * If the number of scsiif_request_segment areas is not too large (less than
+ * or equal to VSCSIIF_SG_TABLESIZE) the areas can be specified directly in the
+ * seg[] array and the number of valid scsiif_request_segment elements is to be
+ * set in nr_segments.
+ *
+ * If "feature-sg-grant" in the Xenstore is set it is possible to specify more
+ * than VSCSIIF_SG_TABLESIZE scsiif_request_segment elements via indirection.
+ * The maximum number of allowed scsiif_request_segment elements is the value
+ * of the "feature-sg-grant" entry from Xenstore. When using indirection the
+ * seg[] array doesn't contain specifications of the data buffers, but
+ * references to scsiif_request_segment arrays, which in turn reference the
+ * data buffers. While nr_segments holds the number of populated seg[] entries
+ * (plus the set VSCSIIF_SG_GRANT bit), the number of scsiif_request_segment
+ * elements referencing the target data buffers is calculated from the lengths
+ * of the seg[] elements (the sum of all valid seg[].length divided by the
+ * size of one scsiif_request_segment structure).
+ */
+#define VSCSIIF_ACT_SCSI_CDB 1
+
+/*
+ * Request abort of a running operation for the specified target given by
+ * channel, id, lun and the operation's rqid in ref_rqid.
+ */
+#define VSCSIIF_ACT_SCSI_ABORT 2
+
+/*
+ * Request a device reset of the specified target (channel and id).
+ */
+#define VSCSIIF_ACT_SCSI_RESET 3
+
+/*
+ * Preset scatter/gather elements for a following request. Deprecated.
+ * The define is kept only to avoid reuse of the value "4" for other actions.
+ */
+#define VSCSIIF_ACT_SCSI_SG_PRESET 4
+
+/*
+ * Maximum scatter/gather segments per request.
+ *
+ * Considering the balance between allocating at least 16 "vscsiif_request"
+ * structures on one page (4096 bytes) and the number of scatter/gather
+ * elements needed, we decided to use 26 as a magic number.
+ *
+ * If "feature-sg-grant" is set, more scatter/gather elements can be specified
+ * by placing them in one or more (up to VSCSIIF_SG_TABLESIZE) granted pages.
+ * In this case the vscsiif_request seg elements don't contain references to
+ * the user data, but to the SG elements referencing the user data.
+ */
+#define VSCSIIF_SG_TABLESIZE 26
+
+/*
+ * Based on Linux kernel 2.6.18; these values are still valid.
+ * Changing these values requires support of multiple protocols via the rings
+ * as "old clients" will blindly use these values and the resulting structure
+ * sizes.
+ */
+#define VSCSIIF_MAX_COMMAND_SIZE 16
+#define VSCSIIF_SENSE_BUFFERSIZE 96
+
+struct scsiif_request_segment {
+ grant_ref_t gref;
+ uint16_t offset;
+ uint16_t length;
+};
+
+#define VSCSIIF_SG_PER_PAGE (PAGE_SIZE / sizeof(struct scsiif_request_segment))
+
+/* Size of one request is 252 bytes */
+struct vscsiif_request {
+ uint16_t rqid; /* private guest value, echoed in resp */
+ uint8_t act; /* command between backend and frontend */
+ uint8_t cmd_len; /* valid CDB bytes */
+
+ uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE]; /* the CDB */
+ uint16_t timeout_per_command; /* deprecated */
+ uint16_t channel, id, lun; /* (virtual) device specification */
+ uint16_t ref_rqid; /* command abort reference */
+ uint8_t sc_data_direction; /* for DMA_TO_DEVICE(1)
+ DMA_FROM_DEVICE(2)
+ DMA_NONE(3) requests */
+ uint8_t nr_segments; /* Number of pieces of scatter-gather */
+/*
+ * flag in nr_segments: SG elements via grant page
+ *
+ * If VSCSIIF_SG_GRANT is set, the low 7 bits of nr_segments specify the number
+ * of grant pages containing SG elements. Usable only if "feature-sg-grant" is set.
+ */
+#define VSCSIIF_SG_GRANT 0x80
+
+ struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
+ uint32_t reserved[3];
+};
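To make the indirection rules above concrete, here is a hedged sketch (assuming this header is included) of how a backend could derive the number of scsiif_request_segment elements carried in grant pages when VSCSIIF_SG_GRANT is set; the helper name is invented for illustration.

static unsigned int vscsiif_nr_indirect_segments(const struct vscsiif_request *req)
{
	unsigned int nr_grant_pages, i, total_len = 0;

	if (!(req->nr_segments & VSCSIIF_SG_GRANT))
		return 0;	/* segments are carried directly in seg[] */

	/* Low 7 bits: number of seg[] entries referencing grant pages. */
	nr_grant_pages = req->nr_segments & ~VSCSIIF_SG_GRANT;

	/*
	 * Sum the lengths of the referenced areas and divide by the element
	 * size, exactly as described in the VSCSIIF_ACT_SCSI_CDB comment.
	 */
	for (i = 0; i < nr_grant_pages; i++)
		total_len += req->seg[i].length;

	return total_len / sizeof(struct scsiif_request_segment);
}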
+
+/* Size of one response is 252 bytes */
+struct vscsiif_response {
+ uint16_t rqid; /* identifies request */
+ uint8_t padding;
+ uint8_t sense_len;
+ uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
+ int32_t rslt;
+	uint32_t residual_len;		/* request bufflen minus the number of
+					   bytes transferred by the physical device */
+ uint32_t reserved[36];
+};
+
+DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response);
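For orientation, a brief sketch of a frontend queuing a VSCSIIF_ACT_SCSI_CDB request on the ring generated by DEFINE_RING_TYPES(). The function name is hypothetical, the ring is assumed to be already initialised (FRONT_RING_INIT), cdb_len is assumed to be at most VSCSIIF_MAX_COMMAND_SIZE, and ring-full checks, data segments and completion handling are omitted.

#include <linux/string.h>
#include <xen/events.h>

static void scsifront_queue_cdb(struct vscsiif_front_ring *ring, int irq,
				uint16_t rqid, const uint8_t *cdb,
				uint8_t cdb_len)
{
	struct vscsiif_request *req;
	int notify;

	/* Claim the next free slot in the shared ring. */
	req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
	memset(req, 0, sizeof(*req));

	req->rqid = rqid;
	req->act = VSCSIIF_ACT_SCSI_CDB;
	req->cmd_len = cdb_len;
	memcpy(req->cmnd, cdb, cdb_len);
	req->sc_data_direction = 3;	/* DMA_NONE: no data phase */
	req->nr_segments = 0;

	ring->req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
	if (notify)
		notify_remote_via_irq(irq);	/* irq is bound to the event channel */
}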
+
+#endif /*__XEN__PUBLIC_IO_SCSI_H__*/
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index de082130ba4b..f68719f405af 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -3,6 +3,24 @@
*
* Guest OS interface to Xen.
*
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
* Copyright (c) 2004, K A Fraser
*/
@@ -73,13 +91,23 @@
* VIRTUAL INTERRUPTS
*
* Virtual interrupts that a guest OS may receive from Xen.
+ * In the side comments, 'V.' denotes a per-VCPU VIRQ while 'G.' denotes a
+ * global VIRQ. The former can be bound once per VCPU and cannot be re-bound.
+ * The latter can be allocated only once per guest: they must initially be
+ * allocated to VCPU0 but can subsequently be re-bound.
*/
-#define VIRQ_TIMER 0 /* Timebase update, and/or requested timeout. */
-#define VIRQ_DEBUG 1 /* Request guest to dump debug info. */
-#define VIRQ_CONSOLE 2 /* (DOM0) Bytes received on emergency console. */
-#define VIRQ_DOM_EXC 3 /* (DOM0) Exceptional event for some domain. */
-#define VIRQ_DEBUGGER 6 /* (DOM0) A domain has paused for debugging. */
-#define VIRQ_PCPU_STATE 9 /* (DOM0) PCPU state changed */
+#define VIRQ_TIMER 0 /* V. Timebase update, and/or requested timeout. */
+#define VIRQ_DEBUG 1 /* V. Request guest to dump debug info. */
+#define VIRQ_CONSOLE 2 /* G. (DOM0) Bytes received on emergency console. */
+#define VIRQ_DOM_EXC 3 /* G. (DOM0) Exceptional event for some domain. */
+#define VIRQ_TBUF 4 /* G. (DOM0) Trace buffer has records available. */
+#define VIRQ_DEBUGGER 6 /* G. (DOM0) A domain has paused for debugging. */
+#define VIRQ_XENOPROF 7 /* V. XenOprofile interrupt: new sample available */
+#define VIRQ_CON_RING 8 /* G. (DOM0) Bytes received on console */
+#define VIRQ_PCPU_STATE 9 /* G. (DOM0) PCPU state changed */
+#define VIRQ_MEM_EVENT 10 /* G. (DOM0) A memory event has occurred */
+#define VIRQ_XC_RESERVED 11 /* G. Reserved for XenClient */
+#define VIRQ_ENOMEM 12 /* G. (DOM0) Low on heap memory */
/* Architecture-specific VIRQ definitions. */
#define VIRQ_ARCH_0 16
@@ -92,24 +120,68 @@
#define VIRQ_ARCH_7 23
#define NR_VIRQS 24
+
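As a small example of the 'V.'/'G.' distinction documented above: a per-VCPU VIRQ such as VIRQ_DEBUG is bound separately on each CPU, whereas a global VIRQ is bound exactly once (initially on VCPU0). Below is a hedged Linux-side sketch using bind_virq_to_irqhandler(); the handler body is left empty and the function names are illustrative.

#include <linux/interrupt.h>
#include <xen/events.h>
#include <xen/interface/xen.h>

static irqreturn_t debug_virq_handler(int irq, void *dev_id)
{
	/* Dump whatever per-CPU debug state is useful here. */
	return IRQ_HANDLED;
}

static int bind_debug_virq_on(unsigned int cpu)
{
	/* Per-VCPU VIRQ: bound for this CPU only, cannot be re-bound. */
	int irq = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, debug_virq_handler,
					  0, "xen-debug", NULL);

	return irq < 0 ? irq : 0;
}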
/*
- * MMU-UPDATE REQUESTS
- *
- * HYPERVISOR_mmu_update() accepts a list of (ptr, val) pairs.
- * A foreigndom (FD) can be specified (or DOMID_SELF for none).
- * Where the FD has some effect, it is described below.
- * ptr[1:0] specifies the appropriate MMU_* command.
+ * enum neg_errnoval HYPERVISOR_mmu_update(const struct mmu_update reqs[],
+ * unsigned count, unsigned *done_out,
+ * unsigned foreigndom)
+ * @reqs is an array of mmu_update_t structures ((ptr, val) pairs).
+ * @count is the length of the above array.
+ * @done_out is an output parameter indicating the number of completed operations.
+ * @foreigndom[15:0]: FD, the expected owner of data pages referenced in this
+ * hypercall invocation. Can be DOMID_SELF.
+ * @foreigndom[31:16]: PFD, the expected owner of pagetable pages referenced
+ * in this hypercall invocation. The value of this field
+ * (x) encodes the PFD as follows:
+ * x == 0 => PFD == DOMID_SELF
+ * x != 0 => PFD == x - 1
*
+ * Sub-commands: ptr[1:0] specifies the appropriate MMU_* command.
+ * -------------
* ptr[1:0] == MMU_NORMAL_PT_UPDATE:
- * Updates an entry in a page table. If updating an L1 table, and the new
- * table entry is valid/present, the mapped frame must belong to the FD, if
- * an FD has been specified. If attempting to map an I/O page then the
- * caller assumes the privilege of the FD.
+ * Updates an entry in a page table belonging to PFD. If updating an L1 table,
+ * and the new table entry is valid/present, the mapped frame must belong to
+ * FD. If attempting to map an I/O page then the caller assumes the privilege
+ * of the FD.
* FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
* FD == DOMID_XEN: Map restricted areas of Xen's heap space.
* ptr[:2] -- Machine address of the page-table entry to modify.
* val -- Value to write.
*
+ * There are also certain implicit requirements when using this hypercall. The
+ * pages that make up a pagetable must be mapped read-only in the guest.
+ * This prevents uncontrolled guest updates to the pagetable. Xen strictly
+ * enforces this, and will disallow any pagetable update which would end up
+ * mapping a pagetable page RW, and will disallow using any writable page as a
+ * pagetable. In practice this means that when constructing a page table for a
+ * process, thread, etc, we MUST be very diligent in following these rules:
+ * 1). Start with top-level page (PGD or in Xen language: L4). Fill out
+ * the entries.
+ * 2). Keep on going, filling out the upper (PUD or L3), and middle (PMD
+ * or L2).
+ * 3). Start filling out the PTE table (L1) with the PTE entries. Once
+ * done, make sure to set each of those entries to RO (so writeable bit
+ * is unset). Once that has been completed, set the PMD (L2) for this
+ * PTE table as RO.
+ * 4). When completed with all of the PMD (L2) entries, and all of them have
+ * been set to RO, make sure to set RO the PUD (L3). Do the same
+ * operation on PGD (L4) pagetable entries that have a PUD (L3) entry.
+ * 5). Now before you can use those pages (so setting the cr3), you MUST also
+ * pin them so that the hypervisor can verify the entries. This is done
+ * via the HYPERVISOR_mmuext_op(MMUEXT_PIN_L4_TABLE, guest physical frame
+ *      number of the PGD (L4)). At this point the HYPERVISOR_mmuext_op(
+ * MMUEXT_NEW_BASEPTR, guest physical frame number of the PGD (L4)) can be
+ * issued.
+ * For 32-bit guests, the L4 is not used (as there are fewer pagetable levels),
+ * so use L3 instead.
+ * At this point the pagetables can be modified using the MMU_NORMAL_PT_UPDATE
+ * hypercall. Also if so desired the OS can also try to write to the PTE
+ * and be trapped by the hypervisor (as the PTE entry is RO).
+ *
+ * To deallocate the pages, the operations are the reverse of the steps
+ * mentioned above. The argument is MMUEXT_UNPIN_TABLE for all levels and the
+ * pagetable MUST NOT be in use (meaning that cr3 does not point to it).
+ *
* ptr[1:0] == MMU_MACHPHYS_UPDATE:
* Updates an entry in the machine->pseudo-physical mapping table.
* ptr[:2] -- Machine address within the frame whose mapping to modify.
@@ -119,6 +191,72 @@
* ptr[1:0] == MMU_PT_UPDATE_PRESERVE_AD:
* As MMU_NORMAL_PT_UPDATE above, but A/D bits currently in the PTE are ORed
* with those in @val.
+ *
+ * @val is usually the machine frame number along with some attributes.
+ * The attributes by default follow the architecture-defined bits. This means
+ * that if this is an X86_64 machine and the four-level page table layout is
+ * used, the layout of val is:
+ * - 63 if set means No execute (NX)
+ * - 46-13 the machine frame number
+ * - 12 available for guest
+ * - 11 available for guest
+ * - 10 available for guest
+ * - 9 available for guest
+ * - 8 global
+ * - 7 PAT (PSE is disabled, must use hypercall to make 4MB or 2MB pages)
+ * - 6 dirty
+ * - 5 accessed
+ * - 4 page cache disabled
+ * - 3 page write through
+ * - 2 userspace accessible
+ * - 1 writeable
+ * - 0 present
+ *
+ * The one bit that does not fit the default layout is the PAGE_PSE bit
+ * (also called PAGE_PAT). The MMUEXT_[UN]MARK_SUPER arguments to
+ * HYPERVISOR_mmuext_op serve as the mechanism to mark a page as a 4MB
+ * (or 2MB) superpage instead of using the PAGE_PSE bit.
+ *
+ * The reason that the PAGE_PSE bit (bit 7) is not being utilized is that Xen
+ * uses it as the Page Attribute Table (PAT) bit - for details please
+ * refer to Intel SDM 10.12. The PAT allows setting the caching attributes of
+ * pages instead of using MTRRs.
+ *
+ * The PAT MSR is as follows (it is a 64-bit value, each entry is 8 bits):
+ * PAT4 PAT0
+ * +-----+-----+----+----+----+-----+----+----+
+ * | UC | UC- | WC | WB | UC | UC- | WC | WB | <= Linux
+ * +-----+-----+----+----+----+-----+----+----+
+ * | UC | UC- | WT | WB | UC | UC- | WT | WB | <= BIOS (default when machine boots)
+ * +-----+-----+----+----+----+-----+----+----+
+ * | rsv | rsv | WP | WC | UC | UC- | WT | WB | <= Xen
+ * +-----+-----+----+----+----+-----+----+----+
+ *
+ * The lookup of this index table translates to looking up
+ * Bit 7, Bit 4, and Bit 3 of the val entry:
+ *
+ * PAT/PSE (bit 7) ... PCD (bit 4) .. PWT (bit 3).
+ *
+ * If all bits are off, then we are using PAT0. If bit 3 is turned on,
+ * then we are using PAT1; if bits 3 and 4 are on, then PAT2, and so on.
+ *
+ * As you can see, the Linux PAT1 translates to PAT4 under Xen. This means
+ * that a guest that follows Linux's PAT setup and would like to set Write
+ * Combined on pages MUST use the PAT4 entry, i.e. Bit 7 (PAGE_PAT) must be
+ * set. For example, Linux only uses PAT0, PAT1, and PAT2 for
+ * caching, as:
+ *
+ * WB = none (so PAT0)
+ * WC = PWT (bit 3 on)
+ * UC = PWT | PCD (bits 3 and 4 are on).
+ *
+ * To make this work with Xen, the guest needs to translate the WC bit like so:
+ *
+ * PWT (so bit 3 on) --> PAT (so bit 7 is on) and clear bit 3
+ *
+ * And to translate back:
+ *
+ * PAT (bit 7 on) --> PWT (bit 3 on) and clear bit 7.
*/
#define MMU_NORMAL_PT_UPDATE 0 /* checked '*ptr = val'. ptr is MA. */
#define MMU_MACHPHYS_UPDATE 1 /* ptr = MA of frame to modify entry for */
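Tying the MMU_NORMAL_PT_UPDATE description and the PAT discussion together, here is a hedged sketch of a Linux PV guest writing a single Write-Combined PTE via HYPERVISOR_mmu_update(). The EX_PAGE_* masks and the helper name are illustrative only; real code would use the kernel's _PAGE_* definitions and the usual paravirt helpers.

#include <asm/xen/hypercall.h>
#include <xen/interface/xen.h>

/* Illustrative attribute masks matching the @val bit layout described above. */
#define EX_PAGE_PRESENT	(1ULL << 0)
#define EX_PAGE_RW	(1ULL << 1)
#define EX_PAGE_PAT	(1ULL << 7)	/* selects PAT4: Write-Combined under Xen */

static int example_map_wc_page(uint64_t pte_machine_addr, uint64_t mfn)
{
	/*
	 * Linux would normally encode Write-Combined as PWT (bit 3); under
	 * Xen's PAT layout the WC entry is PAT4, so bit 7 is set and bit 3
	 * is left clear, as the translation rule above describes.
	 */
	struct mmu_update u = {
		.ptr = pte_machine_addr | MMU_NORMAL_PT_UPDATE,
		.val = (mfn << 12) |		/* machine address of the frame */
		       EX_PAGE_PAT | EX_PAGE_RW | EX_PAGE_PRESENT,
	};

	/* DOMID_SELF: data and pagetable pages both belong to this domain. */
	return HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
}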
@@ -127,7 +265,12 @@
/*
* MMU EXTENDED OPERATIONS
*
- * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
+ * enum neg_errnoval HYPERVISOR_mmuext_op(mmuext_op_t uops[],
+ * unsigned int count,
+ * unsigned int *pdone,
+ * unsigned int foreigndom)
+ */
+/* HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
* A foreigndom (FD) can be specified (or DOMID_SELF for none).
* Where the FD has some effect, it is described below.
*
@@ -164,9 +307,23 @@
* cmd: MMUEXT_FLUSH_CACHE
* No additional arguments. Writes back and flushes cache contents.
*
+ * cmd: MMUEXT_FLUSH_CACHE_GLOBAL
+ * No additional arguments. Writes back and flushes cache contents
+ * on all CPUs in the system.
+ *
* cmd: MMUEXT_SET_LDT
* linear_addr: Linear address of LDT base (NB. must be page-aligned).
* nr_ents: Number of entries in LDT.
+ *
+ * cmd: MMUEXT_CLEAR_PAGE
+ * mfn: Machine frame number to be cleared.
+ *
+ * cmd: MMUEXT_COPY_PAGE
+ * mfn: Machine frame number of the destination page.
+ * src_mfn: Machine frame number of the source page.
+ *
+ * cmd: MMUEXT_[UN]MARK_SUPER
+ * mfn: Machine frame number of head of superpage to be [un]marked.
*/
#define MMUEXT_PIN_L1_TABLE 0
#define MMUEXT_PIN_L2_TABLE 1
@@ -183,12 +340,18 @@
#define MMUEXT_FLUSH_CACHE 12
#define MMUEXT_SET_LDT 13
#define MMUEXT_NEW_USER_BASEPTR 15
+#define MMUEXT_CLEAR_PAGE 16
+#define MMUEXT_COPY_PAGE 17
+#define MMUEXT_FLUSH_CACHE_GLOBAL 18
+#define MMUEXT_MARK_SUPER 19
+#define MMUEXT_UNMARK_SUPER 20
#ifndef __ASSEMBLY__
struct mmuext_op {
unsigned int cmd;
union {
- /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR */
+ /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
+ * CLEAR_PAGE, COPY_PAGE, [UN]MARK_SUPER */
xen_pfn_t mfn;
/* INVLPG_LOCAL, INVLPG_ALL, SET_LDT */
unsigned long linear_addr;
@@ -198,6 +361,8 @@ struct mmuext_op {
unsigned int nr_ents;
/* TLB_FLUSH_MULTI, INVLPG_MULTI */
void *vcpumask;
+ /* COPY_PAGE */
+ xen_pfn_t src_mfn;
} arg2;
};
DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
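Step 5 of the pagetable construction rules above (pin the top-level table, then switch cr3 to it) could look roughly like the sketch below; the function name is invented, error handling is minimal, and a 32-bit guest would use MMUEXT_PIN_L3_TABLE instead.

#include <asm/xen/hypercall.h>
#include <xen/interface/xen.h>

static int example_pin_and_load_pgd(xen_pfn_t l4_mfn)
{
	struct mmuext_op op[2];

	/* Let Xen validate the (read-only) pagetable tree rooted at l4_mfn. */
	op[0].cmd = MMUEXT_PIN_L4_TABLE;
	op[0].arg1.mfn = l4_mfn;

	/* ... and only then make it the active base pointer (cr3). */
	op[1].cmd = MMUEXT_NEW_BASEPTR;
	op[1].arg1.mfn = l4_mfn;

	return HYPERVISOR_mmuext_op(op, 2, NULL, DOMID_SELF);
}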
@@ -225,10 +390,23 @@ DEFINE_GUEST_HANDLE_STRUCT(mmuext_op);
*/
#define VMASST_CMD_enable 0
#define VMASST_CMD_disable 1
+
+/* x86/32 guests: simulate full 4GB segment limits. */
#define VMASST_TYPE_4gb_segments 0
+
+/* x86/32 guests: trap (vector 15) whenever above vmassist is used. */
#define VMASST_TYPE_4gb_segments_notify 1
+
+/*
+ * x86 guests: support writes to bottom-level PTEs.
+ * NB1. Page-directory entries cannot be written.
+ * NB2. Guest must continue to remove all writable mappings of PTEs.
+ */
#define VMASST_TYPE_writable_pagetables 2
+
+/* x86/PAE guests: support PDPTs above 4GB. */
#define VMASST_TYPE_pae_extended_cr3 3
+
#define MAX_VMASST_TYPE 3
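A guest turns one of these assists on or off with the vm_assist hypercall; below is a brief sketch, assuming the Linux HYPERVISOR_vm_assist() wrapper is available and using an illustrative function name.

#include <linux/printk.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/xen.h>

static void example_enable_writable_pagetables(void)
{
	/*
	 * Ask Xen to emulate writes to bottom-level PTEs.  Page-directory
	 * entries still cannot be written directly (see NB1 above).
	 */
	if (HYPERVISOR_vm_assist(VMASST_CMD_enable,
				 VMASST_TYPE_writable_pagetables))
		pr_warn("writable_pagetables assist not available\n");
}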
#ifndef __ASSEMBLY__
@@ -260,6 +438,15 @@ typedef uint16_t domid_t;
*/
#define DOMID_XEN (0x7FF2U)
+/* DOMID_COW is used as the owner of sharable pages */
+#define DOMID_COW (0x7FF3U)
+
+/* DOMID_INVALID is used to identify pages with unknown owner. */
+#define DOMID_INVALID (0x7FF4U)
+
+/* Idle domain. */
+#define DOMID_IDLE (0x7FFFU)
+
/*
* Send an array of these to HYPERVISOR_mmu_update().
* NB. The fields are natural pointer/address size for this architecture.
@@ -272,7 +459,9 @@ DEFINE_GUEST_HANDLE_STRUCT(mmu_update);
/*
* Send an array of these to HYPERVISOR_multicall().
- * NB. The fields are natural register size for this architecture.
+ * NB. The fields are logically the natural register size for this
+ * architecture. In cases where xen_ulong_t is larger than this,
+ * any unused bits in the upper portion must be zero.
*/
struct multicall_entry {
xen_ulong_t op;
@@ -442,8 +631,48 @@ struct start_info {
unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */
unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
int8_t cmd_line[MAX_GUEST_CMDLINE];
+ /* The pfn range here covers both page table and p->m table frames. */
+ unsigned long first_p2m_pfn;/* 1st pfn forming initial P->M table. */
+ unsigned long nr_p2m_frames;/* # of pfns forming initial P->M table. */
};
+/* These flags are passed in the 'flags' field of start_info_t. */
+#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
+#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
+#define SIF_MULTIBOOT_MOD (1<<2) /* Is mod_start a multiboot module? */
+#define SIF_MOD_START_PFN (1<<3) /* Is mod_start a PFN? */
+#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
+
+/*
+ * A multiboot module is a package containing modules very similar to a
+ * multiboot module array. The only differences are:
+ * - the array of module descriptors is by convention simply at the beginning
+ * of the multiboot module,
+ * - addresses in the module descriptors are based on the beginning of the
+ * multiboot module,
+ * - the number of modules is determined by a termination descriptor that has
+ * mod_start == 0.
+ *
+ * This permits the module list both to be built statically and referenced in a
+ * configuration file, and lets the PV guest easily rebase the addresses to
+ * virtual addresses while at the same time counting the number of modules.
+ */
+struct xen_multiboot_mod_list {
+ /* Address of first byte of the module */
+ uint32_t mod_start;
+ /* Address of last byte of the module (inclusive) */
+ uint32_t mod_end;
+ /* Address of zero-terminated command line */
+ uint32_t cmdline;
+ /* Unused, must be zero */
+ uint32_t pad;
+};
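Counting the modules in such a package, as described above, reduces to walking the descriptor array at the start of the package until the terminating entry; a small sketch with an invented helper name:

static unsigned int example_count_multiboot_mods(
		const struct xen_multiboot_mod_list *mods)
{
	unsigned int n = 0;

	/* The list ends with a descriptor whose mod_start is 0. */
	while (mods[n].mod_start != 0)
		n++;

	return n;
}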
+/*
+ * The console structure in start_info.console.dom0
+ *
+ * This structure includes a variety of information required to
+ * have a working VGA/VESA console.
+ */
struct dom0_vga_console_info {
uint8_t video_type;
#define XEN_VGATYPE_TEXT_MODE_3 0x03
@@ -484,11 +713,6 @@ struct dom0_vga_console_info {
} u;
};
-/* These flags are passed in the 'flags' field of start_info_t. */
-#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
-#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
-#define SIF_PM_MASK (0xFF<<8) /* reserve 1 byte for xen-pm options */
-
typedef uint64_t cpumap_t;
typedef uint8_t xen_domain_handle_t[16];
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 0324c6d340c1..b78f21caf55a 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -86,6 +86,7 @@ struct xenbus_device_id
/* A xenbus driver. */
struct xenbus_driver {
+ const char *name; /* defaults to ids[0].devicetype */
const struct xenbus_device_id *ids;
int (*probe)(struct xenbus_device *dev,
const struct xenbus_device_id *id);
@@ -100,20 +101,22 @@ struct xenbus_driver {
int (*is_ready)(struct xenbus_device *dev);
};
-#define DEFINE_XENBUS_DRIVER(var, drvname, methods...) \
-struct xenbus_driver var ## _driver = { \
- .driver.name = drvname + 0 ?: var ## _ids->devicetype, \
- .driver.owner = THIS_MODULE, \
- .ids = var ## _ids, ## methods \
-}
-
static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)
{
return container_of(drv, struct xenbus_driver, driver);
}
-int __must_check xenbus_register_frontend(struct xenbus_driver *);
-int __must_check xenbus_register_backend(struct xenbus_driver *);
+int __must_check __xenbus_register_frontend(struct xenbus_driver *drv,
+ struct module *owner,
+ const char *mod_name);
+int __must_check __xenbus_register_backend(struct xenbus_driver *drv,
+ struct module *owner,
+ const char *mod_name);
+
+#define xenbus_register_frontend(drv) \
+	__xenbus_register_frontend(drv, THIS_MODULE, KBUILD_MODNAME)
+#define xenbus_register_backend(drv) \
+	__xenbus_register_backend(drv, THIS_MODULE, KBUILD_MODNAME)
void xenbus_unregister_driver(struct xenbus_driver *drv);
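With the registration interface above, the module owner and name are filled in by the wrapper macro, and the driver name defaults to ids[0].devicetype. A sketch of a frontend using it follows; the driver and function names are placeholders and the probe body is elided.

#include <linux/module.h>
#include <xen/xenbus.h>

static const struct xenbus_device_id front_ids[] = {
	{ "vscsi" },
	{ "" }
};

static int front_probe(struct xenbus_device *dev,
		       const struct xenbus_device_id *id)
{
	/* Allocate per-device state, set up the ring, publish XenStore nodes. */
	return 0;
}

static struct xenbus_driver front_driver = {
	/* .name may be omitted; it defaults to ids[0].devicetype ("vscsi"). */
	.ids   = front_ids,
	.probe = front_probe,
};

static int __init front_init(void)
{
	/*
	 * Expands to __xenbus_register_frontend(&front_driver, THIS_MODULE,
	 * KBUILD_MODNAME), so the module owner and name come for free.
	 */
	return xenbus_register_frontend(&front_driver);
}
module_init(front_init);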