Merge branches 'x86/cleanups', 'x86/kexec', 'x86/mce2' and 'linus' into x86/core
include/linux/ata.h
@@ -89,6 +89,8 @@ enum {
 	ATA_ID_DLF		= 128,
 	ATA_ID_CSFO		= 129,
 	ATA_ID_CFA_POWER	= 160,
+	ATA_ID_CFA_KEY_MGMT	= 162,
+	ATA_ID_CFA_MODES	= 163,
 	ATA_ID_ROT_SPEED	= 217,
 
 	ATA_ID_PIO4		= (1 << 1),
include/linux/bootmem.h
@@ -65,23 +65,20 @@ extern void free_bootmem(unsigned long addr, unsigned long size);
 #define BOOTMEM_DEFAULT		0
 #define BOOTMEM_EXCLUSIVE	(1<<0)
 
+extern int reserve_bootmem(unsigned long addr,
+			   unsigned long size,
+			   int flags);
 extern int reserve_bootmem_node(pg_data_t *pgdat,
-				 unsigned long physaddr,
-				 unsigned long size,
-				 int flags);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
-#endif
+				unsigned long physaddr,
+				unsigned long size,
+				int flags);
 
-extern void *__alloc_bootmem_nopanic(unsigned long size,
+extern void *__alloc_bootmem(unsigned long size,
 			     unsigned long align,
 			     unsigned long goal);
-extern void *__alloc_bootmem(unsigned long size,
+extern void *__alloc_bootmem_nopanic(unsigned long size,
 				     unsigned long align,
 				     unsigned long goal);
-extern void *__alloc_bootmem_low(unsigned long size,
-				 unsigned long align,
-				 unsigned long goal);
 extern void *__alloc_bootmem_node(pg_data_t *pgdat,
 				  unsigned long size,
 				  unsigned long align,
@@ -90,30 +87,35 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
 					  unsigned long size,
 					  unsigned long align,
 					  unsigned long goal);
+extern void *__alloc_bootmem_low(unsigned long size,
+				 unsigned long align,
+				 unsigned long goal);
 extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
 				      unsigned long size,
 				      unsigned long align,
 				      unsigned long goal);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
 
 #define alloc_bootmem(x) \
 	__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_nopanic(x) \
 	__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low(x) \
-	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
 #define alloc_bootmem_pages(x) \
 	__alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_nopanic(x) \
 	__alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages(x) \
-	__alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_node(pgdat, x) \
 	__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_node(pgdat, x) \
 	__alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
+	__alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+
+#define alloc_bootmem_low(x) \
+	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
+#define alloc_bootmem_low_pages(x) \
+	__alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_low_pages_node(pgdat, x) \
 	__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 
 extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
 				   int flags);
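For orientation, a usage sketch (not part of this commit; the function and sizes are invented): the macro family above only varies the align/goal pair it passes down — SMP_CACHE_BYTES vs. PAGE_SIZE for alignment, and __pa(MAX_DMA_ADDRESS) vs. 0 for the "goal" (preferred start address), where goal 0 asks for low memory.

	/* Hypothetical early-boot code using the bootmem interface above. */
	static unsigned long *my_hash_table __initdata;

	static void __init my_table_init(unsigned long nr_entries)
	{
		void *dma_buf, *opt;

		/* cache-line aligned, preferably above MAX_DMA_ADDRESS */
		my_hash_table = alloc_bootmem(nr_entries * sizeof(unsigned long));

		/* page-aligned buffer placed in low memory (goal 0) */
		dma_buf = alloc_bootmem_low_pages(PAGE_SIZE);

		/* the _nopanic variants return NULL instead of panicking */
		opt = alloc_bootmem_nopanic(nr_entries);
		if (!opt)
			printk(KERN_WARNING "optional table not allocated\n");
	}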
include/linux/cpufreq.h
@@ -234,7 +234,6 @@ struct cpufreq_driver {
 	int	(*suspend)	(struct cpufreq_policy *policy, pm_message_t pmsg);
 	int	(*resume)	(struct cpufreq_policy *policy);
 	struct freq_attr	**attr;
-	bool			hide_interface;
 };
 
 /* flags */
include/linux/dmaengine.h
@@ -97,7 +97,6 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
 
 /**
  * struct dma_chan_percpu - the per-CPU part of struct dma_chan
- * @refcount: local_t used for open-coded "bigref" counting
  * @memcpy_count: transaction counter
  * @bytes_transferred: byte counter
  */
@@ -114,9 +113,6 @@ struct dma_chan_percpu {
  * @cookie: last cookie value returned to client
  * @chan_id: channel ID for sysfs
  * @dev: class device for sysfs
- * @refcount: kref, used in "bigref" slow-mode
- * @slow_ref: indicates that the DMA channel is free
- * @rcu: the DMA channel's RCU head
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
  * @client-count: how many clients are using this channel
@@ -213,8 +209,6 @@ struct dma_async_tx_descriptor {
  * @global_node: list_head for global dma_device_list
  * @cap_mask: one or more dma_capability flags
  * @max_xor: maximum number of xor sources, 0 if no capability
- * @refcount: reference count
- * @done: IO completion struct
  * @dev_id: unique device ID
  * @dev: struct device reference for dma mapping api
  * @device_alloc_chan_resources: allocate resources and return the
@@ -227,6 +221,7 @@ struct dma_async_tx_descriptor {
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
+ * @device_prep_slave_sg: prepares a slave dma operation
  * @device_terminate_all: terminate all pending operations
  * @device_is_tx_complete: poll for transaction completion
  * @device_issue_pending: push pending transactions to hardware
  */
 struct dma_device {
include/linux/hdreg.h
@@ -511,7 +511,6 @@ struct hd_driveid {
 	unsigned short	words69_70[2];	/* reserved words 69-70
 					 * future command overlap and queuing
 					 */
-	/* HDIO_GET_IDENTITY currently returns only words 0 through 70 */
 	unsigned short	words71_74[4];	/* reserved words 71-74
 					 * for IDENTIFY PACKET DEVICE command
 					 */
include/linux/ide.h
@@ -866,6 +866,7 @@ struct ide_host {
 	unsigned int	n_ports;
 	struct device	*dev[2];
 	unsigned int	(*init_chipset)(struct pci_dev *);
+	irq_handler_t	irq_handler;
 	unsigned long	host_flags;
 	void		*host_priv;
 	ide_hwif_t	*cur_port;	/* for hosts requiring serialization */
include/linux/libata.h
@@ -275,7 +275,7 @@ enum {
 	 * advised to wait only for the following duration before
 	 * doing SRST.
 	 */
-	ATA_TMOUT_PMP_SRST_WAIT	= 1000,
+	ATA_TMOUT_PMP_SRST_WAIT	= 5000,
 
 	/* ATA bus states */
 	BUS_UNKNOWN		= 0,
@@ -530,6 +530,7 @@ struct ata_queued_cmd {
 	unsigned long		flags;		/* ATA_QCFLAG_xxx */
 	unsigned int		tag;
 	unsigned int		n_elem;
+	unsigned int		orig_n_elem;
 
 	int			dma_dir;
@@ -750,7 +751,8 @@ struct ata_port {
 	acpi_handle		acpi_handle;
 	struct ata_acpi_gtm	__acpi_init_gtm; /* use ata_acpi_init_gtm() */
 #endif
-	u8			sector_buf[ATA_SECT_SIZE]; /* owned by EH */
+	/* owned by EH */
+	u8			sector_buf[ATA_SECT_SIZE] ____cacheline_aligned;
 };
 
 /* The following initializer overrides a method to NULL whether one of
include/linux/netdevice.h
@@ -1079,6 +1079,7 @@ extern void synchronize_net(void);
 extern int		register_netdevice_notifier(struct notifier_block *nb);
 extern int		unregister_netdevice_notifier(struct notifier_block *nb);
 extern int		init_dummy_netdev(struct net_device *dev);
+extern void		netdev_resync_ops(struct net_device *dev);
 
 extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
include/linux/percpu.h
@@ -5,6 +5,7 @@
 #include <linux/slab.h> /* For kmalloc() */
 #include <linux/smp.h>
 #include <linux/cpumask.h>
+#include <linux/pfn.h>
 
 #include <asm/percpu.h>
@@ -52,17 +53,18 @@
 #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
 #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
 
-/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
-#ifndef PERCPU_ENOUGH_ROOM
+/* enough to cover all DEFINE_PER_CPUs in modules */
 #ifdef CONFIG_MODULES
-#define PERCPU_MODULE_RESERVE	8192
+#define PERCPU_MODULE_RESERVE		(8 << 10)
 #else
-#define PERCPU_MODULE_RESERVE	0
+#define PERCPU_MODULE_RESERVE		0
 #endif
 
+#ifndef PERCPU_ENOUGH_ROOM
 #define PERCPU_ENOUGH_ROOM						\
-	(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
-#endif	/* PERCPU_ENOUGH_ROOM */
+	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
+	 PERCPU_MODULE_RESERVE)
+#endif
 
 /*
  * Must be an lvalue. Since @var must be a simple identifier,
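To make the new PERCPU_ENOUGH_ROOM sizing above concrete, an illustrative calculation (the static section size is made up): with CONFIG_MODULES set, PERCPU_MODULE_RESERVE is 8 << 10 = 0x2000 bytes. If the static per-cpu section (__per_cpu_end - __per_cpu_start) came to 0x9e40 bytes and SMP_CACHE_BYTES were 64, then ALIGN(0x9e40, 64) = 0x9e40 (already aligned), so PERCPU_ENOUGH_ROOM = 0x9e40 + 0x2000 = 0xbe40 bytes per cpu. The old formula differed only in skipping the cache-line rounding of the static size.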
@@ -76,52 +78,90 @@
 
 #ifdef CONFIG_SMP
 
+#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+
+/* minimum unit size, also is the maximum supported allocation size */
+#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(64 << 10)
+
+/*
+ * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
+ * back on the first chunk for dynamic percpu allocation if arch is
+ * manually allocating and mapping it for faster access (as a part of
+ * large page mapping for example).
+ *
+ * The following values give between one and two pages of free space
+ * after typical minimal boot (2-way SMP, single disk and NIC) with
+ * both defconfig and a distro config on x86_64 and 32.  More
+ * intelligent way to determine this would be nice.
+ */
+#if BITS_PER_LONG > 32
+#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
+#else
+#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
+#endif
+
+extern void *pcpu_base_addr;
+
+typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
+typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
+
+extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
+				size_t static_size, size_t reserved_size,
+				ssize_t unit_size, ssize_t dyn_size,
+				void *base_addr,
+				pcpu_populate_pte_fn_t populate_pte_fn);
+
+/*
+ * Use this to get to a cpu's version of the per-cpu object
+ * dynamically allocated. Non-atomic access to the current CPU's
+ * version should probably be combined with get_cpu()/put_cpu().
+ */
+#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+
+extern void *__alloc_reserved_percpu(size_t size, size_t align);
+
+#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
 struct percpu_data {
 	void *ptrs[1];
 };
 
 #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
 /*
  * Use this to get to a cpu's version of the per-cpu object dynamically
  * allocated. Non-atomic access to the current CPU's version should
  * probably be combined with get_cpu()/put_cpu().
  */
-#define percpu_ptr(ptr, cpu)						\
-({									\
-	struct percpu_data *__p = __percpu_disguise(ptr);		\
-	(__typeof__(ptr))__p->ptrs[(cpu)];				\
+#define per_cpu_ptr(ptr, cpu)						\
+({									\
+	struct percpu_data *__p = __percpu_disguise(ptr);		\
+	(__typeof__(ptr))__p->ptrs[(cpu)];				\
 })
 
-extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
-extern void percpu_free(void *__pdata);
+#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
+extern void *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void *__pdata);
 
 #else /* CONFIG_SMP */
 
-#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
 
-static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+static inline void *__alloc_percpu(size_t size, size_t align)
 {
-	return kzalloc(size, gfp);
+	/*
+	 * Can't easily make larger alignment work with kmalloc.  WARN
+	 * on it.  Larger alignment should only be used for module
+	 * percpu sections on SMP for which this path isn't used.
+	 */
+	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
+	return kzalloc(size, GFP_KERNEL);
 }
 
-static inline void percpu_free(void *__pdata)
+static inline void free_percpu(void *p)
 {
-	kfree(__pdata);
+	kfree(p);
 }
 
 #endif /* CONFIG_SMP */
 
-#define percpu_alloc_mask(size, gfp, mask) \
-	__percpu_alloc_mask((size), (gfp), &(mask))
-
-#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
-
-/* (legacy) interface for use without CPU hotplug handling */
-
-#define __alloc_percpu(size)	percpu_alloc_mask((size), GFP_KERNEL, \
-						  cpu_possible_map)
-#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type))
-#define free_percpu(ptr)	percpu_free((ptr))
-#define per_cpu_ptr(ptr, cpu)	percpu_ptr((ptr), (cpu))
+#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type),	\
+						       __alignof__(type))
 
 #endif /* __LINUX_PERCPU_H */
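A usage sketch for the interface above (not from the commit; the counter is invented). Both the dynamic allocator and the disguised-array fallback sit behind the same entry points, and the doc comment's advice about get_cpu()/put_cpu() applies to current-CPU access:

	/* Hypothetical dynamically-allocated per-cpu counter. */
	static unsigned long *my_counters;

	static int __init my_counters_init(void)
	{
		int cpu;

		my_counters = alloc_percpu(unsigned long);
		if (!my_counters)
			return -ENOMEM;

		/* remote access: explicitly pick a cpu's instance */
		for_each_possible_cpu(cpu)
			*per_cpu_ptr(my_counters, cpu) = 0;
		return 0;
	}

	static void my_counters_inc(void)
	{
		/* pin this cpu so its instance stays ours while we use it */
		int cpu = get_cpu();
		(*per_cpu_ptr(my_counters, cpu))++;
		put_cpu();
	}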
include/linux/rcuclassic.h
@@ -181,4 +181,10 @@ extern long rcu_batches_completed_bh(void);
 #define rcu_enter_nohz()	do { } while (0)
 #define rcu_exit_nohz()		do { } while (0)
 
+/* A context switch is a grace period for rcuclassic. */
+static inline int rcu_blocking_is_gp(void)
+{
+	return num_online_cpus() == 1;
+}
+
 #endif /* __LINUX_RCUCLASSIC_H */
include/linux/rcupdate.h
@@ -52,6 +52,9 @@ struct rcu_head {
 	void (*func)(struct rcu_head *head);
 };
 
+/* Internal to kernel, but needed by rcupreempt.h. */
+extern int rcu_scheduler_active;
+
 #if defined(CONFIG_CLASSIC_RCU)
 #include <linux/rcuclassic.h>
 #elif defined(CONFIG_TREE_RCU)
@@ -265,6 +268,7 @@ extern void rcu_barrier_sched(void);
 
 /* Internal to kernel */
 extern void rcu_init(void);
+extern void rcu_scheduler_starting(void);
 extern int rcu_needs_cpu(int cpu);
 
 #endif /* __LINUX_RCUPDATE_H */
include/linux/rcupreempt.h
@@ -142,4 +142,19 @@ static inline void rcu_exit_nohz(void)
 #define rcu_exit_nohz()		do { } while (0)
 #endif /* CONFIG_NO_HZ */
 
+/*
+ * A context switch is a grace period for rcupreempt synchronize_rcu()
+ * only during early boot, before the scheduler has been initialized.
+ * So, how the heck do we get a context switch?  Well, if the caller
+ * invokes synchronize_rcu(), they are willing to accept a context
+ * switch, so we simply pretend that one happened.
+ *
+ * After boot, there might be a blocked or preempted task in an RCU
+ * read-side critical section, so we cannot then take the fastpath.
+ */
+static inline int rcu_blocking_is_gp(void)
+{
+	return num_online_cpus() == 1 && !rcu_scheduler_active;
+}
+
 #endif /* __LINUX_RCUPREEMPT_H */
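For context, this predicate is an early-boot fastpath check for the updater. Roughly how the kernel's synchronize_rcu() uses it (a sketch paraphrasing the updater side, not code from this hunk; struct rcu_synchronize and wakeme_after_rcu live in the RCU core):

	void synchronize_rcu(void)
	{
		struct rcu_synchronize rcu;

		/* one CPU and no scheduler yet: a context switch is free */
		if (rcu_blocking_is_gp())
			return;

		init_completion(&rcu.completion);
		/* will wake us after a real grace period has elapsed */
		call_rcu(&rcu.head, wakeme_after_rcu);
		wait_for_completion(&rcu.completion);
	}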
include/linux/rcutree.h
@@ -326,4 +326,10 @@ static inline void rcu_exit_nohz(void)
 }
 #endif /* CONFIG_NO_HZ */
 
+/* A context switch is a grace period for rcutree. */
+static inline int rcu_blocking_is_gp(void)
+{
+	return num_online_cpus() == 1;
+}
+
 #endif /* __LINUX_RCUTREE_H */
include/linux/sched.h
@@ -2303,9 +2303,13 @@ extern long sched_group_rt_runtime(struct task_group *tg);
 extern int sched_group_set_rt_period(struct task_group *tg,
 				      long rt_period_us);
 extern long sched_group_rt_period(struct task_group *tg);
+extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
 #endif
 
+extern int task_can_switch_user(struct user_struct *up,
+				struct task_struct *tsk);
+
 #ifdef CONFIG_TASK_XACCT
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {
include/linux/serio.h
@@ -212,7 +212,7 @@ static inline void serio_unpin_driver(struct serio *serio)
 #define SERIO_FUJITSU	0x35
 #define SERIO_ZHENHUA	0x36
 #define SERIO_INEXIO	0x37
-#define SERIO_TOUCHIT213	0x37
+#define SERIO_TOUCHIT213	0x38
 #define SERIO_W8001	0x39
 
 #endif
include/linux/vmalloc.h
@@ -95,6 +95,9 @@ extern struct vm_struct *remove_vm_area(const void *addr);
 
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
 			struct page ***pages);
+extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
+				    pgprot_t prot, struct page **pages);
+extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
 extern void unmap_kernel_range(unsigned long addr, unsigned long size);
 
 /* Allocate/destroy a 'vmalloc' VM area. */
@@ -110,5 +113,6 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
  */
 extern rwlock_t vmlist_lock;
 extern struct vm_struct *vmlist;
+extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
 
 #endif /* _LINUX_VMALLOC_H */
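A sketch of how the two new no-flush mapping helpers pair up (not from the commit; my_map_pages/my_unmap_pages are invented, and the caller is assumed to do its own cache/TLB flushing around them, which is the point of the _noflush variants):

	/* Hypothetical: map a page array at a reserved kernel address range. */
	static int my_map_pages(unsigned long addr, unsigned long size,
				struct page **pages)
	{
		int ret = map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
		if (ret < 0)
			return ret;
		flush_cache_vmap(addr, addr + size);	/* caller's duty */
		return 0;
	}

	static void my_unmap_pages(unsigned long addr, unsigned long size)
	{
		flush_cache_vunmap(addr, addr + size);	/* caller's duty */
		unmap_kernel_range_noflush(addr, size);
		flush_tlb_kernel_range(addr, addr + size);
	}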