How is a patch like this generated?
Source: Internet | Posted: 2016-07-20
I'd like to know how a patch like this is generated. Note that it is not the output of diffing one file against another, or one directory against another: looking below, the single patch contains the differences for several files at once.
If you diff directories, the file names on both sides must match; if you diff individual files, how do you compare several pairs of files in one go?
Experts, please advise.
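Judging from the "--- file.fair" / "+++ file" headers in the patch below, the most likely workflow (a sketch, not confirmed by the poster) is: keep a backup copy of each file you modify, with a suffix such as .fair, run diff -u on each backup/modified pair, and concatenate the outputs into one patch file. The name fairsched.patch here is a hypothetical choice for illustration:

    cd /usr/src
    diff -u linux/kernel/sched.c.fair linux/kernel/sched.c > fairsched.patch
    diff -u linux/kernel/user.c.fair linux/kernel/user.c >> fairsched.patch
    diff -u linux/kernel/sysctl.c.fair linux/kernel/sysctl.c >> fairsched.patch
    diff -u linux/include/linux/sched.h.fair linux/include/linux/sched.h >> fairsched.patch

A patch file is simply several unified diffs back to back, and patch(1) reads them one after another, so the two file names in each individual diff do not need to match. An alternative is to copy the whole tree (say linux.orig/ and linux/) and run diff -urN linux.orig linux, which also picks up added and deleted files.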
--- linux/kernel/sched.c.fair 2002-09-20 10:58:49.000000000 -0300
+++ linux/kernel/sched.c 2002-09-24 14:46:31.000000000 -0300
@@ -45,31 +45,33 @@
extern void mem_use(void);
+#ifdef CONFIG_FAIRSCHED
+/* Toggle the per-user fair scheduler on/off */
+int fairsched = 1;
+
+/* Move a task to the tail of the tasklist */
+static inline void move_last_tasklist(struct task_struct * p)
+{
+ /* list_del */
+ p->next_task->prev_task = p->prev_task;
+ p->prev_task->next_task = p->next_task;
+
+ /* list_add_tail */
+ p->next_task = &init_task;
+ p->prev_task = init_task.prev_task;
+ init_task.prev_task->next_task = p;
+ init_task.prev_task = p;
+}
+
/*
- * Scheduling quanta.
- *
- * NOTE! The unix "nice" value influences how long a process
- * gets. The nice value ranges from -20 to +19, where a -20
- * is a "high-priority" task, and a "+10" is a low-priority
- * task.
- *
- * We want the time-slice to be around 50ms or so, so this
- * calculation depends on the value of HZ.
+ * Remember p->next, in case we call move_last_tasklist(p) in the
+ * fairsched recalculation code.
*/
-#if HZ < 200
-#define TICK_SCALE(x) ((x) >> 2)
-#elif HZ < 400
-#define TICK_SCALE(x) ((x) >> 1)
-#elif HZ < 800
-#define TICK_SCALE(x) (x)
-#elif HZ < 1600
-#define TICK_SCALE(x) ((x) << 1)
-#else
-#define TICK_SCALE(x) ((x) << 2)
-#endif
-
-#define NICE_TO_TICKS(nice) (TICK_SCALE(20-(nice))+1)
+static void recalculate_priorities(void)
+{
+#ifdef CONFIG_FAIRSCHED
+ if (fairsched) {
+ struct user_struct *up;
+ struct task_struct *p, *next;
+
+ recalculate_peruser_cputicks();
+
+ write_lock_irq(&tasklist_lock);
+ for (p = init_task.next_task; p != &init_task; p = next) {
+ next = p->next_task;
+ up = p->user;
+ if (up->cpu_ticks > 0) {
+ long oldcounter, newcounter;
+
+ oldcounter = p->counter;
+ newcounter = (oldcounter >> 1) +
+ NICE_TO_TICKS(p->nice);
+ up->cpu_ticks += oldcounter;
+ up->cpu_ticks -= newcounter;
+ /*
+ * If a user is very busy, only some of its
+ * processes can get CPU time. We move those
+ * processes out of the way to prevent
+ * starvation of others.
+ */
+ if (oldcounter != newcounter) {
+ p->counter = newcounter;
+ move_last_tasklist(p);
+ }
+ }
+ }
+ write_unlock_irq(&tasklist_lock);
+ } else
+#endif /* CONFIG_FAIRSCHED */
+ {
+ struct task_struct *p;
+
+ read_lock(&tasklist_lock);
+ for_each_task(p)
+ p->counter = (p->counter >> 1) + NICE_TO_TICKS(p->nice);
+ read_unlock(&tasklist_lock);
+ }
+}
+
+/*
* schedule_tail() is getting called from the fork return path. This
* cleans up all remaining scheduler things, without impacting the
* common case.
@@ -616,13 +666,10 @@
/* Do we need to re-calculate counters? */
if (unlikely(!c)) {
- struct task_struct *p;
-
spin_unlock_irq(&runqueue_lock);
- read_lock(&tasklist_lock);
- for_each_task(p)
- p->counter = (p->counter >> 1) + NICE_TO_TICKS(p->nice);
- read_unlock(&tasklist_lock);
+
+ recalculate_priorities();
+
spin_lock_irq(&runqueue_lock);
goto repeat_schedule;
}
--- linux/kernel/user.c.fair 2002-09-20 11:50:56.000000000 -0300
+++ linux/kernel/user.c 2002-09-24 16:06:11.000000000 -0300
@@ -29,9 +29,12 @@
struct user_struct root_user = {
__count: ATOMIC_INIT(1),
processes: ATOMIC_INIT(1),
- files: ATOMIC_INIT(0)
+ files: ATOMIC_INIT(0),
+ cpu_ticks: NICE_TO_TICKS(0)
};
+static LIST_HEAD(user_list);
+
/*
* These routines must be called with the uidhash spinlock held!
*/
@@ -44,6 +47,8 @@
next->pprev = &up->next;
up->pprev = hashent;
*hashent = up;
+
+ list_add(&up->list, &user_list);
}
static inline void uid_hash_remove(struct user_struct *up)
@@ -54,6 +59,8 @@
if (next)
next->pprev = pprev;
*pprev = next;
+
+ list_del(&up->list);
}
static inline struct user_struct *uid_hash_find(uid_t uid, struct user_struct **hashent)
@@ -101,6 +108,7 @@
atomic_set(&new->__count, 1);
atomic_set(&new->processes, 0);
atomic_set(&new->files, 0);
+ new->cpu_ticks = NICE_TO_TICKS(0);
/*
* Before adding this, check whether we raced
@@ -120,6 +128,21 @@
return up;
}
+/* Fair scheduler, recalculate the per user cpu time cap. */
+void recalculate_peruser_cputicks(void)
+{
+ struct list_head * entry;
+ struct user_struct * user;
+
+ spin_lock(&uidhash_lock);
+ list_for_each(entry, &user_list) {
+ user = list_entry(entry, struct user_struct, list);
+ user->cpu_ticks = (user->cpu_ticks / 2) + NICE_TO_TICKS(0);
+ }
+ /* Needed hack, we can get called before uid_cache_init ... */
+ root_user.cpu_ticks = (root_user.cpu_ticks / 2) + NICE_TO_TICKS(0);
+ spin_unlock(&uidhash_lock);
+}
static int __init uid_cache_init(void)
{
--- linux/kernel/sysctl.c.fair 2002-09-21 00:00:36.000000000 -0300
+++ linux/kernel/sysctl.c 2002-09-21 20:40:49.000000000 -0300
@@ -50,6 +50,7 @@
extern int sysrq_enabled;
extern int core_uses_pid;
extern int cad_pid;
+extern int fairsched;
/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
static int maxolduid = 65535;
@@ -256,6 +257,10 @@
{KERN_S390_USER_DEBUG_LOGGING,"userprocess_debug",
&sysctl_userprocess_debug,sizeof(int),0644,NULL,&proc_dointvec},
#endif
+#ifdef CONFIG_FAIRSCHED
+ {KERN_FAIRSCHED, "fairsched", &fairsched, sizeof(int),
+ 0644, NULL, &proc_dointvec},
+#endif
{0}
};
--- linux/include/linux/sched.h.fair 2002-09-20 10:59:03.000000000 -0300
+++ linux/include/linux/sched.h 2002-09-24 15:12:50.000000000 -0300
@@ -275,6 +275,10 @@
/* Hash table maintenance information */
struct user_struct *next, **pprev;
uid_t uid;
+
+ /* Linked list for for_each_user */
+ struct list_head list;
+ long cpu_ticks;
};
#define get_current_user() ({
@@ -282,6 +286,7 @@
atomic_inc(&__user->__count);
__user; })
+extern void recalculate_peruser_cputicks(void);
extern struct user_struct root_user;
#define INIT_USER (&root_user)
@@ -422,6 +427,31 @@
};
/*
+ * Scheduling quanta.
+ *
+ * NOTE! The unix "nice" value influences how long a process
+ * gets. The nice value ranges from -20 to +19, where a -20
+ * is a "high-priority" task, and a "+10" is a low-priority
+ * task.
+ *
+ * We want the time-slice to be around 50ms or so, so this
+ * calculation depends on the value of HZ.
+ */
+#if HZ < 200
+#define TICK_SCALE(x) ((x) >> 2)
+#elif HZ < 400
+#define TICK_SCALE(x) ((x) >> 1)
+#elif HZ < 800
+#define TICK_SCALE(x) (x)
+#elif HZ < 1600
+#define TICK_SCALE(x) ((x) << 1)
+#else
+#define TICK_SCALE(x) ((x) << 2)
+#endif
+
+#define NICE_TO_TICKS(nice) (TICK_SCALE(20-(nice))+1)
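As for applying a concatenated diff like this one: the paths in its headers begin with linux/, so it would normally be applied from the directory that contains the source tree, again assuming the hypothetical name fairsched.patch from the sketch above:

    cd /usr/src
    patch -p0 --dry-run < fairsched.patch
    patch -p0 < fairsched.patch

The --dry-run pass only reports what would change, which is a cheap safety check before letting patch modify the tree.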