Skip to content

Commit 96bcefc

Browse files
committed
Revert cpu_relax & Tidy core.c
1 parent 4497b0a commit 96bcefc

3 files changed

Lines changed: 3 additions & 54 deletions

File tree

arch/x86/include/asm/vdso/processor.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -13,7 +13,7 @@ static __always_inline void rep_nop(void)
1313
asm volatile("rep; nop" ::: "memory");
1414
}
1515

16-
#define cpu_relax() asm volatile("sti; hlt")
16+
#define cpu_relax rep_nop
1717

1818
struct getcpu_cache;
1919

arch/x86/um/asm/processor.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -33,7 +33,7 @@ static __always_inline void cpu_relax(void)
3333
time_travel_mode == TT_MODE_EXTERNAL)
3434
time_travel_ndelay(1);
3535
else
36-
asm volatile("sti; hlt");
36+
rep_nop();
3737
}
3838

3939
#define task_pt_regs(t) (&(t)->thread.regs)

kernel/sched/core.c

Lines changed: 1 addition & 52 deletions
Original file line number | Diff line number | Diff line change
@@ -6331,57 +6331,6 @@ static void sched_core_cpu_starting(unsigned int cpu)
63316331
}
63326332
}
63336333

6334-
static void sched_core_cpu_deactivate(unsigned int cpu)
6335-
{
6336-
const struct cpumask *smt_mask = cpu_smt_mask(cpu);
6337-
struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
6338-
int t;
6339-
6340-
guard(core_lock)(&cpu);
6341-
6342-
/* if we're the last man standing, nothing to do */
6343-
if (cpumask_weight(smt_mask) == 1) {
6344-
WARN_ON_ONCE(rq->core != rq);
6345-
return;
6346-
}
6347-
6348-
/* if we're not the leader, nothing to do */
6349-
if (rq->core != rq)
6350-
return;
6351-
6352-
/* find a new leader */
6353-
for_each_cpu(t, smt_mask) {
6354-
if (t == cpu)
6355-
continue;
6356-
core_rq = cpu_rq(t);
6357-
break;
6358-
}
6359-
6360-
if (WARN_ON_ONCE(!core_rq)) /* impossible */
6361-
return;
6362-
6363-
/* copy the shared state to the new leader */
6364-
core_rq->core_task_seq = rq->core_task_seq;
6365-
core_rq->core_pick_seq = rq->core_pick_seq;
6366-
core_rq->core_cookie = rq->core_cookie;
6367-
core_rq->core_forceidle_count = rq->core_forceidle_count;
6368-
core_rq->core_forceidle_seq = rq->core_forceidle_seq;
6369-
core_rq->core_forceidle_occupation = rq->core_forceidle_occupation;
6370-
6371-
/*
6372-
* Accounting edge for forced idle is handled in pick_next_task().
6373-
* Don't need another one here, since the hotplug thread shouldn't
6374-
* have a cookie.
6375-
*/
6376-
core_rq->core_forceidle_start = 0;
6377-
6378-
/* install new leader */
6379-
for_each_cpu(t, smt_mask) {
6380-
rq = cpu_rq(t);
6381-
rq->core = core_rq;
6382-
}
6383-
}
6384-
63856334
static inline void sched_core_cpu_dying(unsigned int cpu)
63866335
{
63876336
struct rq *rq = cpu_rq(cpu);
@@ -11211,4 +11160,4 @@ void sched_mm_cid_fork(struct task_struct *t)
1121111160
WARN_ON_ONCE(!t->mm || t->mm_cid != -1);
1121211161
t->mm_cid_active = 1;
1121311162
}
11214-
#endif
11163+
#endif

0 commit comments

Comments (0)