(-)b/contrib/gdb/gdb/gdbthread.h (+2 lines)
Lines 75-80 struct thread_info
   struct private_thread_info *private;
 };
 
+extern int thread_list_empty (void);
+
 /* Create an empty thread list, or empty the existing one.  */
 extern void init_thread_list (void);
 
(-)b/contrib/gdb/gdb/infrun.c (-2 / +16 lines)
Lines 384-392 follow_inferior_reset_breakpoints (void)
   insert_breakpoints ();
 }
 
+void
+clear_step_resume_breakpoint_thread (void)
+{
+  if (step_resume_breakpoint)
+    step_resume_breakpoint->thread = -1;
+}
+
+void
+clear_step_resume_breakpoint (void)
+{
+  step_resume_breakpoint = NULL;
+}
+
 /* EXECD_PATHNAME is assumed to be non-NULL. */
 
-static void
+void
 follow_exec (int pid, char *execd_pathname)
 {
   int saved_pid = pid;
Lines 1648-1654 handle_inferior_event (struct execution_control_state *ecs)
 
       /* This causes the eventpoints and symbol table to be reset.  Must
          do this now, before trying to determine whether to stop. */
-      follow_exec (PIDGET (inferior_ptid), pending_follow.execd_pathname);
+      target_follow_exec (PIDGET (inferior_ptid),
+			  pending_follow.execd_pathname);
       xfree (pending_follow.execd_pathname);
 
       stop_pc = read_pc_pid (ecs->ptid);
(-)b/contrib/gdb/gdb/objfiles.c (-1 / +1 lines)
Lines 482-492 free_all_objfiles (void)
 {
   struct objfile *objfile, *temp;
 
+  clear_symtab_users ();
   ALL_OBJFILES_SAFE (objfile, temp)
   {
     free_objfile (objfile);
   }
-  clear_symtab_users ();
 }
 
 /* Relocate OBJFILE to NEW_OFFSETS.  There should be OBJFILE->NUM_SECTIONS
(-)b/contrib/gdb/gdb/target.c (-2 / +48 lines)
Lines 1307-1312 target_async_mask (int mask)
 }
 
 /* Look through the list of possible targets for a target that can
+   follow forks.  */
+
+int
+target_follow_fork (int follow_child)
+{
+  struct target_ops *t;
+
+  for (t = current_target.beneath; t != NULL; t = t->beneath)
+    {
+      if (t->to_follow_fork != NULL)
+	{
+	  int retval = t->to_follow_fork (t, follow_child);
+	  if (targetdebug)
+	    fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
+				follow_child, retval);
+	  return retval;
+	}
+    }
+
+  /* Some target returned a fork event, but did not know how to follow it.  */
+  internal_error (__FILE__, __LINE__,
+		  "could not find a target to follow fork");
+}
+
+void
+target_follow_exec (int pid, char *execd_pathname)
+{
+  struct target_ops *t;
+
+  for (t = current_target.beneath; t != NULL; t = t->beneath)
+    {
+      if (t->to_follow_exec != NULL)
+	{
+	  t->to_follow_exec (pid, execd_pathname);
+	  if (targetdebug)
+	    fprintf_unfiltered (gdb_stdlog, "target_follow_exec (%d, %s)\n",
+				pid, execd_pathname);
+	  return;
+	}
+    }
+
+  /* If target does not specify a follow_exec handler, call the default. */
+  follow_exec (pid, execd_pathname);
+}
+
+/* Look through the list of possible targets for a target that can
    execute a run or attach command without any other data.  This is
    used to locate the default process stratum.
 
Lines 2159-2167 debug_to_remove_vfork_catchpoint (int pid)
 }
 
 static int
-debug_to_follow_fork (int follow_child)
+debug_to_follow_fork (struct target_ops* ops, int follow_child)
 {
-  int retval =  debug_target.to_follow_fork (follow_child);
+  int retval =  debug_target.to_follow_fork (ops, follow_child);
 
   fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
 		      follow_child, retval);
(-)b/contrib/gdb/gdb/target.h (-3 / +5 lines)
Lines 362-368 struct target_ops
     int (*to_remove_fork_catchpoint) (int);
     int (*to_insert_vfork_catchpoint) (int);
     int (*to_remove_vfork_catchpoint) (int);
-    int (*to_follow_fork) (int);
+    int (*to_follow_fork) (struct target_ops*, int);
+    void (*to_follow_exec) (int, char*);
     int (*to_insert_exec_catchpoint) (int);
     int (*to_remove_exec_catchpoint) (int);
     int (*to_reported_exec_events_per_exec_call) (void);
Lines 761-768 extern void target_load (char *arg, int from_tty);
    This function returns 1 if the inferior should not be resumed
    (i.e. there is another event pending).  */
 
-#define target_follow_fork(follow_child) \
-     (*current_target.to_follow_fork) (follow_child)
+int target_follow_fork (int follow_child);
 
 /* On some targets, we can catch an inferior exec event when it
    occurs.  These functions insert/remove an already-created
Lines 1248-1251 extern void push_remote_target (char *name, int from_tty);
 /* Blank target vector entries are initialized to target_ignore. */
 void target_ignore (void);
 
+void target_follow_exec (int pid, char *execd_pathname);
+
 #endif /* !defined (TARGET_H) */
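
The two vector entries added above are what a native target fills in to take over fork/exec following. A minimal sketch of such a registration (illustrative only; the my_* names are hypothetical, and fbsd-nat.c below installs the real FreeBSD versions into child_ops):

/* Sketch: opting a native target into the new follow hooks. */
static int
my_follow_fork (struct target_ops *ops, int follow_child)
{
  /* Switch inferior_ptid to the parent or the child; return nonzero
     if the inferior should not be resumed.  */
  return 0;
}

static void
my_follow_exec (int pid, char *execd_pathname)
{
  /* Re-read symbols and breakpoints for the new image.  */
}

void
_initialize_my_target (void)
{
  child_ops.to_follow_fork = my_follow_fork;
  child_ops.to_follow_exec = my_follow_exec;
}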
(-)b/contrib/gdb/gdb/thread.c (+6 lines)
Lines 65-70 static void restore_current_thread (ptid_t);
 static void switch_to_thread (ptid_t ptid);
 static void prune_threads (void);
 
+int
+thread_list_empty (void)
+{
+  return thread_list == NULL;
+}
+
 void
 delete_step_resume_breakpoint (void *arg)
 {
(-)b/gnu/usr.bin/gdb/arch/amd64/Makefile (-1 / +1 lines)
Lines 2-8
 
 GENSRCS+= xm.h
 .if !defined(GDB_CROSS_DEBUGGER)
-LIBSRCS+= fbsd-proc.c fbsd-threads.c gcore.c
+LIBSRCS+= fbsd-nat.c fbsd-proc.c fbsd-threads.c gcore.c
 LIBSRCS+= amd64-nat.c amd64bsd-nat.c amd64fbsd-nat.c
 .endif
 LIBSRCS+= solib.c solib-svr4.c
(-)b/gnu/usr.bin/gdb/arch/amd64/init.c (+2 lines)
Lines 115-120 extern initialize_file_ftype _initialize_tui_out;
 extern initialize_file_ftype _initialize_tui_regs;
 extern initialize_file_ftype _initialize_tui_stack;
 extern initialize_file_ftype _initialize_tui_win;
+extern initialize_file_ftype _initialize_fbsdnat;
 void
 initialize_all_files (void)
 {
Lines 231-234 initialize_all_files (void)
   _initialize_tui_regs ();
   _initialize_tui_stack ();
   _initialize_tui_win ();
+  _initialize_fbsdnat ();
 }
(-)b/gnu/usr.bin/gdb/arch/arm/Makefile (-1 / +1 lines)
Lines 1-7
 # $FreeBSD$
 
 GENSRCS+= xm.h
-LIBSRCS+= armfbsd-nat.c
+LIBSRCS+= armfbsd-nat.c fbsd-nat.c
 LIBSRCS+= arm-tdep.c armfbsd-tdep.c solib.c solib-svr4.c
 .if !defined(GDB_CROSS_DEBUGGER)
 LIBSRCS+= fbsd-threads.c
(-)b/gnu/usr.bin/gdb/arch/arm/init.c (+2 lines)
Lines 113-118 extern initialize_file_ftype _initialize_tui_out;
 extern initialize_file_ftype _initialize_tui_regs;
 extern initialize_file_ftype _initialize_tui_stack;
 extern initialize_file_ftype _initialize_tui_win;
+extern initialize_file_ftype _initialize_fbsdnat;
 void
 initialize_all_files (void)
 {
Lines 225-228 initialize_all_files (void)
   _initialize_tui_regs ();
   _initialize_tui_stack ();
   _initialize_tui_win ();
+  _initialize_fbsdnat ();
 }
(-)b/gnu/usr.bin/gdb/arch/i386/Makefile (-1 / +1 lines)
Lines 2-8
 
 GENSRCS+= xm.h
 .if !defined(GDB_CROSS_DEBUGGER)
-LIBSRCS+= fbsd-proc.c fbsd-threads.c gcore.c
+LIBSRCS+= fbsd-nat.c fbsd-proc.c fbsd-threads.c gcore.c
 LIBSRCS+= i386-nat.c i386bsd-nat.c i386fbsd-nat.c
 .endif
 LIBSRCS+= solib.c solib-svr4.c
(-)b/gnu/usr.bin/gdb/arch/i386/init.c (+2 lines)
Lines 116-121 extern initialize_file_ftype _initialize_tui_out;
 extern initialize_file_ftype _initialize_tui_regs;
 extern initialize_file_ftype _initialize_tui_stack;
 extern initialize_file_ftype _initialize_tui_win;
+extern initialize_file_ftype _initialize_fbsdnat;
 void
 initialize_all_files (void)
 {
Lines 233-236 initialize_all_files (void)
   _initialize_tui_regs ();
   _initialize_tui_stack ();
   _initialize_tui_win ();
+  _initialize_fbsdnat ();
 }
(-)b/gnu/usr.bin/gdb/arch/ia64/Makefile (-1 / +1 lines)
Lines 1-7
 # $FreeBSD$
 
 .if !defined(GDB_CROSS_DEBUGGER)
-LIBSRCS+= fbsd-proc.c fbsd-threads.c gcore.c
+LIBSRCS+= fbsd-nat.c fbsd-proc.c fbsd-threads.c gcore.c
 LIBSRCS+= ia64-fbsd-nat.c
 .endif
 LIBSRCS+= solib.c solib-svr4.c
(-)b/gnu/usr.bin/gdb/arch/ia64/init.c (+2 lines)
Lines 113-118 extern initialize_file_ftype _initialize_tui_out;
 extern initialize_file_ftype _initialize_tui_regs;
 extern initialize_file_ftype _initialize_tui_stack;
 extern initialize_file_ftype _initialize_tui_win;
+extern initialize_file_ftype _initialize_fbsdnat;
 void
 initialize_all_files (void)
 {
Lines 227-230 initialize_all_files (void)
   _initialize_tui_regs ();
   _initialize_tui_stack ();
   _initialize_tui_win ();
+  _initialize_fbsdnat ();
 }
(-)b/gnu/usr.bin/gdb/arch/mips/Makefile (-1 / +1 lines)
Lines 4-10
 # XXX Should set DEFAULT_BFD_VEC based on target.
 #
 .if !defined(GDB_CROSS_DEBUGGER)
-LIBSRCS+= mipsfbsd-nat.c fbsd-threads.c
+LIBSRCS+= fbsd-nat.c mipsfbsd-nat.c fbsd-threads.c
 .endif
 LIBSRCS+= solib.c solib-svr4.c
 LIBSRCS+= mips-tdep.c mipsfbsd-tdep.c fbsd-proc.c
(-)b/gnu/usr.bin/gdb/arch/mips/init.c (+2 lines)
Lines 112-117 extern initialize_file_ftype _initialize_tui_out;
 extern initialize_file_ftype _initialize_tui_regs;
 extern initialize_file_ftype _initialize_tui_stack;
 extern initialize_file_ftype _initialize_tui_win;
+extern initialize_file_ftype _initialize_fbsdnat;
 void
 initialize_all_files (void)
 {
Lines 230-233 initialize_all_files (void)
   _initialize_tui_regs ();
   _initialize_tui_stack ();
   _initialize_tui_win ();
+  _initialize_fbsdnat ();
 }
(-)b/gnu/usr.bin/gdb/arch/powerpc/Makefile (-1 / +1 lines)
Lines 1-7
 # $FreeBSD$
 
 .if !defined(GDB_CROSS_DEBUGGER)
-LIBSRCS+= fbsd-proc.c fbsd-threads.c gcore.c
+LIBSRCS+= fbsd-nat.c fbsd-proc.c fbsd-threads.c gcore.c
 LIBSRCS+= ppcfbsd-nat.c
 .endif
 LIBSRCS+= solib.c solib-svr4.c
(-)b/gnu/usr.bin/gdb/arch/powerpc/init.c (+2 lines)
Lines 113-118 extern initialize_file_ftype _initialize_tui_out;
 extern initialize_file_ftype _initialize_tui_regs;
 extern initialize_file_ftype _initialize_tui_stack;
 extern initialize_file_ftype _initialize_tui_win;
+extern initialize_file_ftype _initialize_fbsdnat;
 void
 initialize_all_files (void)
 {
Lines 227-230 initialize_all_files (void)
   _initialize_tui_regs ();
   _initialize_tui_stack ();
   _initialize_tui_win ();
+  _initialize_fbsdnat ();
 }
(-)b/gnu/usr.bin/gdb/arch/powerpc64/Makefile (-1 / +1 lines)
Lines 1-7
 # $FreeBSD$
 
 .if !defined(GDB_CROSS_DEBUGGER)
-LIBSRCS+= fbsd-proc.c fbsd-threads.c gcore.c
+LIBSRCS+= fbsd-nat.c fbsd-proc.c fbsd-threads.c gcore.c
 LIBSRCS+= ppcfbsd-nat.c
 .endif
 LIBSRCS+= solib.c solib-svr4.c
(-)b/gnu/usr.bin/gdb/arch/powerpc64/init.c (+2 lines)
Lines 113-118 extern initialize_file_ftype _initialize_tui_out;
 extern initialize_file_ftype _initialize_tui_regs;
 extern initialize_file_ftype _initialize_tui_stack;
 extern initialize_file_ftype _initialize_tui_win;
+extern initialize_file_ftype _initialize_fbsdnat;
 void
 initialize_all_files (void)
 {
Lines 227-230 initialize_all_files (void)
   _initialize_tui_regs ();
   _initialize_tui_stack ();
   _initialize_tui_win ();
+  _initialize_fbsdnat ();
 }
(-)b/gnu/usr.bin/gdb/arch/sparc64/init.c (+2 lines)
Lines 114-119 extern initialize_file_ftype _initialize_tui_out;
 extern initialize_file_ftype _initialize_tui_regs;
 extern initialize_file_ftype _initialize_tui_stack;
 extern initialize_file_ftype _initialize_tui_win;
+extern initialize_file_ftype _initialize_fbsdnat;
 void
 initialize_all_files (void)
 {
Lines 229-232 initialize_all_files (void)
   _initialize_tui_regs ();
   _initialize_tui_stack ();
   _initialize_tui_win ();
+  _initialize_fbsdnat ();
 }
(-)b/gnu/usr.bin/gdb/libgdb/fbsd-nat.c (+342 lines)
Added
/* Native-dependent code for FreeBSD.

   Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

#include "defs.h"
#include "inferior.h"
#include "symfile.h"
#include "gdbcore.h"
#include "gdbthread.h"
#include "gdb_assert.h"
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

extern struct target_ops child_ops;
void clear_step_resume_breakpoint (void);
void clear_step_resume_breakpoint_thread (void);
void (*reactivate_threads) (char*) = NULL;
void (*disable_threads) (void) = NULL;

static void (*mourn_inferior_beneath) (void);
static void (*detach_beneath) (char *args, int from_tty);
static ptid_t (*wait_beneath) (ptid_t ptid,
			       struct target_waitstatus *ourstatus);
int follow_event_pid = 0;

/* Return the name of a file that can be opened to get the symbols for
   the child process identified by PID.  */

char *
fbsd_pid_to_exec_file (int pid)
{
  size_t len = MAXPATHLEN;
  char *buf = xcalloc (len, sizeof (char));
  char *path;

#ifdef KERN_PROC_PATHNAME
  int mib[4];

  mib[0] = CTL_KERN;
  mib[1] = KERN_PROC;
  mib[2] = KERN_PROC_PATHNAME;
  mib[3] = pid;
  if (sysctl (mib, 4, buf, &len, NULL, 0) == 0)
    return buf;
#endif

  path = xstrprintf ("/proc/%d/file", pid);
  if (readlink (path, buf, MAXPATHLEN) == -1)
    {
      xfree (buf);
      buf = NULL;
    }

  xfree (path);
  return buf;
}

/* Wait for the child specified by PTID to do something.  Return the
   process ID of the child, or MINUS_ONE_PTID in case of error; store
   the status in *OURSTATUS.  */

static ptid_t
inf_ptrace_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
{
  pid_t pid;
  int status, save_errno;

  do
    {
      set_sigint_trap ();
      set_sigio_trap ();
      do
	{
	  pid = waitpid (PIDGET (ptid), &status, 0);
	  save_errno = errno;
	}
      while (pid == -1 && errno == EINTR);

      clear_sigio_trap ();
      clear_sigint_trap ();

      if (pid == -1)
	{
	  fprintf_unfiltered (gdb_stderr,
			      _("Child process unexpectedly missing: %s.\n"),
			      safe_strerror (save_errno));

	  /* Claim it exited with unknown signal.  */
	  ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
	  ourstatus->value.sig = TARGET_SIGNAL_UNKNOWN;
	  return minus_one_ptid;
	}

      /* Ignore terminated detached child processes.  */
      if (!WIFSTOPPED (status) && pid != PIDGET (inferior_ptid))
	pid = -1;
    }
  while (pid == -1);

  store_waitstatus (ourstatus, status);
  return pid_to_ptid (pid);
}

static ptid_t
fbsd_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
{
  long lwp;
  struct ptrace_lwpinfo lwpinfo;
  struct target_waitstatus stat;
  ptid_t ret;
  static ptid_t forking_child = {0,0,0};

  ret = wait_beneath (ptid, ourstatus);

  if (PIDGET (ret) >= 0 && ourstatus->kind == TARGET_WAITKIND_STOPPED &&
      (ourstatus->value.sig == TARGET_SIGNAL_TRAP ||
       ourstatus->value.sig == TARGET_SIGNAL_STOP) &&
      (ptrace(PT_LWPINFO, PIDGET (ret), (caddr_t)&lwpinfo,
	      sizeof lwpinfo) == 0))
    {
      if (lwpinfo.pl_flags & PL_FLAG_CHILD)
	{
	  /* Leave the child in a stopped state until we get a fork event in
	     the parent. That's when we decide which process to follow. */
	  ourstatus->kind = TARGET_WAITKIND_IGNORE;
	  forking_child = ret;
	}
      else if (lwpinfo.pl_flags & PL_FLAG_FORKED)
	{
	  /* We'd better be in the middle of processing a fork() event. */
	  gdb_assert (!ptid_equal (forking_child, null_ptid));
	  ourstatus->kind = TARGET_WAITKIND_FORKED;
	  ourstatus->value.related_pid = lwpinfo.pl_child_pid;
	  forking_child = null_ptid;
	}
      else if (lwpinfo.pl_flags & PL_FLAG_EXEC &&
	  PIDGET (ret) == follow_event_pid)
	{
	  ourstatus->kind = TARGET_WAITKIND_EXECD;
	  ourstatus->value.execd_pathname =
	    xstrdup (fbsd_pid_to_exec_file (PIDGET (ret)));
	}
    }

  return ret;
}

static void
fbsd_enable_event_reporting (int pid)
{
#ifdef PT_FOLLOW_FORK
  follow_event_pid = pid;
  if (ptrace(PT_FOLLOW_FORK, pid, 0, 1) < 0)
    error (_("Cannot follow fork on this target."));
#endif
}

static void
fbsd_post_attach (int pid)
{
  fbsd_enable_event_reporting (pid);
}

static void
fbsd_post_startup_inferior (ptid_t ptid)
{
  fbsd_enable_event_reporting (PIDGET (ptid));
}

int
fbsd_follow_fork (struct target_ops *ops, int follow_child)
{
  ptid_t last_ptid, ret, child_ptid;
  struct target_waitstatus last_status;
  int parent_pid, child_pid;
  struct target_waitstatus ourstatus;

  get_last_target_status (&last_ptid, &last_status);
  parent_pid = PIDGET (last_ptid);
  child_pid = last_status.value.related_pid;

  if (follow_child)
    {
      detach_breakpoints (child_pid);
      remove_breakpoints ();
      child_ptid = pid_to_ptid (child_pid);

      target_detach (NULL, 0);
      inferior_ptid = child_ptid;

      /* Reinstall ourselves, since we might have been removed in
	 target_detach (which does other necessary cleanup).  */
      push_target (ops);

      /* Need to restore some of the actions done by the threaded detach */
      if (reactivate_threads)
	{
	  reactivate_threads (fbsd_pid_to_exec_file (child_pid));
	  reactivate_threads = NULL;
	}

      /* Reset breakpoints in the child as appropriate.  */
      clear_step_resume_breakpoint_thread ();
      follow_inferior_reset_breakpoints ();

      /* Enable fork/exec event reporting for the child. */
      fbsd_enable_event_reporting (child_pid);
    }
  else /* Follow parent */
    {
      /* Before detaching from the child, remove all breakpoints from
         it.  (This won't actually modify the breakpoint list, but will
         physically remove the breakpoints from the child.) */
      detach_breakpoints (child_pid);
      ptrace (PT_DETACH, child_pid, (caddr_t) 1, 0);
    }

  return 0;
}

/* EXECD_PATHNAME is assumed to be non-NULL. */

static void
fbsd_follow_exec (int pid, char *execd_pathname)
{
  struct target_waitstatus status;
  ptid_t ret = inferior_ptid;

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid. */
  update_breakpoints_after_exec ();

  /* If there was one, it's gone now.  We cannot truly step-to-next
     statement through an exec(). */
  clear_step_resume_breakpoint ();
  step_range_start = 0;
  step_range_end = 0;

  /* What is this a.out's name? */
  printf_unfiltered (_("Executing new program: %s\n"), execd_pathname);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn. */

  gdb_flush (gdb_stdout);

  /* Disable thread library */
  if (disable_threads)
    {
      disable_threads ();
      disable_threads = NULL;
    }

  generic_mourn_inferior ();
  inferior_ptid = ret;

  /* That a.out is now the one to use. */
  exec_file_attach (execd_pathname, 0);

  /* And also is where symbols can be found. */
  symbol_file_add_main (execd_pathname, 0);

  /* Reset the shared library package.  This ensures that we get
     a shlib event when the child reaches "_start", at which point
     the dld will have had a chance to initialize the child. */
#if defined(SOLIB_RESTART)
  SOLIB_RESTART ();
#endif
#ifdef SOLIB_CREATE_INFERIOR_HOOK
  SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
#else
  solib_create_inferior_hook ();
#endif

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...) */
  insert_breakpoints ();
}

static void fbsd_mourn_inferior (void)
{
  follow_event_pid = 0;
  mourn_inferior_beneath ();
}

static void fbsd_detach (char *args, int from_tty)
{
  follow_event_pid = 0;
  detach_beneath (args, from_tty);
}

void
_initialize_fbsdnat (void)
{
  wait_beneath = inf_ptrace_wait;
  detach_beneath = child_ops.to_detach;
  mourn_inferior_beneath = child_ops.to_mourn_inferior;
  child_ops.to_wait = fbsd_wait;
  child_ops.to_detach = fbsd_detach;
  child_ops.to_mourn_inferior = fbsd_mourn_inferior;
  child_ops.to_post_attach = fbsd_post_attach;
  child_ops.to_post_startup_inferior = fbsd_post_startup_inferior;
  child_ops.to_follow_fork = fbsd_follow_fork;
  child_ops.to_follow_exec = fbsd_follow_exec;
}
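
fbsd_wait() above recognizes fork and exec stops via the PL_FLAG_CHILD, PL_FLAG_FORKED, and PL_FLAG_EXEC bits returned by ptrace(PT_LWPINFO). A small target program for exercising the new paths under the patched gdb could look like this (a hypothetical test, not part of the patch):

/* Hypothetical test: fork, then exec in the child.  Run under the
   patched gdb with "set follow-fork-mode child" to hit both the
   TARGET_WAITKIND_FORKED and TARGET_WAITKIND_EXECD paths. */
#include <sys/types.h>
#include <sys/wait.h>
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  pid_t pid = fork ();

  if (pid == -1)
    {
      perror ("fork");
      return 1;
    }
  if (pid == 0)
    {
      /* Child: exec a new image, triggering the exec event. */
      execl ("/bin/echo", "echo", "child after exec", (char *) NULL);
      perror ("execl");
      _exit (1);
    }
  /* Parent: reap the child; the fork event is reported first. */
  waitpid (pid, NULL, 0);
  printf ("parent done\n");
  return 0;
}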
(-)b/gnu/usr.bin/gdb/libgdb/fbsd-threads.c (-3 / +51 lines)
Lines 68-73 extern struct target_ops core_ops;
 
 /* Pointer to the next function on the objfile event chain.  */
 static void (*target_new_objfile_chain) (struct objfile *objfile);
+
+/* Non-zero while processing thread library re-activation after fork() */
+static int fbsd_forking;
 
 /* Non-zero if there is a thread module */
 static int fbsd_thread_present;
Lines 154-159 static int fbsd_thread_alive (ptid_t ptid);
 static void attach_thread (ptid_t ptid, const td_thrhandle_t *th_p,
                const td_thrinfo_t *ti_p, int verbose);
 static void fbsd_thread_detach (char *args, int from_tty);
+extern void (*reactivate_threads) (char*);
+extern void (*disable_threads) (void);
+static void fbsd_thread_activate (void);
+static void fbsd_thread_deactivate (void);
 
 /* Building process ids.  */
 
Lines 405-419 disable_thread_event_reporting (void)
   td_death_bp_addr = 0;
 }
 
+static void
+fbsd_thread_reactivate_after_fork (char *pathname)
+{
+  fbsd_forking = 1;
+
+  /* That a.out is now the one to use. */
+  exec_file_attach (pathname, 0);
+
+  /* And also is where symbols can be found. */
+  symbol_file_add_main (pathname, 0);
+  push_target (&fbsd_thread_ops);
+
+#ifdef SOLIB_CREATE_INFERIOR_HOOK
+  SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
+#else
+  solib_create_inferior_hook ();
+#endif
+  fbsd_forking = 0;
+}
+
+static void
+fbsd_thread_disable_after_exec (void)
+{
+  if (fbsd_thread_active)
+    fbsd_thread_deactivate ();
+
+  unpush_target (&fbsd_thread_ops);
+}
+
 static void
 fbsd_thread_activate (void)
 {
   fbsd_thread_active = 1;
+  reactivate_threads = fbsd_thread_reactivate_after_fork;
+  disable_threads = fbsd_thread_disable_after_exec;
   init_thread_list();
   if (fbsd_thread_core == 0)
     enable_thread_event_reporting ();
-  fbsd_thread_find_new_threads ();
-  get_current_thread ();
+
+  if (!fbsd_forking)
+    {
+      fbsd_thread_find_new_threads ();
+      get_current_thread ();
+    }
 }
 
 static void
Lines 626-632 fbsd_thread_resume (ptid_t ptid, int step, enum target_signal signo)
     }
 
   lwp = GET_LWP (work_ptid);
-  if (lwp == 0)
+  if (lwp == 0 && GET_THREAD (work_ptid) != 0)
     {
       /* check user thread */
       ret = td_ta_map_id2thr_p (thread_agent, GET_THREAD(work_ptid), &th);
Lines 790-795 fbsd_thread_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
   ret = child_ops.to_wait (ptid, ourstatus);
   if (GET_PID(ret) >= 0 && ourstatus->kind == TARGET_WAITKIND_STOPPED)
     {
+      if (thread_list_empty ())
+	fbsd_thread_find_new_threads ();
+
       lwp = get_current_lwp (GET_PID(ret));
       ret = thread_from_lwp (BUILD_LWP(lwp, GET_PID(ret)),
          &th, &ti);
Lines 1065-1070 fbsd_thread_create_inferior (char *exec_file, char *allargs, char **env)
 static void
 fbsd_thread_post_startup_inferior (ptid_t ptid)
 {
+  if (child_ops.to_post_startup_inferior)
+    child_ops.to_post_startup_inferior (ptid);
+
   if (fbsd_thread_present && !fbsd_thread_active)
     {
       /* The child process is now the actual multi-threaded
(-)b/share/man/man9/fpu_kern.9 (+10 lines)
Lines 120-125 could be used from both kernel thread and syscall contexts.
 The
 .Fn fpu_kern_leave
 function correctly handles such contexts.
+.It Dv FPU_KERN_NOCTX
+Avoid nesting save area.
+If the flag is specified, the
+.Fa ctx
+must be passed as
+.Va NULL .
+The flag should only be used for really short code blocks
+which can be executed in a critical section.
+It avoids the need to allocate the FPU context at the cost
+of increased system latency.
 .El
 .El
 .Pp
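
The calling pattern the new flag implies, as a minimal sketch (assuming a consumer that executes only a handful of FPU instructions; this example is not part of the patch):

/* FPU_KERN_NOCTX usage sketch: no save area is allocated, ctx must be
   NULL, and the body runs inside the critical section entered by
   fpu_kern_enter(), so it must be short and non-sleeping. */
static void
short_fpu_block(void)
{

	fpu_kern_enter(curthread, NULL, FPU_KERN_NORMAL | FPU_KERN_NOCTX);
	/* ... a few SSE/AES-NI instructions ... */
	fpu_kern_leave(curthread, NULL);
}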
(-)b/sys/amd64/amd64/fpu.c (-4 / +43 lines)
Lines 348-354 fpuexit(struct thread *td)
 		stop_emulating();
 		fpusave(curpcb->pcb_save);
 		start_emulating();
-		PCPU_SET(fpcurthread, 0);
+		PCPU_SET(fpcurthread, NULL);
 	}
 	critical_exit();
 }
Lines 603-608 fpudna(void)
 {
 
 	critical_enter();
+	KASSERT((curpcb->pcb_flags & PCB_FPUNOSAVE) == 0,
+	    ("fpudna while in fpu_kern_enter(FPU_KERN_NOCTX)"));
 	if (PCPU_GET(fpcurthread) == curthread) {
 		printf("fpudna: fpcurthread == curthread %d times\n",
 		    ++err_count);
Lines 636-642 fpudna(void)
 		 * fpu_initialstate, to ignite the XSAVEOPT
 		 * tracking engine.
 		 */
-		bcopy(fpu_initialstate, curpcb->pcb_save, cpu_max_ext_state_size);
+		bcopy(fpu_initialstate, curpcb->pcb_save,
+		    cpu_max_ext_state_size);
 		fpurestore(curpcb->pcb_save);
 		if (curpcb->pcb_initial_fpucw != __INITIAL_FPUCW__)
 			fldcw(curpcb->pcb_initial_fpucw);
Lines 934-944 fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
 {
 	struct pcb *pcb;
 
+	KASSERT((flags & FPU_KERN_NOCTX) != 0 || ctx != NULL,
+	    ("ctx is required when !FPU_KERN_NOCTX"));
+	pcb = td->td_pcb;
+	KASSERT((pcb->pcb_flags & PCB_FPUNOSAVE) == 0,
+	    ("recursive fpu_kern_enter while in PCB_FPUNOSAVE state"));
+	if ((flags & FPU_KERN_NOCTX) != 0) {
+		critical_enter();
+		stop_emulating();
+		if (curthread == PCPU_GET(fpcurthread)) {
+			fpusave(curpcb->pcb_save);
+			PCPU_SET(fpcurthread, NULL);
+		} else {
+			KASSERT(PCPU_GET(fpcurthread) == NULL,
+			    ("invalid fpcurthread"));
+		}
+
+		/*
+		 * This breaks XSAVEOPT tracker, but
+		 * PCB_FPUNOSAVE state is supposed to never need to
+		 * save FPU context at all.
+		 */
+		fpurestore(fpu_initialstate);
+		set_pcb_flags(pcb, PCB_KERNFPU | PCB_FPUNOSAVE |
+		    PCB_FPUINITDONE);
+		return (0);
+	}
 	if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
 		ctx->flags = FPU_KERN_CTX_DUMMY;
 		return (0);
 	}
-	pcb = td->td_pcb;
 	KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save ==
 	    get_pcb_user_save_pcb(pcb), ("mangled pcb_save"));
 	ctx->flags = 0;
Lines 957-971 fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
 {
 	struct pcb *pcb;
 
+	pcb = td->td_pcb;
+	if ((pcb->pcb_flags & PCB_FPUNOSAVE) != 0) {
+		KASSERT(ctx == NULL, ("non-null ctx after FPU_KERN_NOCTX"));
+		KASSERT(PCPU_GET(fpcurthread) == NULL,
+		    ("non-NULL fpcurthread for PCB_FPUNOSAVE"));
+		CRITICAL_ASSERT(td);
+		clear_pcb_flags(pcb, PCB_FPUNOSAVE | PCB_FPUINITDONE);
+		start_emulating();
+		critical_exit();
+		goto restore_flags;
+	}
 	if (is_fpu_kern_thread(0) && (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
 		return (0);
 	KASSERT((ctx->flags & FPU_KERN_CTX_DUMMY) == 0, ("dummy ctx"));
-	pcb = td->td_pcb;
 	critical_enter();
 	if (curthread == PCPU_GET(fpcurthread))
 		fpudrop();
 	critical_exit();
 	pcb->pcb_save = ctx->prev;
+restore_flags:
 	if (pcb->pcb_save == get_pcb_user_save_pcb(pcb)) {
 		if ((pcb->pcb_flags & PCB_USERFPUINITDONE) != 0) {
 			set_pcb_flags(pcb, PCB_FPUINITDONE);
(-)b/sys/amd64/amd64/initcpu.c (+5 lines)
Lines 88-93 static void
 init_amd(void)
 {
 
+	if (CPUID_TO_FAMILY(cpu_id) == 0x9) {
+		if ((cpu_feature2 & CPUID2_HV) == 0)
+			wrmsr(MSR_HWCR, rdmsr(MSR_HWCR) | (1 << 6));
+	}
+
 	/*
 	 * Work around Erratum 721 for Family 10h and 12h processors.
 	 * These processors may incorrectly update the stack pointer
(-)b/sys/amd64/amd64/mp_machdep.c (+75 lines)
Lines 58-63 __FBSDID("$FreeBSD$");
 #include <vm/vm_kern.h>
 #include <vm/vm_extern.h>
 
+#include "opt_ddb.h"
+#ifdef DDB
+#include <ddb/ddb.h>
+#include <machine/setjmp.h>
+#endif
+
 #include <x86/apicreg.h>
 #include <machine/clock.h>
 #include <machine/cputypes.h>
Lines 1415-1420 ipi_nmi_handler()
 	cpustop_handler();
 	return (0);
 }
+
+#ifdef DDB
+static int ddb_migrate_cpu = -1;
+static int ddb_orig_cpu = -1;
+static jmp_buf ddb_migrate_buf;
+void db_command_loop(void);
+#endif
 
 /*
  * Handle an IPI_STOP by saving our current context and spinning until we
Lines 1429-1434 cpustop_handler(void)
 
 	savectx(&stoppcbs[cpu]);
 
+#ifdef DDB
+migration_exited:
+#endif
 	/* Indicate that we are stopped */
 	CPU_SET_ATOMIC(cpu, &stopped_cpus);
 
Lines 1436-1441 cpustop_handler(void)
 	while (!CPU_ISSET(cpu, &started_cpus))
 	    ia32_pause();
 
+#ifdef DDB
+	if (ddb_migrate_cpu == cpu) {
+		if (setjmp(ddb_migrate_buf)) {
+			db_printf("leaving cpu %d\n", cpu);
+			ddb_migrate_cpu = -1;
+			CPU_CLR_ATOMIC(cpu, &started_cpus);
+			CPU_SET_ATOMIC(ddb_orig_cpu, &started_cpus);
+			goto migration_exited;
+		}
+		db_printf("current cpu %d\n", cpu);
+		db_command_loop();
+		panic("continued from migrated\n");
+	}
+#endif
+
 	CPU_CLR_ATOMIC(cpu, &started_cpus);
 	CPU_CLR_ATOMIC(cpu, &stopped_cpus);
 
Lines 1449-1454 cpustop_handler(void)
 	}
 }
 
+#ifdef DDB
+DB_COMMAND(cpuret, db_cpuret)
+{
+
+	if (ddb_migrate_cpu == -1) {
+		db_printf("not migrated\n");
+		return;
+	}
+	longjmp(ddb_migrate_buf, 1);
+}
+
+DB_COMMAND(cpu, db_cpu)
+{
+	int mcpu, currcpu;
+
+	if (ddb_migrate_cpu != -1) {
+		db_printf("already migrated, return to orig cpu first\n");
+		return;
+	}
+	if (!have_addr) {
+		db_printf("specify cpu to migrate\n");
+		return;
+	}
+	mcpu = (int)addr;
+	if (mcpu < 0 || mcpu >= mp_ncpus) {
+		db_printf("cpu %d does not exist\n", mcpu);
+		return;
+	}
+
+	ddb_migrate_cpu = mcpu;
+	currcpu = PCPU_GET(cpuid);
+	ddb_orig_cpu = currcpu;
+	savectx(&stoppcbs[currcpu]);
+	CPU_CLR_ATOMIC(currcpu, &started_cpus);
+	CPU_SET_ATOMIC(currcpu, &stopped_cpus);
+	CPU_SET_ATOMIC(mcpu, &started_cpus);
+	while (!CPU_ISSET(currcpu, &started_cpus))
+	    ia32_pause();
+	CPU_CLR_ATOMIC(currcpu, &started_cpus);
+	CPU_CLR_ATOMIC(currcpu, &stopped_cpus);
+	db_printf("current cpu %d\n", currcpu);
+}
+#endif
+
 /*
  * Handle an IPI_SUSPEND by saving our current context and spinning until we
  * are resumed.
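
Taken together, the DDB hunks give the stopped-CPU spin loop a re-entry point: "cpu <n>" parks the current CPU in stopped_cpus, releases CPU n from its spin in cpustop_handler(), and runs a nested db_command_loop() there; "cpuret" longjmps back and hands control to the original CPU. An interactive session would look roughly like this (illustrative transcript):

db> cpu 2
current cpu 2
db> trace
...
db> cpuret
leaving cpu 2
current cpu 0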
(-)b/sys/amd64/include/fpu.h (+1 lines)
Lines 85-90 void fpu_save_area_reset(struct savefpu *fsa);
 #define	FPU_KERN_NORMAL	0x0000
 #define	FPU_KERN_NOWAIT	0x0001
 #define	FPU_KERN_KTHR	0x0002
+#define	FPU_KERN_NOCTX	0x0004
 
 #endif
 
(-)b/sys/amd64/include/pcb.h (+1 lines)
Lines 79-84 struct pcb {
 #define	PCB_FPUINITDONE	0x08	/* fpu state is initialized */
 #define	PCB_USERFPUINITDONE 0x10 /* fpu user state is initialized */
 #define	PCB_32BIT	0x40	/* process has 32 bit context (segs etc) */
+#define	PCB_FPUNOSAVE	0x80	/* no save area for current FPU ctx */
 
 	uint16_t	pcb_initial_fpucw;
 
(-)b/sys/dev/random/ivy.c (-1 / +2 lines)
Lines 58-64 static int random_ivy_read(void *, int);
 static struct random_hardware_source random_ivy = {
 	.ident = "Hardware, Intel Secure Key RNG",
 	.source = RANDOM_PURE_RDRAND,
-	.read = random_ivy_read
+	.read = random_ivy_read,
+	.entropy_cdev_name = "ivy",
 };
 
 static inline int
(-)b/sys/dev/random/live_entropy_sources.c (-3 / +54 lines)
Lines 28-33
 #include <sys/param.h>
 __FBSDID("$FreeBSD$");
 
+#include <sys/conf.h>
 #include <sys/kernel.h>
 #include <sys/libkern.h>
 #include <sys/lock.h>
Lines 38-43 __FBSDID("$FreeBSD$");
 #include <sys/sx.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
+#include <sys/uio.h>
 #include <sys/unistd.h>
 
 #include <machine/cpu.h>
Lines 57-62 static struct les_head sources = LIST_HEAD_INITIALIZER(sources);
  */
 static struct sx les_lock; /* need a sleepable lock */
 
+static int
+entropy_read(struct cdev *dev, struct uio *uio, int flags)
+{
+	uint8_t buf[HARVESTSIZE];
+	struct random_hardware_source *rsource;
+	ssize_t resid;
+	int c, error;
+
+	sx_slock(&les_lock);
+	rsource = dev->si_drv1;
+	if (rsource == NULL) {
+		error = ENXIO;
+	} else {
+		error = 0;
+		resid = uio->uio_resid;
+		while (uio->uio_resid > 0) {
+			c = rsource->read(buf, sizeof(buf));
+			if (c > 0)
+				error = uiomove(buf, c, uio);
+			if (error != 0 || c == 0)
+				break;
+		}
+		if (resid != uio->uio_resid)
+			error = 0;
+	}
+	sx_sunlock(&les_lock);
+	return (error);
+}
+
+static struct cdevsw entropy_cdevsw = {
+	.d_version = D_VERSION,
+	.d_read = entropy_read,
+	.d_name = "entropy",
+};
+
 void
 live_entropy_source_register(struct random_hardware_source *rsource)
 {
Lines 66-73 live_entropy_source_register(struct random_hardware_source *rsource)
 
 	les = malloc(sizeof(struct live_entropy_sources), M_ENTROPY, M_WAITOK);
 	les->rsource = rsource;
+	les->dev = make_dev_credf(MAKEDEV_ETERNAL_KLD | MAKEDEV_WAITOK |
+	    MAKEDEV_CHECKNAME, &entropy_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL,
+	    0400, "entropy/%s", rsource->entropy_cdev_name);
 
 	sx_xlock(&les_lock);
+	if (les->dev != NULL)
+		les->dev->si_drv1 = rsource;
 	LIST_INSERT_HEAD(&sources, les, entries);
 	sx_xunlock(&les_lock);
 }
Lines 76-93 void
 live_entropy_source_deregister(struct random_hardware_source *rsource)
 {
 	struct live_entropy_sources *les = NULL;
+	struct cdev *dev;
 
 	KASSERT(rsource != NULL, ("invalid input to %s", __func__));
 
+	dev = NULL;
 	sx_xlock(&les_lock);
-	LIST_FOREACH(les, &sources, entries)
+	LIST_FOREACH(les, &sources, entries) {
 		if (les->rsource == rsource) {
 			LIST_REMOVE(les, entries);
 			break;
 		}
+	}
+	if (les != NULL) {
+		dev = les->dev;
+		if (dev != NULL)
+			dev->si_drv1 = NULL;
+	}
 	sx_xunlock(&les_lock);
-	if (les != NULL)
-		free(les, M_ENTROPY);
+	if (dev != NULL)
+		destroy_dev(dev);
+	free(les, M_ENTROPY);
 }
 
 static int
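
With a source registered, its device node can be read directly; a quick userland check might look like this (hypothetical test program, assuming the "ivy" source from sys/dev/random/ivy.c above is present):

/* Hypothetical smoke test for the per-source entropy device. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	unsigned char buf[16];
	ssize_t i, n;
	int fd;

	fd = open("/dev/entropy/ivy", O_RDONLY);
	if (fd == -1) {
		perror("open");
		return (1);
	}
	n = read(fd, buf, sizeof(buf));
	for (i = 0; i < n; i++)
		printf("%02x", buf[i]);
	printf("\n");
	close(fd);
	return (0);
}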
(-)b/sys/dev/random/live_entropy_sources.h (+1 lines)
Lines 38-43
 struct live_entropy_sources {
 	LIST_ENTRY(live_entropy_sources) entries;	/* list of providers */
 	struct random_hardware_source	*rsource;	/* associated random adaptor */
+	struct cdev *dev;
 };
 
 extern struct mtx live_mtx;
(-)b/sys/dev/random/nehemiah.c (-1 / +2 lines)
Lines 55-61 static int random_nehemiah_read(void *, int);
 static struct random_hardware_source random_nehemiah = {
 	.ident = "Hardware, VIA Nehemiah Padlock RNG",
 	.source = RANDOM_PURE_NEHEMIAH,
-	.read = random_nehemiah_read
+	.read = random_nehemiah_read,
+	.entropy_cdev_name = "nehemiah",
 };
 
 /* TODO: now that the Davies-Meyer hash is gone and we only use
(-)b/sys/dev/random/randomdev.h (+1 lines)
Lines 55-60 struct random_adaptor {
 
 struct random_hardware_source {
 	const char		*ident;
+	const char		*entropy_cdev_name;
 	enum esource		source;
 	random_read_func_t	*read;
 };
(-)b/sys/fs/nullfs/null_subr.c (+1 lines)
Lines 251-256 null_nodeget(mp, lowervp, vpp)
 	vp->v_type = lowervp->v_type;
 	vp->v_data = xp;
 	vp->v_vnlock = lowervp->v_vnlock;
+	vp->v_vflag = lowervp->v_vflag & VV_ROOT;
 	error = insmntque1(vp, mp, null_insmntque_dtr, xp);
 	if (error != 0)
 		return (error);
(-)b/sys/fs/tmpfs/tmpfs.h (-1 / +2 lines)
Lines 51-57
 #include <sys/systm.h>
 #include <sys/tree.h>
 #include <sys/vmmeter.h>
-#include <vm/swap_pager.h>
+#include <vm/vm.h>
+#include <vm/vm_param.h>
 
 MALLOC_DECLARE(M_TMPFSMNT);
 MALLOC_DECLARE(M_TMPFSNAME);
(-)b/sys/fs/tmpfs/tmpfs_subr.c (-1 / +2 lines)
Lines 53-61 __FBSDID("$FreeBSD$");
 #include <vm/vm_param.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
-#include <vm/vm_pageout.h>
 #include <vm/vm_pager.h>
+#include <vm/vm_pageout.h>
 #include <vm/vm_extern.h>
+#include <vm/swap_pager.h>
 
 #include <fs/tmpfs/tmpfs.h>
 #include <fs/tmpfs/tmpfs_fifoops.h>
(-)b/sys/fs/tmpfs/tmpfs_vnops.c (-7 lines)
Lines 45-63 __FBSDID("$FreeBSD$");
 #include <sys/proc.h>
 #include <sys/rwlock.h>
 #include <sys/sched.h>
-#include <sys/sf_buf.h>
 #include <sys/stat.h>
 #include <sys/systm.h>
 #include <sys/sysctl.h>
 #include <sys/unistd.h>
 #include <sys/vnode.h>
 
-#include <vm/vm.h>
-#include <vm/vm_param.h>
-#include <vm/vm_object.h>
-#include <vm/vm_page.h>
-#include <vm/vm_pager.h>
-
 #include <fs/tmpfs/tmpfs_vnops.h>
 #include <fs/tmpfs/tmpfs.h>
 
(-)b/sys/i386/i386/initcpu.c (-18 / +27 lines)
Lines 651-656 init_transmeta(void)
 }
 #endif
 
+static void
+init_amd(void)
+{
+
+#ifdef CPU_ATHLON_SSE_HACK
+	/*
+	 * Sometimes the BIOS doesn't enable SSE instructions.
+	 * According to AMD document 20734, the mobile Duron, the
+	 * (mobile) Athlon 4 and the Athlon MP support SSE. These
+	 * correspond to cpu_id 0x66X or 0x67X.
+	 */
+	if ((cpu_feature & CPUID_XMM) == 0 && ((cpu_id & ~0xf) == 0x660 ||
+	    (cpu_id & ~0xf) == 0x670 || (cpu_id & ~0xf) == 0x680)) {
+		u_int regs[4];
+
+		wrmsr(MSR_HWCR, rdmsr(MSR_HWCR) & ~0x08000);
+		do_cpuid(1, regs);
+		cpu_feature = regs[3];
+	}
+#endif
+	if (CPUID_TO_FAMILY(cpu_id) == 0x9) {
+		if ((cpu_feature2 & CPUID2_HV) == 0)
+			wrmsr(MSR_HWCR, rdmsr(MSR_HWCR) | (1 << 6));
+	}
+}
+
 /*
  * Initialize CR4 (Control register 4) to enable SSE instructions.
  */
Lines 725-750 initializecpu(void)
 				break;
 			}
 			break;
-#ifdef CPU_ATHLON_SSE_HACK
 		case CPU_VENDOR_AMD:
-			/*
-			 * Sometimes the BIOS doesn't enable SSE instructions.
-			 * According to AMD document 20734, the mobile
-			 * Duron, the (mobile) Athlon 4 and the Athlon MP
-			 * support SSE. These correspond to cpu_id 0x66X
-			 * or 0x67X.
-			 */
-			if ((cpu_feature & CPUID_XMM) == 0 &&
-			    ((cpu_id & ~0xf) == 0x660 ||
-			     (cpu_id & ~0xf) == 0x670 ||
-			     (cpu_id & ~0xf) == 0x680)) {
-				u_int regs[4];
-				wrmsr(MSR_HWCR, rdmsr(MSR_HWCR) & ~0x08000);
-				do_cpuid(1, regs);
-				cpu_feature = regs[3];
-			}
+			init_amd();
 			break;
-#endif
 		case CPU_VENDOR_CENTAUR:
 			init_via();
 			break;
(-)b/sys/i386/i386/pmap.c (-9 / +13 lines)
Lines 3477-3493 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 	PMAP_LOCK(pmap);
 	sched_pin();
 
-	/*
-	 * In the case that a page table page is not
-	 * resident, we are creating it here.
-	 */
-	if (va < VM_MAXUSER_ADDRESS) {
+	pde = pmap_pde(pmap, va);
+	if ((*pde & PG_PS) != 0) {
+		/* PG_V is asserted by pmap_demote_pde */
+		pmap_demote_pde(pmap, pde, va);
+		if (va < VM_MAXUSER_ADDRESS) {
+			mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
+			mpte->wire_count++;
+		}
+	} else if (va < VM_MAXUSER_ADDRESS) {
+		/*
+		 * In the case that a page table page is not resident,
+		 * we are creating it here.
+		 */
 		mpte = pmap_allocpte(pmap, va, M_WAITOK);
 	}
-
-	pde = pmap_pde(pmap, va);
-	if ((*pde & PG_PS) != 0)
-		panic("pmap_enter: attempted pmap_enter on 4MB page");
 	pte = pmap_pte_quick(pmap, va);
 
 	/*
(-)b/sys/kern/kern_descrip.c (-12 / +12 lines)
Lines 314-333 struct getdtablesize_args { Link Here
314
	int	dummy;
314
	int	dummy;
315
};
315
};
316
#endif
316
#endif
317
/* ARGSUSED */
317
318
int
318
int
319
sys_getdtablesize(struct thread *td, struct getdtablesize_args *uap)
319
sys_getdtablesize(struct thread *td, struct getdtablesize_args *uap)
320
{
320
{
321
	struct proc *p = td->td_proc;
321
	struct proc *p;
322
	uint64_t lim;
322
	uint64_t lim;
323
	int maxfd, res;
323
324
325
	p = td->td_proc;
324
	PROC_LOCK(p);
326
	PROC_LOCK(p);
325
	td->td_retval[0] =
327
	res = lim_cur(p, RLIMIT_NOFILE);
326
	    min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
327
	lim = racct_get_limit(td->td_proc, RACCT_NOFILE);
328
	lim = racct_get_limit(td->td_proc, RACCT_NOFILE);
328
	PROC_UNLOCK(p);
329
	PROC_UNLOCK(p);
329
	if (lim < td->td_retval[0])
330
	maxfd = maxfilesperproc;
330
		td->td_retval[0] = lim;
331
	if (maxfd > res)
332
		maxfd = res;
333
	if (maxfd > lim)
334
		maxfd = lim;
335
	td->td_retval[0] = maxfd;
331
	return (0);
336
	return (0);
332
}
337
}
333
338
Lines 775-787 kern_fcntl(struct thread *td, int fd, int cmd, intptr_t arg) Link Here
775
static int
780
static int
776
getmaxfd(struct proc *p)
781
getmaxfd(struct proc *p)
777
{
782
{
778
	int maxfd;
779
780
	PROC_LOCK(p);
781
	maxfd = min((int)lim_cur(p, RLIMIT_NOFILE), maxfilesperproc);
782
	PROC_UNLOCK(p);
783
783
784
	return (maxfd);
784
	return (imin(lim_cur_unlocked(p, RLIMIT_NOFILE), maxfilesperproc));
785
}
785
}
786
786
787
/*
787
/*
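
The new-side getmaxfd() collapses to a single expression:

static int
getmaxfd(struct proc *p)
{
	return (imin(lim_cur_unlocked(p, RLIMIT_NOFILE), maxfilesperproc));
}

The PROC_LOCK()/PROC_UNLOCK() pair and the local variable disappear because lim_cur_unlocked() (added in the kern_resource.c hunk below) acquires the lock itself, and imin() replaces the min() call and its int cast.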
(-)b/sys/kern/kern_proc.c (-32 / +53 lines)
Lines 141-146 uma_zone_t proc_zone; Link Here
141
int kstack_pages = KSTACK_PAGES;
141
int kstack_pages = KSTACK_PAGES;
142
SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0,
142
SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0,
143
    "Kernel stack size in pages");
143
    "Kernel stack size in pages");
144
static int vmmap_skip_res_cnt = 1;
145
SYSCTL_INT(_kern, OID_AUTO, proc_vmmap_skip_resident_count, CTLFLAG_RW,
146
    &vmmap_skip_res_cnt, 0,
147
    "Skip calculation of the pages resident count in kern.proc.vmmap");
144
148
145
CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
149
CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
146
#ifdef COMPAT_FREEBSD32
150
#ifdef COMPAT_FREEBSD32
Lines 2136-2150 int Link Here
2136
kern_proc_vmmap_out(struct proc *p, struct sbuf *sb)
2140
kern_proc_vmmap_out(struct proc *p, struct sbuf *sb)
2137
{
2141
{
2138
	vm_map_entry_t entry, tmp_entry;
2142
	vm_map_entry_t entry, tmp_entry;
2139
	unsigned int last_timestamp;
2143
	struct vattr va;
2144
	vm_map_t map;
2145
	vm_page_t m;
2146
	vm_object_t obj, tobj, lobj;
2140
	char *fullpath, *freepath;
2147
	char *fullpath, *freepath;
2141
	struct kinfo_vmentry *kve;
2148
	struct kinfo_vmentry *kve;
2142
	struct vattr va;
2143
	struct ucred *cred;
2149
	struct ucred *cred;
2144
	int error;
2145
	struct vnode *vp;
2150
	struct vnode *vp;
2146
	struct vmspace *vm;
2151
	struct vmspace *vm;
2147
	vm_map_t map;
2152
	vm_pindex_t pindex;
2153
	vm_offset_t addr, clp;
2154
	unsigned int last_timestamp;
2155
	int error;
2148
2156
2149
	PROC_LOCK_ASSERT(p, MA_OWNED);
2157
	PROC_LOCK_ASSERT(p, MA_OWNED);
2150
2158
Lines 2162-2205 kern_proc_vmmap_out(struct proc *p, struct sbuf *sb) Link Here
2162
	vm_map_lock_read(map);
2170
	vm_map_lock_read(map);
2163
	for (entry = map->header.next; entry != &map->header;
2171
	for (entry = map->header.next; entry != &map->header;
2164
	    entry = entry->next) {
2172
	    entry = entry->next) {
2165
		vm_object_t obj, tobj, lobj;
2166
		vm_offset_t addr;
2167
		vm_paddr_t locked_pa;
2168
		int mincoreinfo;
2169
2170
		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2173
		if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
2171
			continue;
2174
			continue;
2172
2175
2173
		bzero(kve, sizeof(*kve));
2176
		bzero(kve, sizeof(*kve));
2174
2177
2175
		kve->kve_private_resident = 0;
2178
		kve->kve_private_resident = 0;
2179
		kve->kve_resident = 0;
2176
		obj = entry->object.vm_object;
2180
		obj = entry->object.vm_object;
2177
		if (obj != NULL) {
2181
		if (obj != NULL) {
2178
			VM_OBJECT_RLOCK(obj);
2182
			for (tobj = obj; tobj != NULL;
2183
			    tobj = tobj->backing_object) {
2184
				VM_OBJECT_RLOCK(tobj);
2185
				lobj = tobj;
2186
			}
2179
			if (obj->shadow_count == 1)
2187
			if (obj->shadow_count == 1)
2180
				kve->kve_private_resident =
2188
				kve->kve_private_resident =
2181
				    obj->resident_page_count;
2189
				    obj->resident_page_count;
2182
		}
2190
			if (vmmap_skip_res_cnt)
2183
		kve->kve_resident = 0;
2191
				goto skip_resident_count;
2184
		addr = entry->start;
2192
			for (addr = entry->start; addr < entry->end;) {
2185
		while (addr < entry->end) {
2193
				pindex = OFF_TO_IDX(entry->offset + addr -
2186
			locked_pa = 0;
2194
				    entry->start);
2187
			mincoreinfo = pmap_mincore(map->pmap, addr, &locked_pa);
2195
				for (tobj = obj;;) {
2188
			if (locked_pa != 0)
2196
					m = vm_page_lookup(tobj, pindex);
2189
				vm_page_unlock(PHYS_TO_VM_PAGE(locked_pa));
2197
					if (m != NULL)
2190
			if (mincoreinfo & MINCORE_INCORE)
2198
						break;
2191
				kve->kve_resident++;
2199
					if (tobj->backing_object == NULL)
2192
			if (mincoreinfo & MINCORE_SUPER)
2200
						break;
2193
				kve->kve_flags |= KVME_FLAG_SUPER;
2201
					pindex += OFF_TO_IDX(
2194
			addr += PAGE_SIZE;
2202
					    tobj->backing_object_offset);
2195
		}
2203
					tobj = tobj->backing_object;
2196
2204
				}
2197
		for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
2205
				if (m == NULL) {
2198
			if (tobj != obj)
2206
					addr += PAGE_SIZE;
2199
				VM_OBJECT_RLOCK(tobj);
2207
					continue;
2200
			if (lobj != obj)
2208
				}
2201
				VM_OBJECT_RUNLOCK(lobj);
2209
				if (m->psind != 0)
2202
			lobj = tobj;
2210
					kve->kve_flags |= KVME_FLAG_SUPER;
2211
				clp = addr + pagesizes[m->psind] <= entry->end ?
2212
				    pagesizes[m->psind] : entry->end - addr;
2213
				kve->kve_resident += clp / PAGE_SIZE;
2214
				addr += pagesizes[m->psind];
2215
			}
2216
skip_resident_count:
2217
			for (tobj = obj; tobj != NULL;
2218
			    tobj = tobj->backing_object) {
2219
				if (tobj != obj && tobj != lobj)
2220
					VM_OBJECT_RUNLOCK(tobj);
2221
			}
2222
		} else {
2223
			lobj = NULL;
2203
		}
2224
		}
2204
2225
2205
		kve->kve_start = entry->start;
2226
		kve->kve_start = entry->start;
Lines 2229-2235 kern_proc_vmmap_out(struct proc *p, struct sbuf *sb) Link Here
2229
2250
2230
		freepath = NULL;
2251
		freepath = NULL;
2231
		fullpath = "";
2252
		fullpath = "";
2232
		if (lobj) {
2253
		if (lobj != NULL) {
2233
			vp = NULL;
2254
			vp = NULL;
2234
			switch (lobj->type) {
2255
			switch (lobj->type) {
2235
			case OBJT_DEFAULT:
2256
			case OBJT_DEFAULT:
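
The heart of this hunk, reassembled from the new-side lines: the per-page pmap_mincore() loop is replaced by a walk of the VM object backing chain, which counts a whole superpage in one step and can be skipped entirely via the new kern.proc_vmmap_skip_resident_count sysctl (default on):

			if (vmmap_skip_res_cnt)
				goto skip_resident_count;
			for (addr = entry->start; addr < entry->end;) {
				pindex = OFF_TO_IDX(entry->offset + addr -
				    entry->start);
				for (tobj = obj;;) {
					m = vm_page_lookup(tobj, pindex);
					if (m != NULL)
						break;
					if (tobj->backing_object == NULL)
						break;
					pindex += OFF_TO_IDX(
					    tobj->backing_object_offset);
					tobj = tobj->backing_object;
				}
				if (m == NULL) {
					addr += PAGE_SIZE;
					continue;
				}
				if (m->psind != 0)
					kve->kve_flags |= KVME_FLAG_SUPER;
				clp = addr + pagesizes[m->psind] <= entry->end ?
				    pagesizes[m->psind] : entry->end - addr;
				kve->kve_resident += clp / PAGE_SIZE;
				addr += pagesizes[m->psind];
			}
skip_resident_count:

Note the stride: a resident page advances addr by pagesizes[m->psind], crediting one clamped superpage's worth of resident pages at a time, while a miss advances by PAGE_SIZE.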
(-)b/sys/kern/kern_resource.c (+11 lines)
Lines 1212-1217 lim_cur(struct proc *p, int which) Link Here
1212
	return (rl.rlim_cur);
1212
	return (rl.rlim_cur);
1213
}
1213
}
1214
1214
1215
rlim_t
1216
lim_cur_unlocked(struct proc *p, int which)
1217
{
1218
	struct rlimit rl;
1219
1220
	PROC_LOCK(p);
1221
	lim_rlimit(p, which, &rl);
1222
	PROC_UNLOCK(p);
1223
	return (rl.rlim_cur);
1224
}
1225
1215
/*
1226
/*
1216
 * Return a copy of the entire rlimit structure for the system limit
1227
 * Return a copy of the entire rlimit structure for the system limit
1217
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
1228
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
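
A hedged usage sketch for the new helper (the caller below is hypothetical, not part of the patch). lim_cur() requires the caller to hold the proc lock; lim_cur_unlocked() takes and drops it internally:

	rlim_t n;

	PROC_LOCK(p);
	n = lim_cur(p, RLIMIT_NOFILE);		/* lock held by caller */
	PROC_UNLOCK(p);

	n = lim_cur_unlocked(p, RLIMIT_NOFILE);	/* equivalent, lock inside */

This is what lets getmaxfd() in the kern_descrip.c hunk drop its own locking boilerplate.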
(-)b/sys/kern/sys_pipe.c (-5 / +5 lines)
Lines 1293-1305 pipe_write(fp, uio, active_cred, flags, td) Link Here
1293
	}
1293
	}
1294
1294
1295
	/*
1295
	/*
1296
	 * Don't return EPIPE if I/O was successful
1296
	 * Don't return EPIPE if any byte was written.
1297
	 * EINTR and other interrupts are handled by generic I/O layer.
1298
	 * Do not pretend that I/O succeeded for obvious user error
1299
	 * like EFAULT.
1297
	 */
1300
	 */
1298
	if ((wpipe->pipe_buffer.cnt == 0) &&
1301
	if (uio->uio_resid != orig_resid && error == EPIPE)
1299
	    (uio->uio_resid == 0) &&
1300
	    (error == EPIPE)) {
1301
		error = 0;
1302
		error = 0;
1302
	}
1303
1303
1304
	if (error == 0)
1304
	if (error == 0)
1305
		vfs_timestamp(&wpipe->pipe_mtime);
1305
		vfs_timestamp(&wpipe->pipe_mtime);
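
Reassembled new-side tail of pipe_write():

	/*
	 * Don't return EPIPE if any byte was written.
	 * EINTR and other interrupts are handled by generic I/O layer.
	 * Do not pretend that I/O succeeded for obvious user error
	 * like EFAULT.
	 */
	if (uio->uio_resid != orig_resid && error == EPIPE)
		error = 0;

	if (error == 0)
		vfs_timestamp(&wpipe->pipe_mtime);

The old test cleared EPIPE only when the pipe buffer had drained and the entire request had been written; the new test clears it whenever a partial write occurred, while still reporting errors such as EFAULT that indicate a genuine caller mistake.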
(-)b/sys/kern/vfs_vnops.c (-5 / +4 lines)
Lines 2177-2188 vn_utimes_perm(struct vnode *vp, struct vattr *vap, struct ucred *cred, Link Here
2177
{
2177
{
2178
	int error;
2178
	int error;
2179
2179
2180
	error = VOP_ACCESSX(vp, VWRITE_ATTRIBUTES, cred, td);
2181
2182
	/*
2180
	/*
2183
	 * From utimes(2):
2181
	 * Grant permission if the caller is the owner of the file, or
2184
	 * Grant permission if the caller is the owner of the file or
2182
	 * the super-user, or has ACL_WRITE_ATTRIBUTES permission on
2185
	 * the super-user.  If the time pointer is null, then write
2183
	 * the file.  If the time pointer is null, then write
2186
	 * permission on the file is also sufficient.
2184
	 * permission on the file is also sufficient.
2187
	 *
2185
	 *
2188
	 * From NFSv4.1, draft 21, 6.2.1.3.1, Discussion of Mask Attributes:
2186
	 * From NFSv4.1, draft 21, 6.2.1.3.1, Discussion of Mask Attributes:
Lines 2190-2195 vn_utimes_perm(struct vnode *vp, struct vattr *vap, struct ucred *cred, Link Here
2190
	 * will be allowed to set the times [..] to the current
2188
	 * will be allowed to set the times [..] to the current
2191
	 * server time.
2189
	 * server time.
2192
	 */
2190
	 */
2191
	error = VOP_ACCESSX(vp, VWRITE_ATTRIBUTES, cred, td);
2193
	if (error != 0 && (vap->va_vaflags & VA_UTIMES_NULL) != 0)
2192
	if (error != 0 && (vap->va_vaflags & VA_UTIMES_NULL) != 0)
2194
		error = VOP_ACCESS(vp, VWRITE, cred, td);
2193
		error = VOP_ACCESS(vp, VWRITE, cred, td);
2195
	return (error);
2194
	return (error);
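
Reassembled (NFSv4.1 paragraph elided), the new vn_utimes_perm() performs the access check after the explanatory comment rather than at function entry; the behaviour is unchanged, the code just reads straight down:

	/*
	 * From utimes(2):
	 * Grant permission if the caller is the owner of the file, or
	 * the super-user, or has ACL_WRITE_ATTRIBUTES permission on
	 * the file.  If the time pointer is null, then write
	 * permission on the file is also sufficient.
	 * ...
	 */
	error = VOP_ACCESSX(vp, VWRITE_ATTRIBUTES, cred, td);
	if (error != 0 && (vap->va_vaflags & VA_UTIMES_NULL) != 0)
		error = VOP_ACCESS(vp, VWRITE, cred, td);
	return (error);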
(-)b/sys/sys/resourcevar.h (+1 lines)
Lines 128-133 struct plimit Link Here
128
	*lim_alloc(void);
128
	*lim_alloc(void);
129
void	 lim_copy(struct plimit *dst, struct plimit *src);
129
void	 lim_copy(struct plimit *dst, struct plimit *src);
130
rlim_t	 lim_cur(struct proc *p, int which);
130
rlim_t	 lim_cur(struct proc *p, int which);
131
rlim_t	 lim_cur_unlocked(struct proc *p, int which);
131
void	 lim_fork(struct proc *p1, struct proc *p2);
132
void	 lim_fork(struct proc *p1, struct proc *p2);
132
void	 lim_free(struct plimit *limp);
133
void	 lim_free(struct plimit *limp);
133
struct plimit
134
struct plimit
(-)b/sys/vm/vm_fault.c (-26 / +43 lines)
Lines 1252-1281 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map, Link Here
1252
	src_object = src_entry->object.vm_object;
1252
	src_object = src_entry->object.vm_object;
1253
	src_pindex = OFF_TO_IDX(src_entry->offset);
1253
	src_pindex = OFF_TO_IDX(src_entry->offset);
1254
1254
1255
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
1256
	    ("vm_fault_copy_entry: vm_object not NULL"));
1255
	if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
1257
	if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
1256
		dst_object = src_object;
1258
		dst_object = src_object;
1257
		vm_object_reference(dst_object);
1259
		vm_object_reference(dst_object);
1258
	} else {
1260
	} else {
1259
		/*
1261
		/*
1260
		 * Create the top-level object for the destination entry. (Doesn't
1262
		 * Create the top-level object for the destination
1261
		 * actually shadow anything - we copy the pages directly.)
1263
		 * entry. (Doesn't actually shadow anything - we copy
1264
		 * the pages directly.)
1262
		 */
1265
		 */
1263
		dst_object = vm_object_allocate(OBJT_DEFAULT,
1266
		vm_object_shadow(&dst_entry->object.vm_object,
1264
		    OFF_TO_IDX(dst_entry->end - dst_entry->start));
1267
		    &dst_entry->offset, OFF_TO_IDX(dst_entry->end -
1268
		    dst_entry->start));
1269
		dst_object = dst_entry->object.vm_object;
1265
#if VM_NRESERVLEVEL > 0
1270
#if VM_NRESERVLEVEL > 0
1266
		dst_object->flags |= OBJ_COLORED;
1271
		if (dst_object != src_object) {
1267
		dst_object->pg_color = atop(dst_entry->start);
1272
			dst_object->flags |= OBJ_COLORED;
1273
			dst_object->pg_color = atop(dst_entry->start);
1274
		}
1268
#endif
1275
#endif
1276
1277
		/*
1278
		 * If not an upgrade, then enter the mappings in the
1279
		 * pmap as read and/or execute accesses.  Otherwise,
1280
		 * enter them as write accesses.
1281
		 *
1282
		 * A writeable large page mapping is only created if
1283
		 * all of the constituent small page mappings are
1284
		 * modified. Marking PTEs as modified on inception
1285
		 * allows promotion to happen without taking
1286
		 * potentially large number of soft faults.
1287
		 */
1288
		access &= ~VM_PROT_WRITE;
1269
	}
1289
	}
1290
	/*
1291
	 * dst_entry->offset is either left unchanged in the upgrade
1292
	 * case, or vm_object_shadow takes care of recalculating the
1293
	 * offset depending on creation of the new object.
1294
	 */
1270
1295
1271
	VM_OBJECT_WLOCK(dst_object);
1296
	/*
1272
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
1297
	 * This can only happen in the upgrade case, due to the
1273
	    ("vm_fault_copy_entry: vm_object not NULL"));
1298
	 * src_object reference bump above; it means that all pages
1274
	if (src_object != dst_object) {
1299
	 * are private already.
1275
		dst_entry->object.vm_object = dst_object;
1300
	 */
1276
		dst_entry->offset = 0;
1301
	if (dst_object == src_object &&
1277
		dst_object->charge = dst_entry->end - dst_entry->start;
1302
	    (src_entry->protection & VM_PROT_WRITE) == 0) {
1303
		KASSERT(upgrade, ("XXX"));
1304
		goto uncow;
1278
	}
1305
	}
1306
1307
	VM_OBJECT_WLOCK(dst_object);
1279
	if (fork_charge != NULL) {
1308
	if (fork_charge != NULL) {
1280
		KASSERT(dst_entry->cred == NULL,
1309
		KASSERT(dst_entry->cred == NULL,
1281
		    ("vm_fault_copy_entry: leaked swp charge"));
1310
		    ("vm_fault_copy_entry: leaked swp charge"));
Lines 1290-1308 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map, Link Here
1290
	}
1319
	}
1291
1320
1292
	/*
1321
	/*
1293
	 * If not an upgrade, then enter the mappings in the pmap as
1294
	 * read and/or execute accesses.  Otherwise, enter them as
1295
	 * write accesses.
1296
	 *
1297
	 * A writeable large page mapping is only created if all of
1298
	 * the constituent small page mappings are modified. Marking
1299
	 * PTEs as modified on inception allows promotion to happen
1300
	 * without taking potentially large number of soft faults.
1301
	 */
1302
	if (!upgrade)
1303
		access &= ~VM_PROT_WRITE;
1304
1305
	/*
1306
	 * Loop through all of the virtual pages within the entry's
1322
	 * Loop through all of the virtual pages within the entry's
1307
	 * range, copying each page from the source object to the
1323
	 * range, copying each page from the source object to the
1308
	 * destination object.  Since the source is wired, those pages
1324
	 * destination object.  Since the source is wired, those pages
Lines 1408-1413 again: Link Here
1408
	}
1424
	}
1409
	VM_OBJECT_WUNLOCK(dst_object);
1425
	VM_OBJECT_WUNLOCK(dst_object);
1410
	if (upgrade) {
1426
	if (upgrade) {
1427
uncow:
1411
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
1428
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
1412
		vm_object_deallocate(src_object);
1429
		vm_object_deallocate(src_object);
1413
	}
1430
	}
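
Two related changes in this hunk are easier to see reassembled. First, the destination object now comes from vm_object_shadow(), which may decide that no new object is needed and hand back src_object itself:

		vm_object_shadow(&dst_entry->object.vm_object,
		    &dst_entry->offset, OFF_TO_IDX(dst_entry->end -
		    dst_entry->start));
		dst_object = dst_entry->object.vm_object;

Second, when that happens and the source entry was never writable, every page is already private, so the page-copy loop is bypassed entirely and control jumps straight to the COW-flag cleanup:

	if (dst_object == src_object &&
	    (src_entry->protection & VM_PROT_WRITE) == 0) {
		KASSERT(upgrade, ("XXX"));
		goto uncow;
	}
	...
	if (upgrade) {
uncow:
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
		vm_object_deallocate(src_object);
	}

The KASSERT message is still the placeholder "XXX" from the work in progress.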
(-)b/sys/vm/vm_object.c (-17 / +57 lines)
Lines 2096-2112 boolean_t Link Here
2096
vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
2096
vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
2097
    vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
2097
    vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
2098
{
2098
{
2099
	vm_pindex_t next_pindex;
2099
	vm_object_t shadow_object;
2100
	vm_page_t m;
2101
	vm_pindex_t next_pindex, pi;
2102
	boolean_t ret;
2100
2103
2101
	if (prev_object == NULL)
2104
	if (prev_object == NULL)
2102
		return (TRUE);
2105
		return (TRUE);
2106
	ret = FALSE;
2103
	VM_OBJECT_WLOCK(prev_object);
2107
	VM_OBJECT_WLOCK(prev_object);
2104
	if ((prev_object->type != OBJT_DEFAULT &&
2108
	if ((prev_object->type != OBJT_DEFAULT &&
2105
	    prev_object->type != OBJT_SWAP) ||
2109
	    prev_object->type != OBJT_SWAP) ||
2106
	    (prev_object->flags & OBJ_TMPFS) != 0) {
2110
	    (prev_object->flags & OBJ_TMPFS) != 0)
2107
		VM_OBJECT_WUNLOCK(prev_object);
2111
		goto out;
2108
		return (FALSE);
2109
	}
2110
2112
2111
	/*
2113
	/*
2112
	 * Try to collapse the object first
2114
	 * Try to collapse the object first
Lines 2114-2137 vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, Link Here
2114
	vm_object_collapse(prev_object);
2116
	vm_object_collapse(prev_object);
2115
2117
2116
	/*
2118
	/*
2117
	 * Can't coalesce if: . more than one reference . paged out . shadows
2119
	 * Can't coalesce if shadows another object, which means that
2118
	 * another object . has a copy elsewhere (any of which mean that the
2120
	 * the pages not mapped to prev_entry may be in use anyway.
2119
	 * pages not mapped to prev_entry may be in use anyway)
2120
	 */
2121
	 */
2121
	if (prev_object->backing_object != NULL) {
2122
	if (prev_object->backing_object != NULL)
2122
		VM_OBJECT_WUNLOCK(prev_object);
2123
		goto out;
2123
		return (FALSE);
2124
	}
2125
2124
2126
	prev_size >>= PAGE_SHIFT;
2125
	prev_size >>= PAGE_SHIFT;
2127
	next_size >>= PAGE_SHIFT;
2126
	next_size >>= PAGE_SHIFT;
2128
	next_pindex = OFF_TO_IDX(prev_offset) + prev_size;
2127
	next_pindex = OFF_TO_IDX(prev_offset) + prev_size;
2129
2128
2130
	if ((prev_object->ref_count > 1) &&
2129
	/*
2131
	    (prev_object->size != next_pindex)) {
2130
	 * If the object has more than one reference or is larger than the
2132
		VM_OBJECT_WUNLOCK(prev_object);
2131
	 * end of the previous mapping, still allow coalescing map
2133
		return (FALSE);
2132
	 * entries for the case when this is due to other mappings of
2133
	 * the object into the current address space.
2134
	 */
2135
	if (prev_object->ref_count > 1 && prev_object->size != next_pindex) {
2136
		/*
2137
		 * Only one mapping allowed, otherwise coalesce could
2138
		 * result in contradictory content in the regions.
2139
		 */
2140
		if ((prev_object->flags & OBJ_ONEMAPPING) == 0)
2141
			goto out;
2142
2143
		/* No pages in the region, either resident ... */
2144
		m = vm_page_find_least(prev_object, next_pindex);
2145
		if (m != NULL && m->pindex < next_pindex + next_size)
2146
			goto out;
2147
		/* ... or swapped out. */
2148
		if (prev_object->type == OBJT_SWAP) {
2149
			for (pi = next_pindex; pi < next_pindex + next_size;
2150
			    pi++) {
2151
				if (vm_pager_has_page(prev_object, pi, NULL,
2152
				    NULL))
2153
					goto out;
2154
			}
2155
		}
2156
2157
		/*
2158
		 * Region must not be shadowed, otherwise the
2159
		 * instantiated page in our (backing) object could
2160
		 * leak to the shadow.
2161
		 */
2162
		LIST_FOREACH(shadow_object, &prev_object->shadow_head,
2163
		    shadow_list) {
2164
			KASSERT(shadow_object->backing_object == prev_object,
2165
			    ("corrupted shadow"));
2166
			if (shadow_object->backing_object_offset <
2167
			    next_pindex + next_size &&
2168
			    shadow_object->backing_object_offset +
2169
			    shadow_object->size > next_pindex)
2170
				goto out;
2171
		}
2134
	}
2172
	}
2173
	ret = TRUE;
2135
2174
2136
	/*
2175
	/*
2137
	 * Account for the charge.
2176
	 * Account for the charge.
Lines 2183-2190 vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, Link Here
2183
	if (next_pindex + next_size > prev_object->size)
2222
	if (next_pindex + next_size > prev_object->size)
2184
		prev_object->size = next_pindex + next_size;
2223
		prev_object->size = next_pindex + next_size;
2185
2224
2225
out:
2186
	VM_OBJECT_WUNLOCK(prev_object);
2226
	VM_OBJECT_WUNLOCK(prev_object);
2187
	return (TRUE);
2227
	return (ret);
2188
}
2228
}
2189
2229
2190
void
2230
void
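
Where the old vm_object_coalesce() simply refused whenever ref_count > 1, the new code still allows coalescing when the extra references come from other mappings of the same object, subject to three safety checks, all funnelled through the new single-exit out: label: the object must have OBJ_ONEMAPPING set, the target range must contain no resident or swapped-out pages, and no shadow object may overlap the range. Condensed from the new-side lines (the swap-pager and shadow-overlap loops are elided here; see the hunk above):

	if (prev_object->ref_count > 1 && prev_object->size != next_pindex) {
		if ((prev_object->flags & OBJ_ONEMAPPING) == 0)
			goto out;
		/* No pages in the region, either resident ... */
		m = vm_page_find_least(prev_object, next_pindex);
		if (m != NULL && m->pindex < next_pindex + next_size)
			goto out;
		/* ... or swapped out, nor any overlapping shadow. */
	}
	ret = TRUE;

Anything that bails out leaves ret == FALSE, so the function's single unlock-and-return path reports failure.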
(-)b/sys/vm/vm_page.h (+1 lines)
Lines 227-232 struct vm_domain { Link Here
227
	long vmd_segs;	/* bitmask of the segments */
227
	long vmd_segs;	/* bitmask of the segments */
228
	boolean_t vmd_oom;
228
	boolean_t vmd_oom;
229
	int vmd_pass;	/* local pagedaemon pass */
229
	int vmd_pass;	/* local pagedaemon pass */
230
	int vmd_oom_seq;
230
	struct vm_page vmd_marker; /* marker for pagedaemon private use */
231
	struct vm_page vmd_marker; /* marker for pagedaemon private use */
231
};
232
};
232
233
(-)b/sys/vm/vm_pageout.c (-14 / +40 lines)
Lines 117-123 __FBSDID("$FreeBSD$"); Link Here
117
static void vm_pageout(void);
117
static void vm_pageout(void);
118
static int vm_pageout_clean(vm_page_t);
118
static int vm_pageout_clean(vm_page_t);
119
static void vm_pageout_scan(struct vm_domain *vmd, int pass);
119
static void vm_pageout_scan(struct vm_domain *vmd, int pass);
120
static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass);
120
static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
121
    int starting_page_shortage);
121
122
122
struct proc *pageproc;
123
struct proc *pageproc;
123
124
Lines 147-152 int vm_pages_needed; /* Event on which pageout daemon sleeps */ Link Here
147
int vm_pageout_deficit;		/* Estimated number of pages deficit */
148
int vm_pageout_deficit;		/* Estimated number of pages deficit */
148
int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */
149
int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */
149
int vm_pageout_wakeup_thresh;
150
int vm_pageout_wakeup_thresh;
151
static int vm_pageout_oom_seq = 24;
150
152
151
#if !defined(NO_SWAPPING)
153
#if !defined(NO_SWAPPING)
152
static int vm_pageout_req_swapout;	/* XXX */
154
static int vm_pageout_req_swapout;	/* XXX */
Lines 206-211 static int pageout_lock_miss; Link Here
206
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
208
SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
207
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
209
	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
208
210
211
SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
212
	CTLFLAG_RW, &vm_pageout_oom_seq, 0,
213
	"side-to-side calls to oom detector to start OOM");
214
209
#define VM_PAGEOUT_PAGE_COUNT 16
215
#define VM_PAGEOUT_PAGE_COUNT 16
210
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
216
int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
211
217
Lines 910-916 vm_pageout_scan(struct vm_domain *vmd, int pass) Link Here
910
	vm_page_t m, next;
916
	vm_page_t m, next;
911
	struct vm_pagequeue *pq;
917
	struct vm_pagequeue *pq;
912
	vm_object_t object;
918
	vm_object_t object;
913
	int act_delta, addl_page_shortage, deficit, maxscan, page_shortage;
919
	int act_delta, addl_page_shortage, deficit, maxscan;
920
	int page_shortage, starting_page_shortage;
914
	int vnodes_skipped = 0;
921
	int vnodes_skipped = 0;
915
	int maxlaunder;
922
	int maxlaunder;
916
	int lockmode;
923
	int lockmode;
Lines 951-956 vm_pageout_scan(struct vm_domain *vmd, int pass) Link Here
951
		page_shortage = vm_paging_target() + deficit;
958
		page_shortage = vm_paging_target() + deficit;
952
	} else
959
	} else
953
		page_shortage = deficit = 0;
960
		page_shortage = deficit = 0;
961
	starting_page_shortage = page_shortage;
954
962
955
	/*
963
	/*
956
	 * maxlaunder limits the number of dirty pages we flush per scan.
964
	 * maxlaunder limits the number of dirty pages we flush per scan.
Lines 1309-1314 relock_queues: Link Here
1309
	vm_pagequeue_unlock(pq);
1317
	vm_pagequeue_unlock(pq);
1310
1318
1311
	/*
1319
	/*
1320
	 * If we are critically low on one of RAM or swap and low on
1321
	 * the other, kill the largest process.  However, we avoid
1322
	 * doing this on the first pass in order to give ourselves a
1323
	 * chance to flush out dirty vnode-backed pages and to allow
1324
	 * active pages to be moved to the inactive queue and reclaimed.
1325
	 */
1326
	vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
1327
1328
	/*
1312
	 * Compute the number of pages we want to try to move from the
1329
	 * Compute the number of pages we want to try to move from the
1313
	 * active queue to the inactive queue.
1330
	 * active queue to the inactive queue.
1314
	 */
1331
	 */
Lines 1431-1445 relock_queues: Link Here
1431
			vm_req_vmdaemon(VM_SWAP_NORMAL);
1448
			vm_req_vmdaemon(VM_SWAP_NORMAL);
1432
#endif
1449
#endif
1433
	}
1450
	}
1434
1435
	/*
1436
	 * If we are critically low on one of RAM or swap and low on
1437
	 * the other, kill the largest process.  However, we avoid
1438
	 * doing this on the first pass in order to give ourselves a
1439
	 * chance to flush out dirty vnode-backed pages and to allow
1440
	 * active pages to be moved to the inactive queue and reclaimed.
1441
	 */
1442
	vm_pageout_mightbe_oom(vmd, pass);
1443
}
1451
}
1444
1452
1445
static int vm_pageout_oom_vote;
1453
static int vm_pageout_oom_vote;
Lines 1450-1467 static int vm_pageout_oom_vote; Link Here
1450
 * failed to reach free target is premature.
1458
 * failed to reach free target is premature.
1451
 */
1459
 */
1452
static void
1460
static void
1453
vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass)
1461
vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
1462
    int starting_page_shortage)
1454
{
1463
{
1455
	int old_vote;
1464
	int old_vote;
1456
1465
1457
	if (pass <= 1 || !((swap_pager_avail < 64 && vm_page_count_min()) ||
1466
	if (starting_page_shortage <= 0 || starting_page_shortage !=
1458
	    (swap_pager_full && vm_paging_target() > 0))) {
1467
	    page_shortage) {
1468
#if 0
1469
		if (vmd->vmd_oom_seq != 0)
1470
			printf("CLR oom_seq %d ps %d sps %d\n", vmd->vmd_oom_seq, page_shortage, starting_page_shortage);
1471
#endif
1472
		vmd->vmd_oom_seq = 0;
1473
	} else
1474
		vmd->vmd_oom_seq++;
1475
	if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
1459
		if (vmd->vmd_oom) {
1476
		if (vmd->vmd_oom) {
1460
			vmd->vmd_oom = FALSE;
1477
			vmd->vmd_oom = FALSE;
1461
			atomic_subtract_int(&vm_pageout_oom_vote, 1);
1478
			atomic_subtract_int(&vm_pageout_oom_vote, 1);
1462
		}
1479
		}
1463
		return;
1480
		return;
1464
	}
1481
	}
1482
#if 0
1483
printf("OOM oom_seq %d ps %d sps %d\n", vmd->vmd_oom_seq, page_shortage, starting_page_shortage);
1484
#endif
1485
1486
	/*
1487
	 * Do not follow the call sequence until the OOM condition is
1488
	 * cleared.
1489
	 */
1490
	vmd->vmd_oom_seq = 0;
1465
1491
1466
	if (vmd->vmd_oom)
1492
	if (vmd->vmd_oom)
1467
		return;
1493
		return;
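
With the #if 0 debugging printfs dropped, the new OOM decision logic reads:

	if (starting_page_shortage <= 0 || starting_page_shortage !=
	    page_shortage)
		vmd->vmd_oom_seq = 0;
	else
		vmd->vmd_oom_seq++;
	if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
		if (vmd->vmd_oom) {
			vmd->vmd_oom = FALSE;
			atomic_subtract_int(&vm_pageout_oom_vote, 1);
		}
		return;
	}

A scan counts toward OOM only if it started with a shortage and made no progress at all (page_shortage unchanged); any progress resets vmd_oom_seq. Only after vm_pageout_oom_seq consecutive no-progress scans (default 24, tunable via the vm.pageout_oom_seq sysctl) does the domain cast its OOM vote, replacing the old heuristic based on swap_pager_avail and vm_page_count_min().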
