View | Details | Raw Unified | Return to bug 200992 | Differences between
and this patch

Collapse All | Expand All

(-)b/bin/dd/dd.c (-16 / +13 lines)
Lines 254-280 getfdtype(IO *io) Link Here
254
	int type;
254
	int type;
255
255
256
	if (fstat(io->fd, &sb) == -1)
256
	if (fstat(io->fd, &sb) == -1)
257
		err(1, "%s", io->name);
257
		return;
258
	if (S_ISREG(sb.st_mode))
258
	if (S_ISREG(sb.st_mode))
259
		io->flags |= ISTRUNC;
259
		io->flags |= ISTRUNC;
260
	if (S_ISCHR(sb.st_mode) || S_ISBLK(sb.st_mode)) { 
260
	if (S_ISCHR(sb.st_mode) || S_ISBLK(sb.st_mode)) {
261
		if (ioctl(io->fd, FIODTYPE, &type) == -1) {
261
		if (S_ISCHR(sb.st_mode))
262
			err(1, "%s", io->name);
262
			io->flags |= ISCHR;
263
		} else {
263
		if (ioctl(io->fd, FIODTYPE, &type) == -1)
264
			if (type & D_TAPE)
264
			return;
265
				io->flags |= ISTAPE;
265
		if (type & D_TAPE)
266
			else if (type & (D_DISK | D_MEM))
266
			io->flags |= ISTAPE;
267
				io->flags |= ISSEEK;
267
		else if (type & (D_DISK | D_MEM))
268
			if (S_ISCHR(sb.st_mode) && (type & D_TAPE) == 0)
268
			io->flags |= ISSEEK;
269
				io->flags |= ISCHR;
270
		}
271
		return;
269
		return;
272
	}
270
	}
273
	errno = 0;
271
	if (lseek(io->fd, (off_t)0, SEEK_CUR) != -1)
274
	if (lseek(io->fd, (off_t)0, SEEK_CUR) == -1 && errno == ESPIPE)
275
		io->flags |= ISPIPE;
276
	else
277
		io->flags |= ISSEEK;
272
		io->flags |= ISSEEK;
273
	else if (errno == ESPIPE)
274
		io->flags |= ISPIPE;
278
}
275
}
279
276
280
static void
277
static void
(-)b/contrib/gdb/gdb/gdbthread.h (+2 lines)
Lines 75-80 struct thread_info Link Here
75
  struct private_thread_info *private;
75
  struct private_thread_info *private;
76
};
76
};
77
77
78
extern int thread_list_empty (void);
79
78
/* Create an empty thread list, or empty the existing one.  */
80
/* Create an empty thread list, or empty the existing one.  */
79
extern void init_thread_list (void);
81
extern void init_thread_list (void);
80
82
(-)b/contrib/gdb/gdb/infrun.c (-2 / +16 lines)
Lines 384-392 follow_inferior_reset_breakpoints (void) Link Here
384
  insert_breakpoints ();
384
  insert_breakpoints ();
385
}
385
}
386
386
387
void 
388
clear_step_resume_breakpoint_thread (void)
389
{
390
  if (step_resume_breakpoint)
391
    step_resume_breakpoint->thread = -1;
392
}
393
394
void 
395
clear_step_resume_breakpoint (void)
396
{
397
  step_resume_breakpoint = NULL;
398
}
399
387
/* EXECD_PATHNAME is assumed to be non-NULL. */
400
/* EXECD_PATHNAME is assumed to be non-NULL. */
388
401
389
static void
402
void
390
follow_exec (int pid, char *execd_pathname)
403
follow_exec (int pid, char *execd_pathname)
391
{
404
{
392
  int saved_pid = pid;
405
  int saved_pid = pid;
Lines 1648-1654 handle_inferior_event (struct execution_control_state *ecs) Link Here
1648
1661
1649
      /* This causes the eventpoints and symbol table to be reset.  Must
1662
      /* This causes the eventpoints and symbol table to be reset.  Must
1650
         do this now, before trying to determine whether to stop. */
1663
         do this now, before trying to determine whether to stop. */
1651
      follow_exec (PIDGET (inferior_ptid), pending_follow.execd_pathname);
1664
      target_follow_exec (PIDGET (inferior_ptid), 
1665
			  pending_follow.execd_pathname);
1652
      xfree (pending_follow.execd_pathname);
1666
      xfree (pending_follow.execd_pathname);
1653
1667
1654
      stop_pc = read_pc_pid (ecs->ptid);
1668
      stop_pc = read_pc_pid (ecs->ptid);
(-)b/contrib/gdb/gdb/objfiles.c (-1 / +1 lines)
Lines 482-492 free_all_objfiles (void) Link Here
482
{
482
{
483
  struct objfile *objfile, *temp;
483
  struct objfile *objfile, *temp;
484
484
485
  clear_symtab_users ();
485
  ALL_OBJFILES_SAFE (objfile, temp)
486
  ALL_OBJFILES_SAFE (objfile, temp)
486
  {
487
  {
487
    free_objfile (objfile);
488
    free_objfile (objfile);
488
  }
489
  }
489
  clear_symtab_users ();
490
}
490
}
491
491
492
/* Relocate OBJFILE to NEW_OFFSETS.  There should be OBJFILE->NUM_SECTIONS
492
/* Relocate OBJFILE to NEW_OFFSETS.  There should be OBJFILE->NUM_SECTIONS
(-)b/contrib/gdb/gdb/target.c (-2 / +48 lines)
Lines 1307-1312 target_async_mask (int mask) Link Here
1307
}
1307
}
1308
1308
1309
/* Look through the list of possible targets for a target that can
1309
/* Look through the list of possible targets for a target that can
1310
   follow forks.  */
1311
1312
int
1313
target_follow_fork (int follow_child)
1314
{
1315
  struct target_ops *t;
1316
1317
  for (t = current_target.beneath; t != NULL; t = t->beneath)
1318
    {
1319
      if (t->to_follow_fork != NULL)
1320
	{
1321
	  int retval = t->to_follow_fork (t, follow_child);
1322
	  if (targetdebug)
1323
	    fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
1324
				follow_child, retval);
1325
	  return retval;
1326
	}
1327
    }
1328
1329
  /* Some target returned a fork event, but did not know how to follow it.  */
1330
  internal_error (__FILE__, __LINE__,
1331
		  "could not find a target to follow fork");
1332
}
1333
1334
void
1335
target_follow_exec (int pid, char *execd_pathname)
1336
{
1337
  struct target_ops *t;
1338
1339
  for (t = current_target.beneath; t != NULL; t = t->beneath)
1340
    {
1341
      if (t->to_follow_exec != NULL)
1342
	{
1343
	  t->to_follow_exec (pid, execd_pathname);
1344
	  if (targetdebug)
1345
	    fprintf_unfiltered (gdb_stdlog, "target_follow_exec (%d, %s)\n",
1346
				pid, execd_pathname);
1347
	  return;
1348
	}
1349
    }
1350
1351
  /* If target does not specify a follow_exec handler, call the default. */
1352
  follow_exec (pid, execd_pathname);
1353
}
1354
1355
/* Look through the list of possible targets for a target that can
1310
   execute a run or attach command without any other data.  This is
1356
   execute a run or attach command without any other data.  This is
1311
   used to locate the default process stratum.
1357
   used to locate the default process stratum.
1312
1358
Lines 2159-2167 debug_to_remove_vfork_catchpoint (int pid) Link Here
2159
}
2205
}
2160
2206
2161
static int
2207
static int
2162
debug_to_follow_fork (int follow_child)
2208
debug_to_follow_fork (struct target_ops* ops, int follow_child)
2163
{
2209
{
2164
  int retval =  debug_target.to_follow_fork (follow_child);
2210
  int retval =  debug_target.to_follow_fork (ops, follow_child);
2165
2211
2166
  fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2212
  fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2167
		      follow_child, retval);
2213
		      follow_child, retval);
(-)b/contrib/gdb/gdb/target.h (-3 / +5 lines)
Lines 362-368 struct target_ops Link Here
362
    int (*to_remove_fork_catchpoint) (int);
362
    int (*to_remove_fork_catchpoint) (int);
363
    int (*to_insert_vfork_catchpoint) (int);
363
    int (*to_insert_vfork_catchpoint) (int);
364
    int (*to_remove_vfork_catchpoint) (int);
364
    int (*to_remove_vfork_catchpoint) (int);
365
    int (*to_follow_fork) (int);
365
    int (*to_follow_fork) (struct target_ops*, int);
366
    void (*to_follow_exec) (int, char*);
366
    int (*to_insert_exec_catchpoint) (int);
367
    int (*to_insert_exec_catchpoint) (int);
367
    int (*to_remove_exec_catchpoint) (int);
368
    int (*to_remove_exec_catchpoint) (int);
368
    int (*to_reported_exec_events_per_exec_call) (void);
369
    int (*to_reported_exec_events_per_exec_call) (void);
Lines 761-768 extern void target_load (char *arg, int from_tty); Link Here
761
   This function returns 1 if the inferior should not be resumed
762
   This function returns 1 if the inferior should not be resumed
762
   (i.e. there is another event pending).  */
763
   (i.e. there is another event pending).  */
763
764
764
#define target_follow_fork(follow_child) \
765
int target_follow_fork (int follow_child);
765
     (*current_target.to_follow_fork) (follow_child)
766
766
767
/* On some targets, we can catch an inferior exec event when it
767
/* On some targets, we can catch an inferior exec event when it
768
   occurs.  These functions insert/remove an already-created
768
   occurs.  These functions insert/remove an already-created
Lines 1248-1251 extern void push_remote_target (char *name, int from_tty); Link Here
1248
/* Blank target vector entries are initialized to target_ignore. */
1248
/* Blank target vector entries are initialized to target_ignore. */
1249
void target_ignore (void);
1249
void target_ignore (void);
1250
1250
1251
void target_follow_exec (int pid, char *execd_pathname);
1252
1251
#endif /* !defined (TARGET_H) */
1253
#endif /* !defined (TARGET_H) */
(-)b/contrib/gdb/gdb/thread.c (+6 lines)
Lines 65-70 static void restore_current_thread (ptid_t); Link Here
65
static void switch_to_thread (ptid_t ptid);
65
static void switch_to_thread (ptid_t ptid);
66
static void prune_threads (void);
66
static void prune_threads (void);
67
67
68
int
69
thread_list_empty ()
70
{
71
  return thread_list == NULL;
72
}
73
68
void
74
void
69
delete_step_resume_breakpoint (void *arg)
75
delete_step_resume_breakpoint (void *arg)
70
{
76
{
(-)b/gnu/usr.bin/gdb/arch/amd64/Makefile (-1 / +1 lines)
Lines 2-8 Link Here
2
2
3
GENSRCS+= xm.h
3
GENSRCS+= xm.h
4
.if !defined(GDB_CROSS_DEBUGGER)
4
.if !defined(GDB_CROSS_DEBUGGER)
5
LIBSRCS+= fbsd-proc.c fbsd-threads.c gcore.c
5
LIBSRCS+= fbsd-nat.c fbsd-proc.c fbsd-threads.c gcore.c
6
LIBSRCS+= amd64-nat.c amd64bsd-nat.c amd64fbsd-nat.c
6
LIBSRCS+= amd64-nat.c amd64bsd-nat.c amd64fbsd-nat.c
7
.endif
7
.endif
8
LIBSRCS+= solib.c solib-svr4.c
8
LIBSRCS+= solib.c solib-svr4.c
(-)b/gnu/usr.bin/gdb/arch/amd64/init.c (+2 lines)
Lines 115-120 extern initialize_file_ftype _initialize_tui_out; Link Here
115
extern initialize_file_ftype _initialize_tui_regs;
115
extern initialize_file_ftype _initialize_tui_regs;
116
extern initialize_file_ftype _initialize_tui_stack;
116
extern initialize_file_ftype _initialize_tui_stack;
117
extern initialize_file_ftype _initialize_tui_win;
117
extern initialize_file_ftype _initialize_tui_win;
118
extern initialize_file_ftype _initialize_fbsdnat;
118
void
119
void
119
initialize_all_files (void)
120
initialize_all_files (void)
120
{
121
{
Lines 231-234 initialize_all_files (void) Link Here
231
  _initialize_tui_regs ();
232
  _initialize_tui_regs ();
232
  _initialize_tui_stack ();
233
  _initialize_tui_stack ();
233
  _initialize_tui_win ();
234
  _initialize_tui_win ();
235
  _initialize_fbsdnat ();
234
}
236
}
(-)b/gnu/usr.bin/gdb/arch/arm/Makefile (-1 / +1 lines)
Lines 1-7 Link Here
1
# $FreeBSD$
1
# $FreeBSD$
2
2
3
GENSRCS+= xm.h
3
GENSRCS+= xm.h
4
LIBSRCS+= armfbsd-nat.c
4
LIBSRCS+= armfbsd-nat.c fbsd-nat.c
5
LIBSRCS+= arm-tdep.c armfbsd-tdep.c solib.c solib-svr4.c
5
LIBSRCS+= arm-tdep.c armfbsd-tdep.c solib.c solib-svr4.c
6
.if !defined(GDB_CROSS_DEBUGGER)
6
.if !defined(GDB_CROSS_DEBUGGER)
7
LIBSRCS+= fbsd-threads.c
7
LIBSRCS+= fbsd-threads.c
(-)b/gnu/usr.bin/gdb/arch/arm/init.c (+2 lines)
Lines 113-118 extern initialize_file_ftype _initialize_tui_out; Link Here
113
extern initialize_file_ftype _initialize_tui_regs;
113
extern initialize_file_ftype _initialize_tui_regs;
114
extern initialize_file_ftype _initialize_tui_stack;
114
extern initialize_file_ftype _initialize_tui_stack;
115
extern initialize_file_ftype _initialize_tui_win;
115
extern initialize_file_ftype _initialize_tui_win;
116
extern initialize_file_ftype _initialize_fbsdnat;
116
void
117
void
117
initialize_all_files (void)
118
initialize_all_files (void)
118
{
119
{
Lines 225-228 initialize_all_files (void) Link Here
225
  _initialize_tui_regs ();
226
  _initialize_tui_regs ();
226
  _initialize_tui_stack ();
227
  _initialize_tui_stack ();
227
  _initialize_tui_win ();
228
  _initialize_tui_win ();
229
  _initialize_fbsdnat ();
228
}
230
}
(-)b/gnu/usr.bin/gdb/arch/i386/Makefile (-1 / +1 lines)
Lines 2-8 Link Here
2
2
3
GENSRCS+= xm.h
3
GENSRCS+= xm.h
4
.if !defined(GDB_CROSS_DEBUGGER)
4
.if !defined(GDB_CROSS_DEBUGGER)
5
LIBSRCS+= fbsd-proc.c fbsd-threads.c gcore.c
5
LIBSRCS+= fbsd-nat.c fbsd-proc.c fbsd-threads.c gcore.c
6
LIBSRCS+= i386-nat.c i386bsd-nat.c i386fbsd-nat.c
6
LIBSRCS+= i386-nat.c i386bsd-nat.c i386fbsd-nat.c
7
.endif
7
.endif
8
LIBSRCS+= solib.c solib-svr4.c
8
LIBSRCS+= solib.c solib-svr4.c
(-)b/gnu/usr.bin/gdb/arch/i386/init.c (+2 lines)
Lines 116-121 extern initialize_file_ftype _initialize_tui_out; Link Here
116
extern initialize_file_ftype _initialize_tui_regs;
116
extern initialize_file_ftype _initialize_tui_regs;
117
extern initialize_file_ftype _initialize_tui_stack;
117
extern initialize_file_ftype _initialize_tui_stack;
118
extern initialize_file_ftype _initialize_tui_win;
118
extern initialize_file_ftype _initialize_tui_win;
119
extern initialize_file_ftype _initialize_fbsdnat;
119
void
120
void
120
initialize_all_files (void)
121
initialize_all_files (void)
121
{
122
{
Lines 233-236 initialize_all_files (void) Link Here
233
  _initialize_tui_regs ();
234
  _initialize_tui_regs ();
234
  _initialize_tui_stack ();
235
  _initialize_tui_stack ();
235
  _initialize_tui_win ();
236
  _initialize_tui_win ();
237
  _initialize_fbsdnat ();
236
}
238
}
(-)b/gnu/usr.bin/gdb/arch/mips/Makefile (-1 / +1 lines)
Lines 4-10 Link Here
4
# XXX Should set DEFAULT_BFD_VEC based on target.
4
# XXX Should set DEFAULT_BFD_VEC based on target.
5
#
5
#
6
.if !defined(GDB_CROSS_DEBUGGER)
6
.if !defined(GDB_CROSS_DEBUGGER)
7
LIBSRCS+= mipsfbsd-nat.c fbsd-threads.c
7
LIBSRCS+= fbsd-nat.c mipsfbsd-nat.c fbsd-threads.c
8
.endif
8
.endif
9
LIBSRCS+= solib.c solib-svr4.c
9
LIBSRCS+= solib.c solib-svr4.c
10
LIBSRCS+= mips-tdep.c mipsfbsd-tdep.c fbsd-proc.c
10
LIBSRCS+= mips-tdep.c mipsfbsd-tdep.c fbsd-proc.c
(-)b/gnu/usr.bin/gdb/arch/mips/init.c (+2 lines)
Lines 112-117 extern initialize_file_ftype _initialize_tui_out; Link Here
112
extern initialize_file_ftype _initialize_tui_regs;
112
extern initialize_file_ftype _initialize_tui_regs;
113
extern initialize_file_ftype _initialize_tui_stack;
113
extern initialize_file_ftype _initialize_tui_stack;
114
extern initialize_file_ftype _initialize_tui_win;
114
extern initialize_file_ftype _initialize_tui_win;
115
extern initialize_file_ftype _initialize_fbsdnat;
115
void
116
void
116
initialize_all_files (void)
117
initialize_all_files (void)
117
{
118
{
Lines 230-233 initialize_all_files (void) Link Here
230
  _initialize_tui_regs ();
231
  _initialize_tui_regs ();
231
  _initialize_tui_stack ();
232
  _initialize_tui_stack ();
232
  _initialize_tui_win ();
233
  _initialize_tui_win ();
234
  _initialize_fbsdnat ();
233
}
235
}
(-)b/gnu/usr.bin/gdb/arch/powerpc/Makefile (-1 / +1 lines)
Lines 1-7 Link Here
1
# $FreeBSD$
1
# $FreeBSD$
2
2
3
.if !defined(GDB_CROSS_DEBUGGER)
3
.if !defined(GDB_CROSS_DEBUGGER)
4
LIBSRCS+= fbsd-proc.c fbsd-threads.c gcore.c
4
LIBSRCS+= fbsd-nat.c fbsd-proc.c fbsd-threads.c gcore.c
5
LIBSRCS+= ppcfbsd-nat.c
5
LIBSRCS+= ppcfbsd-nat.c
6
.endif
6
.endif
7
LIBSRCS+= solib.c solib-svr4.c
7
LIBSRCS+= solib.c solib-svr4.c
(-)b/gnu/usr.bin/gdb/arch/powerpc/init.c (+2 lines)
Lines 113-118 extern initialize_file_ftype _initialize_tui_out; Link Here
113
extern initialize_file_ftype _initialize_tui_regs;
113
extern initialize_file_ftype _initialize_tui_regs;
114
extern initialize_file_ftype _initialize_tui_stack;
114
extern initialize_file_ftype _initialize_tui_stack;
115
extern initialize_file_ftype _initialize_tui_win;
115
extern initialize_file_ftype _initialize_tui_win;
116
extern initialize_file_ftype _initialize_fbsdnat;
116
void
117
void
117
initialize_all_files (void)
118
initialize_all_files (void)
118
{
119
{
Lines 227-230 initialize_all_files (void) Link Here
227
  _initialize_tui_regs ();
228
  _initialize_tui_regs ();
228
  _initialize_tui_stack ();
229
  _initialize_tui_stack ();
229
  _initialize_tui_win ();
230
  _initialize_tui_win ();
231
  _initialize_fbsdnat ();
230
}
232
}
(-)b/gnu/usr.bin/gdb/arch/powerpc64/Makefile (-1 / +1 lines)
Lines 1-7 Link Here
1
# $FreeBSD$
1
# $FreeBSD$
2
2
3
.if !defined(GDB_CROSS_DEBUGGER)
3
.if !defined(GDB_CROSS_DEBUGGER)
4
LIBSRCS+= fbsd-proc.c fbsd-threads.c gcore.c
4
LIBSRCS+= fbsd-nat.c fbsd-proc.c fbsd-threads.c gcore.c
5
LIBSRCS+= ppcfbsd-nat.c
5
LIBSRCS+= ppcfbsd-nat.c
6
.endif
6
.endif
7
LIBSRCS+= solib.c solib-svr4.c
7
LIBSRCS+= solib.c solib-svr4.c
(-)b/gnu/usr.bin/gdb/arch/powerpc64/init.c (+2 lines)
Lines 113-118 extern initialize_file_ftype _initialize_tui_out; Link Here
113
extern initialize_file_ftype _initialize_tui_regs;
113
extern initialize_file_ftype _initialize_tui_regs;
114
extern initialize_file_ftype _initialize_tui_stack;
114
extern initialize_file_ftype _initialize_tui_stack;
115
extern initialize_file_ftype _initialize_tui_win;
115
extern initialize_file_ftype _initialize_tui_win;
116
extern initialize_file_ftype _initialize_fbsdnat;
116
void
117
void
117
initialize_all_files (void)
118
initialize_all_files (void)
118
{
119
{
Lines 227-230 initialize_all_files (void) Link Here
227
  _initialize_tui_regs ();
228
  _initialize_tui_regs ();
228
  _initialize_tui_stack ();
229
  _initialize_tui_stack ();
229
  _initialize_tui_win ();
230
  _initialize_tui_win ();
231
  _initialize_fbsdnat ();
230
}
232
}
(-)b/gnu/usr.bin/gdb/arch/sparc64/init.c (+2 lines)
Lines 114-119 extern initialize_file_ftype _initialize_tui_out; Link Here
114
extern initialize_file_ftype _initialize_tui_regs;
114
extern initialize_file_ftype _initialize_tui_regs;
115
extern initialize_file_ftype _initialize_tui_stack;
115
extern initialize_file_ftype _initialize_tui_stack;
116
extern initialize_file_ftype _initialize_tui_win;
116
extern initialize_file_ftype _initialize_tui_win;
117
extern initialize_file_ftype _initialize_fbsdnat;
117
void
118
void
118
initialize_all_files (void)
119
initialize_all_files (void)
119
{
120
{
Lines 229-232 initialize_all_files (void) Link Here
229
  _initialize_tui_regs ();
230
  _initialize_tui_regs ();
230
  _initialize_tui_stack ();
231
  _initialize_tui_stack ();
231
  _initialize_tui_win ();
232
  _initialize_tui_win ();
233
  _initialize_fbsdnat ();
232
}
234
}
(-)b/gnu/usr.bin/gdb/libgdb/fbsd-nat.c (+342 lines)
Added Link Here
1
/* Native-dependent code for FreeBSD.
2
3
   Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
4
5
   This file is part of GDB.
6
7
   This program is free software; you can redistribute it and/or modify
8
   it under the terms of the GNU General Public License as published by
9
   the Free Software Foundation; either version 2 of the License, or
10
   (at your option) any later version.
11
12
   This program is distributed in the hope that it will be useful,
13
   but WITHOUT ANY WARRANTY; without even the implied warranty of
14
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15
   GNU General Public License for more details.
16
17
   You should have received a copy of the GNU General Public License
18
   along with this program; if not, write to the Free Software
19
   Foundation, Inc., 51 Franklin Street, Fifth Floor,
20
   Boston, MA 02110-1301, USA.  */
21
22
#include "defs.h"
23
#include "inferior.h"
24
#include "symfile.h"
25
#include "gdbcore.h"
26
#include "gdbthread.h"
27
#include "gdb_assert.h"
28
#include <sys/types.h>
29
#include <sys/ptrace.h>
30
#include <sys/wait.h>
31
32
extern struct target_ops child_ops;
33
void clear_step_resume_breakpoint (void);
34
void clear_step_resume_breakpoint_thread (void);
35
void (*reactivate_threads) (char*) = NULL;
36
void (*disable_threads) (void) = NULL;
37
38
static void (*mourn_inferior_beneath) (void);
39
static void (*detach_beneath) (char *args, int from_tty);
40
static ptid_t (*wait_beneath) (ptid_t ptid, 
41
			       struct target_waitstatus *ourstatus);
42
int follow_event_pid = 0;
43
44
/* Return a the name of file that can be opened to get the symbols for
45
   the child process identified by PID.  */
46
47
char *
48
fbsd_pid_to_exec_file (int pid)
49
{
50
  size_t len = MAXPATHLEN;
51
  char *buf = xcalloc (len, sizeof (char));
52
  char *path;
53
54
#ifdef KERN_PROC_PATHNAME
55
  int mib[4];
56
57
  mib[0] = CTL_KERN;
58
  mib[1] = KERN_PROC;
59
  mib[2] = KERN_PROC_PATHNAME;
60
  mib[3] = pid;
61
  if (sysctl (mib, 4, buf, &len, NULL, 0) == 0)
62
    return buf;
63
#endif
64
65
  path = xstrprintf ("/proc/%d/file", pid);
66
  if (readlink (path, buf, MAXPATHLEN) == -1)
67
    {
68
      xfree (buf);
69
      buf = NULL;
70
    }
71
72
  xfree (path);
73
  return buf;
74
}
75
76
/* Wait for the child specified by PTID to do something.  Return the
77
   process ID of the child, or MINUS_ONE_PTID in case of error; store
78
   the status in *OURSTATUS.  */
79
80
static ptid_t
81
inf_ptrace_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
82
{
83
  pid_t pid;
84
  int status, save_errno;
85
86
  do
87
    {
88
      set_sigint_trap ();
89
      set_sigio_trap ();
90
      do
91
	{
92
	  pid = waitpid (PIDGET (ptid), &status, 0);
93
	  save_errno = errno;
94
	}
95
      while (pid == -1 && errno == EINTR);
96
97
      clear_sigio_trap ();
98
      clear_sigint_trap ();
99
100
      if (pid == -1)
101
	{
102
	  fprintf_unfiltered (gdb_stderr,
103
			      _("Child process unexpectedly missing: %s.\n"),
104
			      safe_strerror (save_errno));
105
106
	  /* Claim it exited with unknown signal.  */
107
	  ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
108
	  ourstatus->value.sig = TARGET_SIGNAL_UNKNOWN;
109
	  return minus_one_ptid;
110
	}
111
112
      /* Ignore terminated detached child processes.  */
113
      if (!WIFSTOPPED (status) && pid != PIDGET (inferior_ptid))
114
	pid = -1;
115
    }
116
  while (pid == -1);
117
118
  store_waitstatus (ourstatus, status);
119
  return pid_to_ptid (pid);
120
}
121
122
static ptid_t
123
fbsd_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
124
{
125
  long lwp;
126
  struct ptrace_lwpinfo lwpinfo;
127
  struct target_waitstatus stat;
128
  ptid_t ret;
129
  static ptid_t forking_child = {0,0,0};
130
131
  ret = wait_beneath (ptid, ourstatus);
132
133
  if (PIDGET (ret) >= 0 && ourstatus->kind == TARGET_WAITKIND_STOPPED &&
134
      (ourstatus->value.sig == TARGET_SIGNAL_TRAP ||
135
       ourstatus->value.sig == TARGET_SIGNAL_STOP) &&
136
      (ptrace(PT_LWPINFO, PIDGET (ret), (caddr_t)&lwpinfo, 
137
	      sizeof lwpinfo) == 0))
138
    {
139
      if (lwpinfo.pl_flags & PL_FLAG_CHILD)
140
	{
141
	  /* Leave the child in a stopped state until we get a fork event in 
142
	     the parent. That's when we decide which process to follow. */
143
	  ourstatus->kind = TARGET_WAITKIND_IGNORE;
144
	  forking_child = ret;
145
	}
146
      else if (lwpinfo.pl_flags & PL_FLAG_FORKED)
147
	{
148
	  /* We'd better be in the middle of processing a fork() event. */
149
	  gdb_assert (!ptid_equal (forking_child, null_ptid));
150
	  ourstatus->kind = TARGET_WAITKIND_FORKED;
151
	  ourstatus->value.related_pid = lwpinfo.pl_child_pid;
152
	  forking_child = null_ptid;
153
	}
154
      else if (lwpinfo.pl_flags & PL_FLAG_EXEC &&
155
	  PIDGET (ret) == follow_event_pid)
156
	{
157
	  ourstatus->kind = TARGET_WAITKIND_EXECD;
158
	  ourstatus->value.execd_pathname =
159
	    xstrdup (fbsd_pid_to_exec_file (PIDGET (ret)));
160
	}
161
    }
162
163
  return ret;
164
}
165
166
static void
167
fbsd_enable_event_reporting (int pid)
168
{
169
#ifdef PT_FOLLOW_FORK
170
  follow_event_pid = pid;
171
  if (ptrace(PT_FOLLOW_FORK, pid, 0, 1) < 0)
172
    error (_("Cannot follow fork on this target."));
173
#endif 
174
}
175
176
static void
177
fbsd_post_attach (int pid)
178
{
179
  fbsd_enable_event_reporting (pid);
180
}
181
182
static void
183
fbsd_post_startup_inferior (ptid_t ptid)
184
{
185
  fbsd_enable_event_reporting (PIDGET (ptid));
186
}
187
188
int
189
fbsd_follow_fork (struct target_ops *ops, int follow_child)
190
{
191
  ptid_t last_ptid, ret, child_ptid;
192
  struct target_waitstatus last_status;
193
  int parent_pid, child_pid;
194
  struct target_waitstatus ourstatus;
195
196
  get_last_target_status (&last_ptid, &last_status);
197
  parent_pid = PIDGET (last_ptid);
198
  child_pid = last_status.value.related_pid;
199
200
  if (follow_child)
201
    {
202
      detach_breakpoints (child_pid);
203
      remove_breakpoints ();
204
      child_ptid = pid_to_ptid (child_pid);
205
206
      target_detach (NULL, 0);
207
      inferior_ptid = child_ptid;
208
209
      /* Reinstall ourselves, since we might have been removed in
210
	 target_detach (which does other necessary cleanup).  */
211
      push_target (ops);
212
213
      /* Need to restore some of the actions done by the threaded detach */
214
      if (reactivate_threads) 
215
	{
216
	  reactivate_threads (fbsd_pid_to_exec_file (child_pid));
217
	  reactivate_threads = NULL;
218
	}
219
220
      /* Reset breakpoints in the child as appropriate.  */
221
      clear_step_resume_breakpoint_thread ();
222
      follow_inferior_reset_breakpoints ();
223
224
      /* Enable fork/exec event reporting for the child. */
225
      fbsd_enable_event_reporting (child_pid);
226
    }
227
  else /* Follow parent */
228
    {
229
      /* Before detaching from the child, remove all breakpoints from
230
         it.  (This won't actually modify the breakpoint list, but will
231
         physically remove the breakpoints from the child.) */
232
      detach_breakpoints (child_pid);
233
      ptrace (PT_DETACH, child_pid, (caddr_t) 1, 0);
234
    }
235
236
  return 0;
237
}
238
239
/* EXECD_PATHNAME is assumed to be non-NULL. */
240
241
static void
242
fbsd_follow_exec (int pid, char *execd_pathname)
243
{
244
  struct target_waitstatus status;
245
  ptid_t ret = inferior_ptid;
246
247
  /* This is an exec event that we actually wish to pay attention to.
248
     Refresh our symbol table to the newly exec'd program, remove any
249
     momentary bp's, etc.
250
251
     If there are breakpoints, they aren't really inserted now,
252
     since the exec() transformed our inferior into a fresh set
253
     of instructions.
254
255
     We want to preserve symbolic breakpoints on the list, since
256
     we have hopes that they can be reset after the new a.out's
257
     symbol table is read.
258
259
     However, any "raw" breakpoints must be removed from the list
260
     (e.g., the solib bp's), since their address is probably invalid
261
     now.
262
263
     And, we DON'T want to call delete_breakpoints() here, since
264
     that may write the bp's "shadow contents" (the instruction
265
     value that was overwritten witha TRAP instruction).  Since
266
     we now have a new a.out, those shadow contents aren't valid. */
267
  update_breakpoints_after_exec ();
268
269
  /* If there was one, it's gone now.  We cannot truly step-to-next
270
     statement through an exec(). */
271
  clear_step_resume_breakpoint ();
272
  step_range_start = 0;
273
  step_range_end = 0;
274
275
  /* What is this a.out's name? */
276
  printf_unfiltered (_("Executing new program: %s\n"), execd_pathname);
277
278
  /* We've followed the inferior through an exec.  Therefore, the
279
     inferior has essentially been killed & reborn. */
280
281
  gdb_flush (gdb_stdout);
282
283
  /* Disable thread library */
284
  if (disable_threads)
285
    {
286
      disable_threads ();
287
      disable_threads = NULL;
288
    }
289
290
  generic_mourn_inferior ();
291
  inferior_ptid = ret;
292
293
  /* That a.out is now the one to use. */
294
  exec_file_attach (execd_pathname, 0);
295
296
  /* And also is where symbols can be found. */
297
  symbol_file_add_main (execd_pathname, 0);
298
299
  /* Reset the shared library package.  This ensures that we get
300
     a shlib event when the child reaches "_start", at which point
301
     the dld will have had a chance to initialize the child. */
302
#if defined(SOLIB_RESTART)
303
  SOLIB_RESTART ();
304
#endif
305
#ifdef SOLIB_CREATE_INFERIOR_HOOK
306
  SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
307
#else
308
  solib_create_inferior_hook ();
309
#endif
310
311
  /* Reinsert all breakpoints.  (Those which were symbolic have
312
     been reset to the proper address in the new a.out, thanks
313
     to symbol_file_command...) */
314
  insert_breakpoints ();
315
}
316
317
static void fbsd_mourn_inferior (void)
318
{
319
  follow_event_pid = 0;
320
  mourn_inferior_beneath ();
321
}
322
323
static void fbsd_detach (char *args, int from_tty)
324
{
325
  follow_event_pid = 0;
326
  detach_beneath (args, from_tty);
327
}
328
329
void
330
_initialize_fbsdnat (void)
331
{
332
  wait_beneath = inf_ptrace_wait;
333
  detach_beneath = child_ops.to_detach;
334
  mourn_inferior_beneath = child_ops.to_mourn_inferior;
335
  child_ops.to_wait = fbsd_wait;
336
  child_ops.to_detach = fbsd_detach;
337
  child_ops.to_mourn_inferior = fbsd_mourn_inferior;
338
  child_ops.to_post_attach = fbsd_post_attach;
339
  child_ops.to_post_startup_inferior = fbsd_post_startup_inferior;
340
  child_ops.to_follow_fork = fbsd_follow_fork;
341
  child_ops.to_follow_exec = fbsd_follow_exec;
342
}
(-)b/gnu/usr.bin/gdb/libgdb/fbsd-threads.c (-3 / +51 lines)
Lines 68-73 extern struct target_ops core_ops; Link Here
68
68
69
/* Pointer to the next function on the objfile event chain.  */
69
/* Pointer to the next function on the objfile event chain.  */
70
static void (*target_new_objfile_chain) (struct objfile *objfile);
70
static void (*target_new_objfile_chain) (struct objfile *objfile);
71
 
72
/* Non-zero while processing thread library re-activation after fork() */
73
static int fbsd_forking;
71
74
72
/* Non-zero if there is a thread module */
75
/* Non-zero if there is a thread module */
73
static int fbsd_thread_present;
76
static int fbsd_thread_present;
Lines 154-159 static int fbsd_thread_alive (ptid_t ptid); Link Here
154
static void attach_thread (ptid_t ptid, const td_thrhandle_t *th_p,
157
static void attach_thread (ptid_t ptid, const td_thrhandle_t *th_p,
155
               const td_thrinfo_t *ti_p, int verbose);
158
               const td_thrinfo_t *ti_p, int verbose);
156
static void fbsd_thread_detach (char *args, int from_tty);
159
static void fbsd_thread_detach (char *args, int from_tty);
160
extern void (*reactivate_threads) (char*);
161
extern void (*disable_threads) (void);
162
static void fbsd_thread_activate (void);
163
static void fbsd_thread_deactivate (void);
157
164
158
/* Building process ids.  */
165
/* Building process ids.  */
159
166
Lines 405-419 disable_thread_event_reporting (void) Link Here
405
  td_death_bp_addr = 0;
412
  td_death_bp_addr = 0;
406
}
413
}
407
414
415
static void 
416
fbsd_thread_reactivate_after_fork (char *pathname)
417
{
418
  fbsd_forking = 1;
419
420
  /* That a.out is now the one to use. */
421
  exec_file_attach (pathname, 0);
422
423
  /* And also is where symbols can be found. */
424
  symbol_file_add_main (pathname, 0);
425
  push_target (&fbsd_thread_ops);
426
427
#ifdef SOLIB_CREATE_INFERIOR_HOOK
428
  SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
429
#else
430
  solib_create_inferior_hook ();
431
#endif
432
  fbsd_forking = 0;
433
}
434
435
static void 
436
fbsd_thread_disable_after_exec (void)
437
{
438
  if (fbsd_thread_active)
439
    fbsd_thread_deactivate ();
440
441
  unpush_target (&fbsd_thread_ops);
442
}
443
408
static void
444
static void
409
fbsd_thread_activate (void)
445
fbsd_thread_activate (void)
410
{
446
{
411
  fbsd_thread_active = 1;
447
  fbsd_thread_active = 1;
448
  reactivate_threads = fbsd_thread_reactivate_after_fork;
449
  disable_threads = fbsd_thread_disable_after_exec;
412
  init_thread_list();
450
  init_thread_list();
413
  if (fbsd_thread_core == 0)
451
  if (fbsd_thread_core == 0)
414
    enable_thread_event_reporting ();
452
    enable_thread_event_reporting ();
415
  fbsd_thread_find_new_threads ();
453
416
  get_current_thread ();
454
  if (!fbsd_forking) 
455
    {
456
      fbsd_thread_find_new_threads ();
457
      get_current_thread ();
458
    }
417
}
459
}
418
460
419
static void
461
static void
Lines 626-632 fbsd_thread_resume (ptid_t ptid, int step, enum target_signal signo) Link Here
626
    }
668
    }
627
669
628
  lwp = GET_LWP (work_ptid);
670
  lwp = GET_LWP (work_ptid);
629
  if (lwp == 0)
671
  if (lwp == 0 && GET_THREAD (work_ptid) != 0)
630
    {
672
    {
631
      /* check user thread */
673
      /* check user thread */
632
      ret = td_ta_map_id2thr_p (thread_agent, GET_THREAD(work_ptid), &th);
674
      ret = td_ta_map_id2thr_p (thread_agent, GET_THREAD(work_ptid), &th);
Lines 790-795 fbsd_thread_wait (ptid_t ptid, struct target_waitstatus *ourstatus) Link Here
790
  ret = child_ops.to_wait (ptid, ourstatus);
832
  ret = child_ops.to_wait (ptid, ourstatus);
791
  if (GET_PID(ret) >= 0 && ourstatus->kind == TARGET_WAITKIND_STOPPED)
833
  if (GET_PID(ret) >= 0 && ourstatus->kind == TARGET_WAITKIND_STOPPED)
792
    {
834
    {
835
      if (thread_list_empty ())
836
	fbsd_thread_find_new_threads ();
837
793
      lwp = get_current_lwp (GET_PID(ret));
838
      lwp = get_current_lwp (GET_PID(ret));
794
      ret = thread_from_lwp (BUILD_LWP(lwp, GET_PID(ret)),
839
      ret = thread_from_lwp (BUILD_LWP(lwp, GET_PID(ret)),
795
         &th, &ti);
840
         &th, &ti);
Lines 1065-1070 fbsd_thread_create_inferior (char *exec_file, char *allargs, char **env) Link Here
1065
static void
1110
static void
1066
fbsd_thread_post_startup_inferior (ptid_t ptid)
1111
fbsd_thread_post_startup_inferior (ptid_t ptid)
1067
{
1112
{
1113
  if (child_ops.to_post_startup_inferior)
1114
    child_ops.to_post_startup_inferior (ptid);
1115
1068
  if (fbsd_thread_present && !fbsd_thread_active)
1116
  if (fbsd_thread_present && !fbsd_thread_active)
1069
    {
1117
    {
1070
      /* The child process is now the actual multi-threaded
1118
      /* The child process is now the actual multi-threaded
(-)b/lib/libc/amd64/sys/__vdso_gettc.c (-2 lines)
Lines 43-49 __vdso_gettc_low(const struct vdso_timehands *th) Link Here
43
	return (rv);
43
	return (rv);
44
}
44
}
45
45
46
#pragma weak __vdso_gettc
47
u_int
46
u_int
48
__vdso_gettc(const struct vdso_timehands *th)
47
__vdso_gettc(const struct vdso_timehands *th)
49
{
48
{
Lines 51-57 __vdso_gettc(const struct vdso_timehands *th) Link Here
51
	return (th->th_x86_shift > 0 ? __vdso_gettc_low(th) : rdtsc32());
50
	return (th->th_x86_shift > 0 ? __vdso_gettc_low(th) : rdtsc32());
52
}
51
}
53
52
54
#pragma weak __vdso_gettimekeep
55
int
53
int
56
__vdso_gettimekeep(struct vdso_timekeep **tk)
54
__vdso_gettimekeep(struct vdso_timekeep **tk)
57
{
55
{
(-)b/lib/libc/i386/sys/__vdso_gettc.c (-2 lines)
Lines 43-49 __vdso_gettc_low(const struct vdso_timehands *th) Link Here
43
	return (rv);
43
	return (rv);
44
}
44
}
45
45
46
#pragma weak __vdso_gettc
47
u_int
46
u_int
48
__vdso_gettc(const struct vdso_timehands *th)
47
__vdso_gettc(const struct vdso_timehands *th)
49
{
48
{
Lines 51-57 __vdso_gettc(const struct vdso_timehands *th) Link Here
51
	return (th->th_x86_shift > 0 ? __vdso_gettc_low(th) : rdtsc32());
50
	return (th->th_x86_shift > 0 ? __vdso_gettc_low(th) : rdtsc32());
52
}
51
}
53
52
54
#pragma weak __vdso_gettimekeep
55
int
53
int
56
__vdso_gettimekeep(struct vdso_timekeep **tk)
54
__vdso_gettimekeep(struct vdso_timekeep **tk)
57
{
55
{
(-)b/lib/libc/sys/__vdso_gettimeofday.c (-2 lines)
Lines 79-85 binuptime(struct bintime *bt, struct vdso_timekeep *tk, int abs) Link Here
79
79
80
static struct vdso_timekeep *tk;
80
static struct vdso_timekeep *tk;
81
81
82
#pragma weak __vdso_gettimeofday
83
int
82
int
84
__vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
83
__vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
85
{
84
{
Lines 102-108 __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) Link Here
102
	return (0);
101
	return (0);
103
}
102
}
104
103
105
#pragma weak __vdso_clock_gettime
106
int
104
int
107
__vdso_clock_gettime(clockid_t clock_id, struct timespec *ts)
105
__vdso_clock_gettime(clockid_t clock_id, struct timespec *ts)
108
{
106
{
(-)b/lib/libc/sys/trivial-vdso_tc.c (-2 lines)
Lines 31-37 __FBSDID("$FreeBSD$"); Link Here
31
#include <sys/vdso.h>
31
#include <sys/vdso.h>
32
#include <errno.h>
32
#include <errno.h>
33
33
34
#pragma weak __vdso_gettc
35
u_int
34
u_int
36
__vdso_gettc(const struct vdso_timehands *th)
35
__vdso_gettc(const struct vdso_timehands *th)
37
{
36
{
Lines 39-45 __vdso_gettc(const struct vdso_timehands *th) Link Here
39
	return (0);
38
	return (0);
40
}
39
}
41
40
42
#pragma weak __vdso_gettimekeep
43
int
41
int
44
__vdso_gettimekeep(struct vdso_timekeep **tk)
42
__vdso_gettimekeep(struct vdso_timekeep **tk)
45
{
43
{
(-)b/share/man/man9/fpu_kern.9 (+10 lines)
Lines 120-125 could be used from both kernel thread and syscall contexts. Link Here
120
The
120
The
121
.Fn fpu_kern_leave
121
.Fn fpu_kern_leave
122
function correctly handles such contexts.
122
function correctly handles such contexts.
123
.It Dv FPU_KERN_NOCTX
124
Avoid nesting save area.
125
If the flag is specified, the
126
.Fa ctx
127
must be passed as
128
.Va NULL .
129
The flag should only be used for really short code blocks
130
which can be executed in a critical section.
131
It avoids the need to allocate the FPU context by the cost
132
of increased system latency.
123
.El
133
.El
124
.El
134
.El
125
.Pp
135
.Pp
(-)b/sys/amd64/amd64/fpu.c (-3 / +42 lines)
Lines 631-636 fpudna(void) Link Here
631
	 */
631
	 */
632
	critical_enter();
632
	critical_enter();
633
633
634
	KASSERT((curpcb->pcb_flags & PCB_FPUNOSAVE) == 0,
635
	    ("fpudna while in fpu_kern_enter(FPU_KERN_NOCTX)"));
634
	if (PCPU_GET(fpcurthread) == curthread) {
636
	if (PCPU_GET(fpcurthread) == curthread) {
635
		printf("fpudna: fpcurthread == curthread\n");
637
		printf("fpudna: fpcurthread == curthread\n");
636
		stop_emulating();
638
		stop_emulating();
Lines 661-667 fpudna(void) Link Here
661
		 * fpu_initialstate, to ignite the XSAVEOPT
663
		 * fpu_initialstate, to ignite the XSAVEOPT
662
		 * tracking engine.
664
		 * tracking engine.
663
		 */
665
		 */
664
		bcopy(fpu_initialstate, curpcb->pcb_save, cpu_max_ext_state_size);
666
		bcopy(fpu_initialstate, curpcb->pcb_save,
667
		    cpu_max_ext_state_size);
665
		fpurestore(curpcb->pcb_save);
668
		fpurestore(curpcb->pcb_save);
666
		if (curpcb->pcb_initial_fpucw != __INITIAL_FPUCW__)
669
		if (curpcb->pcb_initial_fpucw != __INITIAL_FPUCW__)
667
			fldcw(curpcb->pcb_initial_fpucw);
670
			fldcw(curpcb->pcb_initial_fpucw);
Lines 959-969 fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags) Link Here
959
{
962
{
960
	struct pcb *pcb;
963
	struct pcb *pcb;
961
964
965
	KASSERT((flags & FPU_KERN_NOCTX) != 0 || ctx != NULL,
966
	    ("ctx is required when !FPU_KERN_NOCTX"));
967
	pcb = td->td_pcb;
968
	KASSERT((pcb->pcb_flags & PCB_FPUNOSAVE) == 0,
969
	    ("recursive fpu_kern_enter while in PCB_FPUNOSAVE state"));
970
	if ((flags & FPU_KERN_NOCTX) != 0) {
971
		critical_enter();
972
		stop_emulating();
973
		if (curthread == PCPU_GET(fpcurthread)) {
974
			fpusave(curpcb->pcb_save);
975
			PCPU_SET(fpcurthread, NULL);
976
		} else {
977
			KASSERT(PCPU_GET(fpcurthread) == NULL,
978
			    ("invalid fpcurthread"));
979
		}
980
981
		/*
982
		 * This breaks XSAVEOPT tracker, but
983
		 * PCB_FPUNOSAVE state is supposed to never need to
984
		 * save FPU context at all.
985
		 */
986
		fpurestore(fpu_initialstate);
987
		set_pcb_flags(pcb, PCB_KERNFPU | PCB_FPUNOSAVE |
988
		    PCB_FPUINITDONE);
989
		return (0);
990
	}
962
	if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
991
	if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
963
		ctx->flags = FPU_KERN_CTX_DUMMY;
992
		ctx->flags = FPU_KERN_CTX_DUMMY;
964
		return (0);
993
		return (0);
965
	}
994
	}
966
	pcb = td->td_pcb;
967
	KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save ==
995
	KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save ==
968
	    get_pcb_user_save_pcb(pcb), ("mangled pcb_save"));
996
	    get_pcb_user_save_pcb(pcb), ("mangled pcb_save"));
969
	ctx->flags = 0;
997
	ctx->flags = 0;
Lines 982-996 fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx) Link Here
982
{
1010
{
983
	struct pcb *pcb;
1011
	struct pcb *pcb;
984
1012
1013
	pcb = td->td_pcb;
1014
	if ((pcb->pcb_flags & PCB_FPUNOSAVE) != 0) {
1015
		KASSERT(ctx == NULL, ("non-null ctx after FPU_KERN_NOCTX"));
1016
		KASSERT(PCPU_GET(fpcurthread) == NULL,
1017
		    ("non-NULL fpcurthread for PCB_FPUNOSAVE"));
1018
		CRITICAL_ASSERT(td);
1019
		clear_pcb_flags(pcb,  PCB_FPUNOSAVE | PCB_FPUINITDONE);
1020
		start_emulating();
1021
		critical_exit();
1022
		goto restore_flags;
1023
	}
985
	if (is_fpu_kern_thread(0) && (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
1024
	if (is_fpu_kern_thread(0) && (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
986
		return (0);
1025
		return (0);
987
	KASSERT((ctx->flags & FPU_KERN_CTX_DUMMY) == 0, ("dummy ctx"));
1026
	KASSERT((ctx->flags & FPU_KERN_CTX_DUMMY) == 0, ("dummy ctx"));
988
	pcb = td->td_pcb;
989
	critical_enter();
1027
	critical_enter();
990
	if (curthread == PCPU_GET(fpcurthread))
1028
	if (curthread == PCPU_GET(fpcurthread))
991
		fpudrop();
1029
		fpudrop();
992
	critical_exit();
1030
	critical_exit();
993
	pcb->pcb_save = ctx->prev;
1031
	pcb->pcb_save = ctx->prev;
1032
restore_flags:
994
	if (pcb->pcb_save == get_pcb_user_save_pcb(pcb)) {
1033
	if (pcb->pcb_save == get_pcb_user_save_pcb(pcb)) {
995
		if ((pcb->pcb_flags & PCB_USERFPUINITDONE) != 0) {
1034
		if ((pcb->pcb_flags & PCB_USERFPUINITDONE) != 0) {
996
			set_pcb_flags(pcb, PCB_FPUINITDONE);
1035
			set_pcb_flags(pcb, PCB_FPUINITDONE);
(-)b/sys/amd64/amd64/initcpu.c (+5 lines)
Lines 90-95 static void Link Here
90
init_amd(void)
90
init_amd(void)
91
{
91
{
92
92
93
	if (CPUID_TO_FAMILY(cpu_id) == 0x9) {
94
		if ((cpu_feature2 & CPUID2_HV) == 0)
95
			wrmsr(MSR_HWCR, rdmsr(MSR_HWCR) | (1 << 6));
96
	}
97
93
	/*
98
	/*
94
	 * Work around Erratum 721 for Family 10h and 12h processors.
99
	 * Work around Erratum 721 for Family 10h and 12h processors.
95
	 * These processors may incorrectly update the stack pointer
100
	 * These processors may incorrectly update the stack pointer
(-)b/sys/amd64/include/fpu.h (+1 lines)
Lines 86-91 void fpu_save_area_reset(struct savefpu *fsa); Link Here
86
#define	FPU_KERN_NORMAL	0x0000
86
#define	FPU_KERN_NORMAL	0x0000
87
#define	FPU_KERN_NOWAIT	0x0001
87
#define	FPU_KERN_NOWAIT	0x0001
88
#define	FPU_KERN_KTHR	0x0002
88
#define	FPU_KERN_KTHR	0x0002
89
#define	FPU_KERN_NOCTX	0x0004
89
90
90
#endif
91
#endif
91
92
(-)b/sys/amd64/include/pcb.h (+1 lines)
Lines 79-84 struct pcb { Link Here
79
#define	PCB_FPUINITDONE	0x08	/* fpu state is initialized */
79
#define	PCB_FPUINITDONE	0x08	/* fpu state is initialized */
80
#define	PCB_USERFPUINITDONE 0x10 /* fpu user state is initialized */
80
#define	PCB_USERFPUINITDONE 0x10 /* fpu user state is initialized */
81
#define	PCB_32BIT	0x40	/* process has 32 bit context (segs etc) */
81
#define	PCB_32BIT	0x40	/* process has 32 bit context (segs etc) */
82
#define	PCB_FPUNOSAVE	0x80	/* no save area for current FPU ctx */
82
83
83
	uint16_t	pcb_initial_fpucw;
84
	uint16_t	pcb_initial_fpucw;
84
85
(-)b/sys/dev/drm2/i915/intel_iic.c (+1 lines)
Lines 142-147 intel_iic_reset(struct drm_device *dev) Link Here
142
142
143
	dev_priv = dev->dev_private;
143
	dev_priv = dev->dev_private;
144
	I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
144
	I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
145
	I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
145
}
146
}
146
147
147
static int
148
static int
(-)b/sys/fs/msdosfs/msdosfs_fat.c (-1 / +4 lines)
Lines 689-696 chainalloc(struct msdosfsmount *pmp, u_long start, u_long count, Link Here
689
		pmp->pm_nxtfree = CLUST_FIRST;
689
		pmp->pm_nxtfree = CLUST_FIRST;
690
	pmp->pm_flags |= MSDOSFS_FSIMOD;
690
	pmp->pm_flags |= MSDOSFS_FSIMOD;
691
	error = fatchain(pmp, start, count, fillwith);
691
	error = fatchain(pmp, start, count, fillwith);
692
	if (error != 0)
692
	if (error != 0) {
693
		for (cl = start, n = count; n-- > 0;)
694
			usemap_free(pmp, cl++);
693
		return (error);
695
		return (error);
696
	}
694
#ifdef MSDOSFS_DEBUG
697
#ifdef MSDOSFS_DEBUG
695
	printf("clusteralloc(): allocated cluster chain at %lu (%lu clusters)\n",
698
	printf("clusteralloc(): allocated cluster chain at %lu (%lu clusters)\n",
696
	    start, count);
699
	    start, count);
(-)b/sys/i386/i386/pmap.c (-9 / +13 lines)
Lines 3387-3397 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, Link Here
3387
	PMAP_LOCK(pmap);
3387
	PMAP_LOCK(pmap);
3388
	sched_pin();
3388
	sched_pin();
3389
3389
3390
	/*
3390
	pde = pmap_pde(pmap, va);
3391
	 * In the case that a page table page is not
3391
	if ((*pde & PG_PS) != 0) {
3392
	 * resident, we are creating it here.
3392
		/* PG_V is asserted by pmap_demote_pde */
3393
	 */
3393
		pmap_demote_pde(pmap, pde, va);
3394
	if (va < VM_MAXUSER_ADDRESS) {
3394
		if (va < VM_MAXUSER_ADDRESS) {
3395
			mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
3396
			mpte->wire_count++;
3397
		}
3398
	} else if (va < VM_MAXUSER_ADDRESS) {
3399
		/*
3400
		 * In the case that a page table page is not resident,
3401
		 * we are creating it here.
3402
		 */
3395
		mpte = pmap_allocpte(pmap, va, flags);
3403
		mpte = pmap_allocpte(pmap, va, flags);
3396
		if (mpte == NULL) {
3404
		if (mpte == NULL) {
3397
			KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
3405
			KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
Lines 3402-3411 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, Link Here
3402
			return (KERN_RESOURCE_SHORTAGE);
3410
			return (KERN_RESOURCE_SHORTAGE);
3403
		}
3411
		}
3404
	}
3412
	}
3405
3406
	pde = pmap_pde(pmap, va);
3407
	if ((*pde & PG_PS) != 0)
3408
		panic("pmap_enter: attempted pmap_enter on 4MB page");
3409
	pte = pmap_pte_quick(pmap, va);
3413
	pte = pmap_pte_quick(pmap, va);
3410
3414
3411
	/*
3415
	/*
(-)b/sys/kern/kern_proc.c (+7 lines)
Lines 3001-3006 resume_all_proc(void) Link Here
3001
3001
3002
	cp = curproc;
3002
	cp = curproc;
3003
	sx_xlock(&allproc_lock);
3003
	sx_xlock(&allproc_lock);
3004
again:
3004
	LIST_REMOVE(cp, p_list);
3005
	LIST_REMOVE(cp, p_list);
3005
	LIST_INSERT_HEAD(&allproc, cp, p_list);
3006
	LIST_INSERT_HEAD(&allproc, cp, p_list);
3006
	for (;;) {
3007
	for (;;) {
Lines 3021-3026 resume_all_proc(void) Link Here
3021
			PROC_UNLOCK(p);
3022
			PROC_UNLOCK(p);
3022
		}
3023
		}
3023
	}
3024
	}
3025
	/*  Did the loop above missed any stopped process ? */
3026
	LIST_FOREACH(p, &allproc, p_list) {
3027
		/* No need for proc lock. */
3028
		if ((p->p_flag & P_TOTAL_STOP) != 0)
3029
			goto again;
3030
	}
3024
	sx_xunlock(&allproc_lock);
3031
	sx_xunlock(&allproc_lock);
3025
}
3032
}
3026
3033
(-)b/sys/kern/kern_timeout.c (-3 / +3 lines)
Lines 1237-1243 again: Link Here
1237
			CC_UNLOCK(cc);
1237
			CC_UNLOCK(cc);
1238
			if (sq_locked)
1238
			if (sq_locked)
1239
				sleepq_release(&cc_exec_waiting(cc, direct));
1239
				sleepq_release(&cc_exec_waiting(cc, direct));
1240
			return (0);
1240
			return (not_on_a_list);
1241
		}
1241
		}
1242
1242
1243
		if (safe) {
1243
		if (safe) {
Lines 1352-1364 again: Link Here
1352
			CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
1352
			CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
1353
			    c, c->c_func, c->c_arg);
1353
			    c, c->c_func, c->c_arg);
1354
			CC_UNLOCK(cc);
1354
			CC_UNLOCK(cc);
1355
			return (0);
1355
			return (not_on_a_list);
1356
		}
1356
		}
1357
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
1357
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
1358
		    c, c->c_func, c->c_arg);
1358
		    c, c->c_func, c->c_arg);
1359
		CC_UNLOCK(cc);
1359
		CC_UNLOCK(cc);
1360
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
1360
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
1361
		return (0);
1361
		return (not_on_a_list);
1362
	}
1362
	}
1363
	if (sq_locked)
1363
	if (sq_locked)
1364
		sleepq_release(&cc_exec_waiting(cc, direct));
1364
		sleepq_release(&cc_exec_waiting(cc, direct));
(-)b/sys/nlm/nlm_advlock.c (-5 / +9 lines)
Lines 210-216 nlm_advlock_internal(struct vnode *vp, void *id, int op, struct flock *fl, Link Here
210
	struct rpc_callextra ext;
210
	struct rpc_callextra ext;
211
	struct nlm_feedback_arg nf;
211
	struct nlm_feedback_arg nf;
212
	AUTH *auth;
212
	AUTH *auth;
213
	struct ucred *cred;
213
	struct ucred *cred, *cred1;
214
	struct nlm_file_svid *ns;
214
	struct nlm_file_svid *ns;
215
	int svid;
215
	int svid;
216
	int error;
216
	int error;
Lines 240-254 nlm_advlock_internal(struct vnode *vp, void *id, int op, struct flock *fl, Link Here
240
	else
240
	else
241
		retries = INT_MAX;
241
		retries = INT_MAX;
242
242
243
	if (unlock_vp)
244
		VOP_UNLOCK(vp, 0);
245
246
	/*
243
	/*
247
	 * We need to switch to mount-point creds so that we can send
244
	 * We need to switch to mount-point creds so that we can send
248
	 * packets from a privileged port.
245
	 * packets from a privileged port.  Reference mnt_cred and
246
	 * switch to them before unlocking the vnode, since mount
247
	 * point could be unmounted right after unlock.
249
	 */
248
	 */
250
	cred = td->td_ucred;
249
	cred = td->td_ucred;
251
	td->td_ucred = vp->v_mount->mnt_cred;
250
	td->td_ucred = vp->v_mount->mnt_cred;
251
	crhold(td->td_ucred);
252
	if (unlock_vp)
253
		VOP_UNLOCK(vp, 0);
252
254
253
	host = nlm_find_host_by_name(servername, sa, vers);
255
	host = nlm_find_host_by_name(servername, sa, vers);
254
	auth = authunix_create(cred);
256
	auth = authunix_create(cred);
Lines 373-379 nlm_advlock_internal(struct vnode *vp, void *id, int op, struct flock *fl, Link Here
373
	if (ns)
375
	if (ns)
374
		nlm_free_svid(ns);
376
		nlm_free_svid(ns);
375
377
378
	cred1 = td->td_ucred;
376
	td->td_ucred = cred;
379
	td->td_ucred = cred;
380
	crfree(cred1);
377
	AUTH_DESTROY(auth);
381
	AUTH_DESTROY(auth);
378
382
379
	nlm_host_release(host);
383
	nlm_host_release(host);
(-)b/sys/sys/vdso.h (+5 lines)
Lines 60-65 struct timespec; Link Here
60
struct timeval;
60
struct timeval;
61
struct timezone;
61
struct timezone;
62
62
63
#pragma weak __vdso_clock_gettime
64
#pragma weak __vdso_gettimeofday
65
#pragma weak __vdso_gettc
66
#pragma weak __vdso_gettimekeep
67
63
int __vdso_clock_gettime(clockid_t clock_id, struct timespec *ts);
68
int __vdso_clock_gettime(clockid_t clock_id, struct timespec *ts);
64
int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
69
int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
65
u_int __vdso_gettc(const struct vdso_timehands *vdso_th);
70
u_int __vdso_gettc(const struct vdso_timehands *vdso_th);
(-)b/sys/ufs/ffs/ffs_vnops.c (-3 / +5 lines)
Lines 730-739 ffs_write(ap) Link Here
730
			vnode_pager_setsize(vp, uio->uio_offset + xfersize);
730
			vnode_pager_setsize(vp, uio->uio_offset + xfersize);
731
731
732
		/*
732
		/*
733
		 * We must perform a read-before-write if the transfer size
733
		 * We must perform a read-before-write if the transfer
734
		 * does not cover the entire buffer.
734
		 * size does not cover the entire buffer or the valid
735
		 * part of the last buffer for the file.
735
		 */
736
		 */
736
		if (fs->fs_bsize > xfersize)
737
		if (fs->fs_bsize > xfersize && (blkoffset != 0 ||
738
		    uio->uio_offset + xfersize < ip->i_size))
737
			flags |= BA_CLRBUF;
739
			flags |= BA_CLRBUF;
738
		else
740
		else
739
			flags &= ~BA_CLRBUF;
741
			flags &= ~BA_CLRBUF;
(-)b/sys/vm/memguard.c (+4 lines)
Lines 505-510 memguard_cmp_zone(uma_zone_t zone) Link Here
505
	    zone->uz_flags & UMA_ZONE_NOFREE)
505
	    zone->uz_flags & UMA_ZONE_NOFREE)
506
		return (0);
506
		return (0);
507
507
508
	 if (zone->uz_link.kl_keg != NULL &&
509
	     (zone->uz_link.kl_keg->uk_flags & UMA_ZFLAG_CACHEONLY) != 0)
510
		 return (0);
511
508
	if (memguard_cmp(zone->uz_size))
512
	if (memguard_cmp(zone->uz_size))
509
		return (1);
513
		return (1);
510
514
(-)b/sys/vm/vm_fault.c (-26 / +43 lines)
Lines 1294-1323 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map, Link Here
1294
	src_object = src_entry->object.vm_object;
1294
	src_object = src_entry->object.vm_object;
1295
	src_pindex = OFF_TO_IDX(src_entry->offset);
1295
	src_pindex = OFF_TO_IDX(src_entry->offset);
1296
1296
1297
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
1298
	    ("vm_fault_copy_entry: vm_object not NULL"));
1297
	if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
1299
	if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
1298
		dst_object = src_object;
1300
		dst_object = src_object;
1299
		vm_object_reference(dst_object);
1301
		vm_object_reference(dst_object);
1300
	} else {
1302
	} else {
1301
		/*
1303
		/*
1302
		 * Create the top-level object for the destination entry. (Doesn't
1304
		 * Create the top-level object for the destination
1303
		 * actually shadow anything - we copy the pages directly.)
1305
		 * entry. (Doesn't actually shadow anything - we copy
1306
		 * the pages directly.)
1304
		 */
1307
		 */
1305
		dst_object = vm_object_allocate(OBJT_DEFAULT,
1308
		vm_object_shadow(&dst_entry->object.vm_object,
1306
		    OFF_TO_IDX(dst_entry->end - dst_entry->start));
1309
		    &dst_entry->offset, OFF_TO_IDX(dst_entry->end -
1310
		    dst_entry->start));
1311
		dst_object = dst_entry->object.vm_object;
1307
#if VM_NRESERVLEVEL > 0
1312
#if VM_NRESERVLEVEL > 0
1308
		dst_object->flags |= OBJ_COLORED;
1313
		if (dst_object != src_object) {
1309
		dst_object->pg_color = atop(dst_entry->start);
1314
			dst_object->flags |= OBJ_COLORED;
1315
			dst_object->pg_color = atop(dst_entry->start);
1316
		}
1310
#endif
1317
#endif
1318
1319
		/*
1320
		 * If not an upgrade, then enter the mappings in the
1321
		 * pmap as read and/or execute accesses.  Otherwise,
1322
		 * enter them as write accesses.
1323
		 *
1324
		 * A writeable large page mapping is only created if
1325
		 * all of the constituent small page mappings are
1326
		 * modified. Marking PTEs as modified on inception
1327
		 * allows promotion to happen without taking
1328
		 * potentially large number of soft faults.
1329
		 */
1330
		access &= ~VM_PROT_WRITE;
1311
	}
1331
	}
1332
	/*
1333
	 * dst_entry->offset is either left unchanged in the upgrade
1334
	 * case, or vm_object_shadow takes care of recalculating the
1335
	 * offset depending on creation of the new object.
1336
	 */
1312
1337
1313
	VM_OBJECT_WLOCK(dst_object);
1338
	/*
1314
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
1339
	 * This can only happen for upgrade case, due to src_object
1315
	    ("vm_fault_copy_entry: vm_object not NULL"));
1340
	 * reference bump above, and it means that all pages are
1316
	if (src_object != dst_object) {
1341
	 * private already.
1317
		dst_entry->object.vm_object = dst_object;
1342
	 */
1318
		dst_entry->offset = 0;
1343
	if (dst_object == src_object &&
1319
		dst_object->charge = dst_entry->end - dst_entry->start;
1344
	    (src_entry->protection & VM_PROT_WRITE) == 0) {
1345
		KASSERT(upgrade, ("XXX"));
1346
		goto uncow;
1320
	}
1347
	}
1348
1349
	VM_OBJECT_WLOCK(dst_object);
1321
	if (fork_charge != NULL) {
1350
	if (fork_charge != NULL) {
1322
		KASSERT(dst_entry->cred == NULL,
1351
		KASSERT(dst_entry->cred == NULL,
1323
		    ("vm_fault_copy_entry: leaked swp charge"));
1352
		    ("vm_fault_copy_entry: leaked swp charge"));
Lines 1332-1350 vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map, Link Here
1332
	}
1361
	}
1333
1362
1334
	/*
1363
	/*
1335
	 * If not an upgrade, then enter the mappings in the pmap as
1336
	 * read and/or execute accesses.  Otherwise, enter them as
1337
	 * write accesses.
1338
	 *
1339
	 * A writeable large page mapping is only created if all of
1340
	 * the constituent small page mappings are modified. Marking
1341
	 * PTEs as modified on inception allows promotion to happen
1342
	 * without taking potentially large number of soft faults.
1343
	 */
1344
	if (!upgrade)
1345
		access &= ~VM_PROT_WRITE;
1346
1347
	/*
1348
	 * Loop through all of the virtual pages within the entry's
1364
	 * Loop through all of the virtual pages within the entry's
1349
	 * range, copying each page from the source object to the
1365
	 * range, copying each page from the source object to the
1350
	 * destination object.  Since the source is wired, those pages
1366
	 * destination object.  Since the source is wired, those pages
Lines 1451-1456 again: Link Here
1451
	}
1467
	}
1452
	VM_OBJECT_WUNLOCK(dst_object);
1468
	VM_OBJECT_WUNLOCK(dst_object);
1453
	if (upgrade) {
1469
	if (upgrade) {
1470
uncow:
1454
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
1471
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
1455
		vm_object_deallocate(src_object);
1472
		vm_object_deallocate(src_object);
1456
	}
1473
	}
(-)b/sys/vm/vm_object.c (-14 / +55 lines)
Lines 2102-2116 boolean_t Link Here
 vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
     vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
 {
-	vm_pindex_t next_pindex;
+	vm_object_t shadow_object;
+	vm_page_t m;
+	vm_pindex_t next_pindex, pi;
+	boolean_t ret;

 	if (prev_object == NULL)
 		return (TRUE);
+	ret = FALSE;
 	VM_OBJECT_WLOCK(prev_object);
 	if ((prev_object->type != OBJT_DEFAULT &&
 	    prev_object->type != OBJT_SWAP) ||
 	    (prev_object->flags & OBJ_TMPFS_NODE) != 0) {
-		VM_OBJECT_WUNLOCK(prev_object);
 		return (FALSE);
 	}

Lines 2120-2143 vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, Link Here
 	vm_object_collapse(prev_object);

 	/*
-	 * Can't coalesce if: . more than one reference . paged out . shadows
-	 * another object . has a copy elsewhere (any of which mean that the
-	 * pages not mapped to prev_entry may be in use anyway)
+	 * Can't coalesce if shadows another object, which means that
+	 * the pages not mapped to prev_entry may be in use anyway.
 	 */
-	if (prev_object->backing_object != NULL) {
-		VM_OBJECT_WUNLOCK(prev_object);
-		return (FALSE);
-	}
+	if (prev_object->backing_object != NULL)
+		goto out;

 	prev_size >>= PAGE_SHIFT;
 	next_size >>= PAGE_SHIFT;
 	next_pindex = OFF_TO_IDX(prev_offset) + prev_size;

-	if ((prev_object->ref_count > 1) &&
-	    (prev_object->size != next_pindex)) {
-		VM_OBJECT_WUNLOCK(prev_object);
-		return (FALSE);
+	/*
+	 * If the object has more than one reference or is larger than
+	 * the end of the previous mapping, still allow coalescing the
+	 * map entries for the case when this is due to other mappings
+	 * of the object into the current address space.
+	 */
+	if (prev_object->ref_count > 1 && prev_object->size != next_pindex) {
+		/*
+		 * Only one mapping allowed, otherwise coalescing
+		 * could result in contradictory content in the
+		 * regions.
+		 */
+		if ((prev_object->flags & OBJ_ONEMAPPING) == 0)
+			goto out;
+
+		/* No pages in the region, either resident ... */
+		m = vm_page_find_least(prev_object, next_pindex);
+		if (m != NULL && m->pindex < next_pindex + next_size)
+			goto out;
+		/* ... or swapped out. */
+		if (prev_object->type == OBJT_SWAP) {
+			for (pi = next_pindex; pi < next_pindex + next_size;
+			    pi++) {
+				if (vm_pager_has_page(prev_object, pi, NULL,
+				    NULL))
+					goto out;
+			}
+		}
+
+		/*
+		 * The region must not be shadowed, otherwise an
+		 * instantiated page in our (backing) object could
+		 * leak to the shadow.
+		 */
+		LIST_FOREACH(shadow_object, &prev_object->shadow_head,
+		    shadow_list) {
+			KASSERT(shadow_object->backing_object == prev_object,
+			    ("corrupted shadow"));
+			if (shadow_object->backing_object_offset <
+			    next_pindex + next_size &&
+			    shadow_object->backing_object_offset +
+			    shadow_object->size > next_pindex)
+				goto out;
+		}
 	}
+	ret = TRUE;

 	/*
 	 * Account for the charge.
Lines 2189-2196 vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, Link Here
 	if (next_pindex + next_size > prev_object->size)
 		prev_object->size = next_pindex + next_size;

+out:
 	VM_OBJECT_WUNLOCK(prev_object);
-	return (TRUE);
+	return (ret);
 }

 void
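
The vm_object.c change replaces vm_object_coalesce()'s blanket refusal to
grow an object with ref_count > 1 by a narrower test: growth is still
refused unless the object has only one mapping and the target page range
is provably unused. A condensed sketch of the new predicate (illustrative
only; the helper names are hypothetical stand-ins for the two inline
loops in the hunks above):

	/* Sketch only: shape of the relaxed coalesce test. */
	vm_page_t m;
	boolean_t allow;

	allow = TRUE;
	if (prev_object->ref_count > 1 && prev_object->size != next_pindex) {
		m = vm_page_find_least(prev_object, next_pindex);
		allow = (prev_object->flags & OBJ_ONEMAPPING) != 0 &&
		    /* no resident pages in the range ... */
		    (m == NULL || m->pindex >= next_pindex + next_size) &&
		    /* ... none swapped out (checked only for OBJT_SWAP) ... */
		    !range_swapped_out(prev_object, next_pindex, next_size) &&
		    /* ... and no shadow object overlapping the range. */
		    !range_shadowed(prev_object, next_pindex, next_size);
	}
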
(-)b/sys/vm/vm_page.h (+1 lines)
Lines 227-232 struct vm_domain { Link Here
 	long vmd_segs;	/* bitmask of the segments */
 	boolean_t vmd_oom;
 	int vmd_pass;	/* local pagedaemon pass */
+	int vmd_oom_seq;
 	struct vm_page vmd_marker; /* marker for pagedaemon private use */
 };

(-)b/sys/vm/vm_pageout.c (-14 / +40 lines)
Lines 121-127 static void vm_pageout_init(void); Link Here
 static int vm_pageout_clean(vm_page_t m);
 static int vm_pageout_cluster(vm_page_t m);
 static void vm_pageout_scan(struct vm_domain *vmd, int pass);
-static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass);
+static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
+    int starting_page_shortage);

 SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init,
     NULL);
Lines 158-163 int vm_pages_needed; /* Event on which pageout daemon sleeps */ Link Here
 int vm_pageout_deficit;		/* Estimated number of pages deficit */
 int vm_pageout_pages_needed;	/* flag saying that the pageout daemon needs pages */
 int vm_pageout_wakeup_thresh;
+static int vm_pageout_oom_seq = 24;

 #if !defined(NO_SWAPPING)
 static int vm_pageout_req_swapout;	/* XXX */
Lines 223-228 static int pageout_lock_miss; Link Here
 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
 	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");

+SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq,
+	CTLFLAG_RW, &vm_pageout_oom_seq, 0,
+	"back-to-back calls to oom detector to start OOM");
+
 #define VM_PAGEOUT_PAGE_COUNT 16
 int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;

Lines 1028-1034 vm_pageout_scan(struct vm_domain *vmd, int pass) Link Here
 	vm_page_t m, next;
 	struct vm_pagequeue *pq;
 	vm_object_t object;
-	int act_delta, addl_page_shortage, deficit, maxscan, page_shortage;
+	int act_delta, addl_page_shortage, deficit, maxscan;
+	int page_shortage, starting_page_shortage;
 	int vnodes_skipped = 0;
 	int maxlaunder;
 	boolean_t queues_locked;
Lines 1069-1074 vm_pageout_scan(struct vm_domain *vmd, int pass) Link Here
 		page_shortage = vm_paging_target() + deficit;
 	} else
 		page_shortage = deficit = 0;
+	starting_page_shortage = page_shortage;

 	/*
 	 * maxlaunder limits the number of dirty pages we flush per scan.
Lines 1337-1342 relock_queues: Link Here
 		(void)speedup_syncer();

 	/*
+	 * If we are critically low on one of RAM or swap and low on
+	 * the other, kill the largest process.  However, we avoid
+	 * doing this on the first pass in order to give ourselves a
+	 * chance to flush out dirty vnode-backed pages and to allow
+	 * active pages to be moved to the inactive queue and reclaimed.
+	 */
+	vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage);
+
+	/*
 	 * Compute the number of pages we want to try to move from the
 	 * active queue to the inactive queue.
 	 */
Lines 1445-1459 relock_queues: Link Here
 		}
 	}
 #endif
-
-	/*
-	 * If we are critically low on one of RAM or swap and low on
-	 * the other, kill the largest process.  However, we avoid
-	 * doing this on the first pass in order to give ourselves a
-	 * chance to flush out dirty vnode-backed pages and to allow
-	 * active pages to be moved to the inactive queue and reclaimed.
-	 */
-	vm_pageout_mightbe_oom(vmd, pass);
 }

 static int vm_pageout_oom_vote;
Lines 1464-1481 static int vm_pageout_oom_vote; Link Here
  * failed to reach free target is premature.
  */
 static void
-vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass)
+vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage,
+    int starting_page_shortage)
 {
 	int old_vote;

-	if (pass <= 1 || !((swap_pager_avail < 64 && vm_page_count_min()) ||
-	    (swap_pager_full && vm_paging_target() > 0))) {
+	if (starting_page_shortage <= 0 || starting_page_shortage !=
+	    page_shortage) {
+#if 0
+		if (vmd->vmd_oom_seq != 0)
+			printf("CLR oom_seq %d ps %d sps %d\n", vmd->vmd_oom_seq, page_shortage, starting_page_shortage);
+#endif
+		vmd->vmd_oom_seq = 0;
+	} else
+		vmd->vmd_oom_seq++;
+	if (vmd->vmd_oom_seq < vm_pageout_oom_seq) {
 		if (vmd->vmd_oom) {
 			vmd->vmd_oom = FALSE;
 			atomic_subtract_int(&vm_pageout_oom_vote, 1);
 		}
 		return;
 	}
+#if 0
+	printf("OOM oom_seq %d ps %d sps %d\n", vmd->vmd_oom_seq, page_shortage, starting_page_shortage);
+#endif
+
+	/*
+	 * Do not follow the call sequence until OOM condition is
+	 * cleared.
+	 */
+	vmd->vmd_oom_seq = 0;

 	if (vmd->vmd_oom)
 		return;
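
With these vm_pageout.c changes, vm_pageout_scan() records the shortage
it started with, and vm_pageout_mightbe_oom() compares it against the
shortage remaining after the scan: a scan that reclaims nothing
increments the per-domain vmd_oom_seq counter, any progress resets it,
and only once the counter reaches vm_pageout_oom_seq (24 in this version
of the patch) does the domain vote for an OOM kill. Assuming the patch is
applied, the threshold should be adjustable at runtime through the new
read-write sysctl; the value below is purely illustrative:

	# Tolerate 120 consecutive no-progress scans before the
	# pagedaemon may trigger the OOM killer.
	sysctl vm.pageout_oom_seq=120
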
(-)b/sys/vm/vm_reserv.c (+1 lines)
Lines 762-767 vm_reserv_break(vm_reserv_t rv, vm_page_t m) Link Here
 	} while (i < NPOPMAP);
 	KASSERT(rv->popcnt == 0,
 	    ("vm_reserv_break: reserv %p's popcnt is corrupted", rv));
+	rv->pages->psind = 0;
 	vm_reserv_broken++;
 }
