FreeBSD Bugzilla – Attachment 157913 Details for
Bug 200992
process won't die in thread_suspend_switch
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Help
|
New Account
|
Log In
Remember
[x]
|
Forgot Password
Login:
[x]
[patch]
Return true from callout_stop() if not_on_a_list is true.
1.patch (text/plain), 52.37 KB, created by
Konstantin Belousov
on 2015-06-21 06:31:56 UTC
(
hide
)
Description:
Return true from callout_stop() if not_on_a_list is true.
Filename:
MIME Type:
Creator:
Konstantin Belousov
Created:
2015-06-21 06:31:56 UTC
Size:
52.37 KB
patch
obsolete
>diff --git a/bin/dd/dd.c b/bin/dd/dd.c >index 8ae11a7..210a61c 100644 >--- a/bin/dd/dd.c >+++ b/bin/dd/dd.c >@@ -254,27 +254,24 @@ getfdtype(IO *io) > int type; > > if (fstat(io->fd, &sb) == -1) >- err(1, "%s", io->name); >+ return; > if (S_ISREG(sb.st_mode)) > io->flags |= ISTRUNC; >- if (S_ISCHR(sb.st_mode) || S_ISBLK(sb.st_mode)) { >- if (ioctl(io->fd, FIODTYPE, &type) == -1) { >- err(1, "%s", io->name); >- } else { >- if (type & D_TAPE) >- io->flags |= ISTAPE; >- else if (type & (D_DISK | D_MEM)) >- io->flags |= ISSEEK; >- if (S_ISCHR(sb.st_mode) && (type & D_TAPE) == 0) >- io->flags |= ISCHR; >- } >+ if (S_ISCHR(sb.st_mode) || S_ISBLK(sb.st_mode)) { >+ if (S_ISCHR(sb.st_mode)) >+ io->flags |= ISCHR; >+ if (ioctl(io->fd, FIODTYPE, &type) == -1) >+ return; >+ if (type & D_TAPE) >+ io->flags |= ISTAPE; >+ else if (type & (D_DISK | D_MEM)) >+ io->flags |= ISSEEK; > return; > } >- errno = 0; >- if (lseek(io->fd, (off_t)0, SEEK_CUR) == -1 && errno == ESPIPE) >- io->flags |= ISPIPE; >- else >+ if (lseek(io->fd, (off_t)0, SEEK_CUR) != -1) > io->flags |= ISSEEK; >+ else if (errno == ESPIPE) >+ io->flags |= ISPIPE; > } > > static void >diff --git a/contrib/gdb/gdb/gdbthread.h b/contrib/gdb/gdb/gdbthread.h >index 09dea26..8d77f77 100644 >--- a/contrib/gdb/gdb/gdbthread.h >+++ b/contrib/gdb/gdb/gdbthread.h >@@ -75,6 +75,8 @@ struct thread_info > struct private_thread_info *private; > }; > >+extern int thread_list_empty (void); >+ > /* Create an empty thread list, or empty the existing one. 
*/ > extern void init_thread_list (void); > >diff --git a/contrib/gdb/gdb/infrun.c b/contrib/gdb/gdb/infrun.c >index e84a4c7..55ce97b 100644 >--- a/contrib/gdb/gdb/infrun.c >+++ b/contrib/gdb/gdb/infrun.c >@@ -384,9 +384,22 @@ follow_inferior_reset_breakpoints (void) > insert_breakpoints (); > } > >+void >+clear_step_resume_breakpoint_thread (void) >+{ >+ if (step_resume_breakpoint) >+ step_resume_breakpoint->thread = -1; >+} >+ >+void >+clear_step_resume_breakpoint (void) >+{ >+ step_resume_breakpoint = NULL; >+} >+ > /* EXECD_PATHNAME is assumed to be non-NULL. */ > >-static void >+void > follow_exec (int pid, char *execd_pathname) > { > int saved_pid = pid; >@@ -1648,7 +1661,8 @@ handle_inferior_event (struct execution_control_state *ecs) > > /* This causes the eventpoints and symbol table to be reset. Must > do this now, before trying to determine whether to stop. */ >- follow_exec (PIDGET (inferior_ptid), pending_follow.execd_pathname); >+ target_follow_exec (PIDGET (inferior_ptid), >+ pending_follow.execd_pathname); > xfree (pending_follow.execd_pathname); > > stop_pc = read_pc_pid (ecs->ptid); >diff --git a/contrib/gdb/gdb/objfiles.c b/contrib/gdb/gdb/objfiles.c >index 6179077..8846628 100644 >--- a/contrib/gdb/gdb/objfiles.c >+++ b/contrib/gdb/gdb/objfiles.c >@@ -482,11 +482,11 @@ free_all_objfiles (void) > { > struct objfile *objfile, *temp; > >+ clear_symtab_users (); > ALL_OBJFILES_SAFE (objfile, temp) > { > free_objfile (objfile); > } >- clear_symtab_users (); > } > > /* Relocate OBJFILE to NEW_OFFSETS. There should be OBJFILE->NUM_SECTIONS >diff --git a/contrib/gdb/gdb/target.c b/contrib/gdb/gdb/target.c >index 1f703dd..e9d3beb 100644 >--- a/contrib/gdb/gdb/target.c >+++ b/contrib/gdb/gdb/target.c >@@ -1307,6 +1307,52 @@ target_async_mask (int mask) > } > > /* Look through the list of possible targets for a target that can >+ follow forks. 
*/ >+ >+int >+target_follow_fork (int follow_child) >+{ >+ struct target_ops *t; >+ >+ for (t = current_target.beneath; t != NULL; t = t->beneath) >+ { >+ if (t->to_follow_fork != NULL) >+ { >+ int retval = t->to_follow_fork (t, follow_child); >+ if (targetdebug) >+ fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n", >+ follow_child, retval); >+ return retval; >+ } >+ } >+ >+ /* Some target returned a fork event, but did not know how to follow it. */ >+ internal_error (__FILE__, __LINE__, >+ "could not find a target to follow fork"); >+} >+ >+void >+target_follow_exec (int pid, char *execd_pathname) >+{ >+ struct target_ops *t; >+ >+ for (t = current_target.beneath; t != NULL; t = t->beneath) >+ { >+ if (t->to_follow_exec != NULL) >+ { >+ t->to_follow_exec (pid, execd_pathname); >+ if (targetdebug) >+ fprintf_unfiltered (gdb_stdlog, "target_follow_exec (%d, %s)\n", >+ pid, execd_pathname); >+ return; >+ } >+ } >+ >+ /* If target does not specify a follow_exec handler, call the default. */ >+ follow_exec (pid, execd_pathname); >+} >+ >+/* Look through the list of possible targets for a target that can > execute a run or attach command without any other data. This is > used to locate the default process stratum. 
> >@@ -2159,9 +2205,9 @@ debug_to_remove_vfork_catchpoint (int pid) > } > > static int >-debug_to_follow_fork (int follow_child) >+debug_to_follow_fork (struct target_ops* ops, int follow_child) > { >- int retval = debug_target.to_follow_fork (follow_child); >+ int retval = debug_target.to_follow_fork (ops, follow_child); > > fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n", > follow_child, retval); >diff --git a/contrib/gdb/gdb/target.h b/contrib/gdb/gdb/target.h >index 94ea970..87364f9 100644 >--- a/contrib/gdb/gdb/target.h >+++ b/contrib/gdb/gdb/target.h >@@ -362,7 +362,8 @@ struct target_ops > int (*to_remove_fork_catchpoint) (int); > int (*to_insert_vfork_catchpoint) (int); > int (*to_remove_vfork_catchpoint) (int); >- int (*to_follow_fork) (int); >+ int (*to_follow_fork) (struct target_ops*, int); >+ void (*to_follow_exec) (int, char*); > int (*to_insert_exec_catchpoint) (int); > int (*to_remove_exec_catchpoint) (int); > int (*to_reported_exec_events_per_exec_call) (void); >@@ -761,8 +762,7 @@ extern void target_load (char *arg, int from_tty); > This function returns 1 if the inferior should not be resumed > (i.e. there is another event pending). */ > >-#define target_follow_fork(follow_child) \ >- (*current_target.to_follow_fork) (follow_child) >+int target_follow_fork (int follow_child); > > /* On some targets, we can catch an inferior exec event when it > occurs. These functions insert/remove an already-created >@@ -1248,4 +1248,6 @@ extern void push_remote_target (char *name, int from_tty); > /* Blank target vector entries are initialized to target_ignore. 
*/ > void target_ignore (void); > >+void target_follow_exec (int pid, char *execd_pathname); >+ > #endif /* !defined (TARGET_H) */ >diff --git a/contrib/gdb/gdb/thread.c b/contrib/gdb/gdb/thread.c >index f8cc18d..362d871 100644 >--- a/contrib/gdb/gdb/thread.c >+++ b/contrib/gdb/gdb/thread.c >@@ -65,6 +65,12 @@ static void restore_current_thread (ptid_t); > static void switch_to_thread (ptid_t ptid); > static void prune_threads (void); > >+int >+thread_list_empty () >+{ >+ return thread_list == NULL; >+} >+ > void > delete_step_resume_breakpoint (void *arg) > { >diff --git a/gnu/usr.bin/gdb/arch/amd64/Makefile b/gnu/usr.bin/gdb/arch/amd64/Makefile >index a43e894..efd2908 100644 >--- a/gnu/usr.bin/gdb/arch/amd64/Makefile >+++ b/gnu/usr.bin/gdb/arch/amd64/Makefile >@@ -2,7 +2,7 @@ > > GENSRCS+= xm.h > .if !defined(GDB_CROSS_DEBUGGER) >-LIBSRCS+= fbsd-proc.c fbsd-threads.c gcore.c >+LIBSRCS+= fbsd-nat.c fbsd-proc.c fbsd-threads.c gcore.c > LIBSRCS+= amd64-nat.c amd64bsd-nat.c amd64fbsd-nat.c > .endif > LIBSRCS+= solib.c solib-svr4.c >diff --git a/gnu/usr.bin/gdb/arch/amd64/init.c b/gnu/usr.bin/gdb/arch/amd64/init.c >index 1b296f4..706ff49 100644 >--- a/gnu/usr.bin/gdb/arch/amd64/init.c >+++ b/gnu/usr.bin/gdb/arch/amd64/init.c >@@ -115,6 +115,7 @@ extern initialize_file_ftype _initialize_tui_out; > extern initialize_file_ftype _initialize_tui_regs; > extern initialize_file_ftype _initialize_tui_stack; > extern initialize_file_ftype _initialize_tui_win; >+extern initialize_file_ftype _initialize_fbsdnat; > void > initialize_all_files (void) > { >@@ -231,4 +232,5 @@ initialize_all_files (void) > _initialize_tui_regs (); > _initialize_tui_stack (); > _initialize_tui_win (); >+ _initialize_fbsdnat (); > } >diff --git a/gnu/usr.bin/gdb/arch/arm/Makefile b/gnu/usr.bin/gdb/arch/arm/Makefile >index 22aee06..131e7a6 100644 >--- a/gnu/usr.bin/gdb/arch/arm/Makefile >+++ b/gnu/usr.bin/gdb/arch/arm/Makefile >@@ -1,7 +1,7 @@ > # $FreeBSD$ > > GENSRCS+= xm.h >-LIBSRCS+= armfbsd-nat.c 
>+LIBSRCS+= armfbsd-nat.c fbsd-nat.c > LIBSRCS+= arm-tdep.c armfbsd-tdep.c solib.c solib-svr4.c > .if !defined(GDB_CROSS_DEBUGGER) > LIBSRCS+= fbsd-threads.c >diff --git a/gnu/usr.bin/gdb/arch/arm/init.c b/gnu/usr.bin/gdb/arch/arm/init.c >index d4064da..5382cf8 100644 >--- a/gnu/usr.bin/gdb/arch/arm/init.c >+++ b/gnu/usr.bin/gdb/arch/arm/init.c >@@ -113,6 +113,7 @@ extern initialize_file_ftype _initialize_tui_out; > extern initialize_file_ftype _initialize_tui_regs; > extern initialize_file_ftype _initialize_tui_stack; > extern initialize_file_ftype _initialize_tui_win; >+extern initialize_file_ftype _initialize_fbsdnat; > void > initialize_all_files (void) > { >@@ -225,4 +226,5 @@ initialize_all_files (void) > _initialize_tui_regs (); > _initialize_tui_stack (); > _initialize_tui_win (); >+ _initialize_fbsdnat (); > } >diff --git a/gnu/usr.bin/gdb/arch/i386/Makefile b/gnu/usr.bin/gdb/arch/i386/Makefile >index 3d96278..21631b7 100644 >--- a/gnu/usr.bin/gdb/arch/i386/Makefile >+++ b/gnu/usr.bin/gdb/arch/i386/Makefile >@@ -2,7 +2,7 @@ > > GENSRCS+= xm.h > .if !defined(GDB_CROSS_DEBUGGER) >-LIBSRCS+= fbsd-proc.c fbsd-threads.c gcore.c >+LIBSRCS+= fbsd-nat.c fbsd-proc.c fbsd-threads.c gcore.c > LIBSRCS+= i386-nat.c i386bsd-nat.c i386fbsd-nat.c > .endif > LIBSRCS+= solib.c solib-svr4.c >diff --git a/gnu/usr.bin/gdb/arch/i386/init.c b/gnu/usr.bin/gdb/arch/i386/init.c >index 7675d8e..f7c90ed 100644 >--- a/gnu/usr.bin/gdb/arch/i386/init.c >+++ b/gnu/usr.bin/gdb/arch/i386/init.c >@@ -116,6 +116,7 @@ extern initialize_file_ftype _initialize_tui_out; > extern initialize_file_ftype _initialize_tui_regs; > extern initialize_file_ftype _initialize_tui_stack; > extern initialize_file_ftype _initialize_tui_win; >+extern initialize_file_ftype _initialize_fbsdnat; > void > initialize_all_files (void) > { >@@ -233,4 +234,5 @@ initialize_all_files (void) > _initialize_tui_regs (); > _initialize_tui_stack (); > _initialize_tui_win (); >+ _initialize_fbsdnat (); > } >diff --git 
a/gnu/usr.bin/gdb/arch/mips/Makefile b/gnu/usr.bin/gdb/arch/mips/Makefile >index 24e9cfc..9a991ce 100644 >--- a/gnu/usr.bin/gdb/arch/mips/Makefile >+++ b/gnu/usr.bin/gdb/arch/mips/Makefile >@@ -4,7 +4,7 @@ > # XXX Should set DEFAULT_BFD_VEC based on target. > # > .if !defined(GDB_CROSS_DEBUGGER) >-LIBSRCS+= mipsfbsd-nat.c fbsd-threads.c >+LIBSRCS+= fbsd-nat.c mipsfbsd-nat.c fbsd-threads.c > .endif > LIBSRCS+= solib.c solib-svr4.c > LIBSRCS+= mips-tdep.c mipsfbsd-tdep.c fbsd-proc.c >diff --git a/gnu/usr.bin/gdb/arch/mips/init.c b/gnu/usr.bin/gdb/arch/mips/init.c >index 491b2b3..59bed00 100644 >--- a/gnu/usr.bin/gdb/arch/mips/init.c >+++ b/gnu/usr.bin/gdb/arch/mips/init.c >@@ -112,6 +112,7 @@ extern initialize_file_ftype _initialize_tui_out; > extern initialize_file_ftype _initialize_tui_regs; > extern initialize_file_ftype _initialize_tui_stack; > extern initialize_file_ftype _initialize_tui_win; >+extern initialize_file_ftype _initialize_fbsdnat; > void > initialize_all_files (void) > { >@@ -230,4 +231,5 @@ initialize_all_files (void) > _initialize_tui_regs (); > _initialize_tui_stack (); > _initialize_tui_win (); >+ _initialize_fbsdnat (); > } >diff --git a/gnu/usr.bin/gdb/arch/powerpc/Makefile b/gnu/usr.bin/gdb/arch/powerpc/Makefile >index fa41a23..aab143a 100644 >--- a/gnu/usr.bin/gdb/arch/powerpc/Makefile >+++ b/gnu/usr.bin/gdb/arch/powerpc/Makefile >@@ -1,7 +1,7 @@ > # $FreeBSD$ > > .if !defined(GDB_CROSS_DEBUGGER) >-LIBSRCS+= fbsd-proc.c fbsd-threads.c gcore.c >+LIBSRCS+= fbsd-nat.c fbsd-proc.c fbsd-threads.c gcore.c > LIBSRCS+= ppcfbsd-nat.c > .endif > LIBSRCS+= solib.c solib-svr4.c >diff --git a/gnu/usr.bin/gdb/arch/powerpc/init.c b/gnu/usr.bin/gdb/arch/powerpc/init.c >index 5b96bd5..69fa43e 100644 >--- a/gnu/usr.bin/gdb/arch/powerpc/init.c >+++ b/gnu/usr.bin/gdb/arch/powerpc/init.c >@@ -113,6 +113,7 @@ extern initialize_file_ftype _initialize_tui_out; > extern initialize_file_ftype _initialize_tui_regs; > extern initialize_file_ftype _initialize_tui_stack; 
> extern initialize_file_ftype _initialize_tui_win; >+extern initialize_file_ftype _initialize_fbsdnat; > void > initialize_all_files (void) > { >@@ -227,4 +228,5 @@ initialize_all_files (void) > _initialize_tui_regs (); > _initialize_tui_stack (); > _initialize_tui_win (); >+ _initialize_fbsdnat (); > } >diff --git a/gnu/usr.bin/gdb/arch/powerpc64/Makefile b/gnu/usr.bin/gdb/arch/powerpc64/Makefile >index fa41a23..aab143a 100644 >--- a/gnu/usr.bin/gdb/arch/powerpc64/Makefile >+++ b/gnu/usr.bin/gdb/arch/powerpc64/Makefile >@@ -1,7 +1,7 @@ > # $FreeBSD$ > > .if !defined(GDB_CROSS_DEBUGGER) >-LIBSRCS+= fbsd-proc.c fbsd-threads.c gcore.c >+LIBSRCS+= fbsd-nat.c fbsd-proc.c fbsd-threads.c gcore.c > LIBSRCS+= ppcfbsd-nat.c > .endif > LIBSRCS+= solib.c solib-svr4.c >diff --git a/gnu/usr.bin/gdb/arch/powerpc64/init.c b/gnu/usr.bin/gdb/arch/powerpc64/init.c >index 5b96bd5..69fa43e 100644 >--- a/gnu/usr.bin/gdb/arch/powerpc64/init.c >+++ b/gnu/usr.bin/gdb/arch/powerpc64/init.c >@@ -113,6 +113,7 @@ extern initialize_file_ftype _initialize_tui_out; > extern initialize_file_ftype _initialize_tui_regs; > extern initialize_file_ftype _initialize_tui_stack; > extern initialize_file_ftype _initialize_tui_win; >+extern initialize_file_ftype _initialize_fbsdnat; > void > initialize_all_files (void) > { >@@ -227,4 +228,5 @@ initialize_all_files (void) > _initialize_tui_regs (); > _initialize_tui_stack (); > _initialize_tui_win (); >+ _initialize_fbsdnat (); > } >diff --git a/gnu/usr.bin/gdb/arch/sparc64/init.c b/gnu/usr.bin/gdb/arch/sparc64/init.c >index d2cf24e..d19d12f 100644 >--- a/gnu/usr.bin/gdb/arch/sparc64/init.c >+++ b/gnu/usr.bin/gdb/arch/sparc64/init.c >@@ -114,6 +114,7 @@ extern initialize_file_ftype _initialize_tui_out; > extern initialize_file_ftype _initialize_tui_regs; > extern initialize_file_ftype _initialize_tui_stack; > extern initialize_file_ftype _initialize_tui_win; >+extern initialize_file_ftype _initialize_fbsdnat; > void > initialize_all_files (void) > { >@@ 
-229,4 +230,5 @@ initialize_all_files (void) > _initialize_tui_regs (); > _initialize_tui_stack (); > _initialize_tui_win (); >+ _initialize_fbsdnat (); > } >diff --git a/gnu/usr.bin/gdb/libgdb/fbsd-nat.c b/gnu/usr.bin/gdb/libgdb/fbsd-nat.c >new file mode 100644 >index 0000000..b7e8ffb >--- /dev/null >+++ b/gnu/usr.bin/gdb/libgdb/fbsd-nat.c >@@ -0,0 +1,342 @@ >+/* Native-dependent code for FreeBSD. >+ >+ Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc. >+ >+ This file is part of GDB. >+ >+ This program is free software; you can redistribute it and/or modify >+ it under the terms of the GNU General Public License as published by >+ the Free Software Foundation; either version 2 of the License, or >+ (at your option) any later version. >+ >+ This program is distributed in the hope that it will be useful, >+ but WITHOUT ANY WARRANTY; without even the implied warranty of >+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the >+ GNU General Public License for more details. >+ >+ You should have received a copy of the GNU General Public License >+ along with this program; if not, write to the Free Software >+ Foundation, Inc., 51 Franklin Street, Fifth Floor, >+ Boston, MA 02110-1301, USA. 
*/ >+ >+#include "defs.h" >+#include "inferior.h" >+#include "symfile.h" >+#include "gdbcore.h" >+#include "gdbthread.h" >+#include "gdb_assert.h" >+#include <sys/types.h> >+#include <sys/ptrace.h> >+#include <sys/wait.h> >+ >+extern struct target_ops child_ops; >+void clear_step_resume_breakpoint (void); >+void clear_step_resume_breakpoint_thread (void); >+void (*reactivate_threads) (char*) = NULL; >+void (*disable_threads) (void) = NULL; >+ >+static void (*mourn_inferior_beneath) (void); >+static void (*detach_beneath) (char *args, int from_tty); >+static ptid_t (*wait_beneath) (ptid_t ptid, >+ struct target_waitstatus *ourstatus); >+int follow_event_pid = 0; >+ >+/* Return a the name of file that can be opened to get the symbols for >+ the child process identified by PID. */ >+ >+char * >+fbsd_pid_to_exec_file (int pid) >+{ >+ size_t len = MAXPATHLEN; >+ char *buf = xcalloc (len, sizeof (char)); >+ char *path; >+ >+#ifdef KERN_PROC_PATHNAME >+ int mib[4]; >+ >+ mib[0] = CTL_KERN; >+ mib[1] = KERN_PROC; >+ mib[2] = KERN_PROC_PATHNAME; >+ mib[3] = pid; >+ if (sysctl (mib, 4, buf, &len, NULL, 0) == 0) >+ return buf; >+#endif >+ >+ path = xstrprintf ("/proc/%d/file", pid); >+ if (readlink (path, buf, MAXPATHLEN) == -1) >+ { >+ xfree (buf); >+ buf = NULL; >+ } >+ >+ xfree (path); >+ return buf; >+} >+ >+/* Wait for the child specified by PTID to do something. Return the >+ process ID of the child, or MINUS_ONE_PTID in case of error; store >+ the status in *OURSTATUS. 
*/ >+ >+static ptid_t >+inf_ptrace_wait (ptid_t ptid, struct target_waitstatus *ourstatus) >+{ >+ pid_t pid; >+ int status, save_errno; >+ >+ do >+ { >+ set_sigint_trap (); >+ set_sigio_trap (); >+ do >+ { >+ pid = waitpid (PIDGET (ptid), &status, 0); >+ save_errno = errno; >+ } >+ while (pid == -1 && errno == EINTR); >+ >+ clear_sigio_trap (); >+ clear_sigint_trap (); >+ >+ if (pid == -1) >+ { >+ fprintf_unfiltered (gdb_stderr, >+ _("Child process unexpectedly missing: %s.\n"), >+ safe_strerror (save_errno)); >+ >+ /* Claim it exited with unknown signal. */ >+ ourstatus->kind = TARGET_WAITKIND_SIGNALLED; >+ ourstatus->value.sig = TARGET_SIGNAL_UNKNOWN; >+ return minus_one_ptid; >+ } >+ >+ /* Ignore terminated detached child processes. */ >+ if (!WIFSTOPPED (status) && pid != PIDGET (inferior_ptid)) >+ pid = -1; >+ } >+ while (pid == -1); >+ >+ store_waitstatus (ourstatus, status); >+ return pid_to_ptid (pid); >+} >+ >+static ptid_t >+fbsd_wait (ptid_t ptid, struct target_waitstatus *ourstatus) >+{ >+ long lwp; >+ struct ptrace_lwpinfo lwpinfo; >+ struct target_waitstatus stat; >+ ptid_t ret; >+ static ptid_t forking_child = {0,0,0}; >+ >+ ret = wait_beneath (ptid, ourstatus); >+ >+ if (PIDGET (ret) >= 0 && ourstatus->kind == TARGET_WAITKIND_STOPPED && >+ (ourstatus->value.sig == TARGET_SIGNAL_TRAP || >+ ourstatus->value.sig == TARGET_SIGNAL_STOP) && >+ (ptrace(PT_LWPINFO, PIDGET (ret), (caddr_t)&lwpinfo, >+ sizeof lwpinfo) == 0)) >+ { >+ if (lwpinfo.pl_flags & PL_FLAG_CHILD) >+ { >+ /* Leave the child in a stopped state until we get a fork event in >+ the parent. That's when we decide which process to follow. */ >+ ourstatus->kind = TARGET_WAITKIND_IGNORE; >+ forking_child = ret; >+ } >+ else if (lwpinfo.pl_flags & PL_FLAG_FORKED) >+ { >+ /* We'd better be in the middle of processing a fork() event. 
*/ >+ gdb_assert (!ptid_equal (forking_child, null_ptid)); >+ ourstatus->kind = TARGET_WAITKIND_FORKED; >+ ourstatus->value.related_pid = lwpinfo.pl_child_pid; >+ forking_child = null_ptid; >+ } >+ else if (lwpinfo.pl_flags & PL_FLAG_EXEC && >+ PIDGET (ret) == follow_event_pid) >+ { >+ ourstatus->kind = TARGET_WAITKIND_EXECD; >+ ourstatus->value.execd_pathname = >+ xstrdup (fbsd_pid_to_exec_file (PIDGET (ret))); >+ } >+ } >+ >+ return ret; >+} >+ >+static void >+fbsd_enable_event_reporting (int pid) >+{ >+#ifdef PT_FOLLOW_FORK >+ follow_event_pid = pid; >+ if (ptrace(PT_FOLLOW_FORK, pid, 0, 1) < 0) >+ error (_("Cannot follow fork on this target.")); >+#endif >+} >+ >+static void >+fbsd_post_attach (int pid) >+{ >+ fbsd_enable_event_reporting (pid); >+} >+ >+static void >+fbsd_post_startup_inferior (ptid_t ptid) >+{ >+ fbsd_enable_event_reporting (PIDGET (ptid)); >+} >+ >+int >+fbsd_follow_fork (struct target_ops *ops, int follow_child) >+{ >+ ptid_t last_ptid, ret, child_ptid; >+ struct target_waitstatus last_status; >+ int parent_pid, child_pid; >+ struct target_waitstatus ourstatus; >+ >+ get_last_target_status (&last_ptid, &last_status); >+ parent_pid = PIDGET (last_ptid); >+ child_pid = last_status.value.related_pid; >+ >+ if (follow_child) >+ { >+ detach_breakpoints (child_pid); >+ remove_breakpoints (); >+ child_ptid = pid_to_ptid (child_pid); >+ >+ target_detach (NULL, 0); >+ inferior_ptid = child_ptid; >+ >+ /* Reinstall ourselves, since we might have been removed in >+ target_detach (which does other necessary cleanup). */ >+ push_target (ops); >+ >+ /* Need to restore some of the actions done by the threaded detach */ >+ if (reactivate_threads) >+ { >+ reactivate_threads (fbsd_pid_to_exec_file (child_pid)); >+ reactivate_threads = NULL; >+ } >+ >+ /* Reset breakpoints in the child as appropriate. */ >+ clear_step_resume_breakpoint_thread (); >+ follow_inferior_reset_breakpoints (); >+ >+ /* Enable fork/exec event reporting for the child. 
*/ >+ fbsd_enable_event_reporting (child_pid); >+ } >+ else /* Follow parent */ >+ { >+ /* Before detaching from the child, remove all breakpoints from >+ it. (This won't actually modify the breakpoint list, but will >+ physically remove the breakpoints from the child.) */ >+ detach_breakpoints (child_pid); >+ ptrace (PT_DETACH, child_pid, (caddr_t) 1, 0); >+ } >+ >+ return 0; >+} >+ >+/* EXECD_PATHNAME is assumed to be non-NULL. */ >+ >+static void >+fbsd_follow_exec (int pid, char *execd_pathname) >+{ >+ struct target_waitstatus status; >+ ptid_t ret = inferior_ptid; >+ >+ /* This is an exec event that we actually wish to pay attention to. >+ Refresh our symbol table to the newly exec'd program, remove any >+ momentary bp's, etc. >+ >+ If there are breakpoints, they aren't really inserted now, >+ since the exec() transformed our inferior into a fresh set >+ of instructions. >+ >+ We want to preserve symbolic breakpoints on the list, since >+ we have hopes that they can be reset after the new a.out's >+ symbol table is read. >+ >+ However, any "raw" breakpoints must be removed from the list >+ (e.g., the solib bp's), since their address is probably invalid >+ now. >+ >+ And, we DON'T want to call delete_breakpoints() here, since >+ that may write the bp's "shadow contents" (the instruction >+ value that was overwritten witha TRAP instruction). Since >+ we now have a new a.out, those shadow contents aren't valid. */ >+ update_breakpoints_after_exec (); >+ >+ /* If there was one, it's gone now. We cannot truly step-to-next >+ statement through an exec(). */ >+ clear_step_resume_breakpoint (); >+ step_range_start = 0; >+ step_range_end = 0; >+ >+ /* What is this a.out's name? */ >+ printf_unfiltered (_("Executing new program: %s\n"), execd_pathname); >+ >+ /* We've followed the inferior through an exec. Therefore, the >+ inferior has essentially been killed & reborn. 
*/ >+ >+ gdb_flush (gdb_stdout); >+ >+ /* Disable thread library */ >+ if (disable_threads) >+ { >+ disable_threads (); >+ disable_threads = NULL; >+ } >+ >+ generic_mourn_inferior (); >+ inferior_ptid = ret; >+ >+ /* That a.out is now the one to use. */ >+ exec_file_attach (execd_pathname, 0); >+ >+ /* And also is where symbols can be found. */ >+ symbol_file_add_main (execd_pathname, 0); >+ >+ /* Reset the shared library package. This ensures that we get >+ a shlib event when the child reaches "_start", at which point >+ the dld will have had a chance to initialize the child. */ >+#if defined(SOLIB_RESTART) >+ SOLIB_RESTART (); >+#endif >+#ifdef SOLIB_CREATE_INFERIOR_HOOK >+ SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid)); >+#else >+ solib_create_inferior_hook (); >+#endif >+ >+ /* Reinsert all breakpoints. (Those which were symbolic have >+ been reset to the proper address in the new a.out, thanks >+ to symbol_file_command...) */ >+ insert_breakpoints (); >+} >+ >+static void fbsd_mourn_inferior (void) >+{ >+ follow_event_pid = 0; >+ mourn_inferior_beneath (); >+} >+ >+static void fbsd_detach (char *args, int from_tty) >+{ >+ follow_event_pid = 0; >+ detach_beneath (args, from_tty); >+} >+ >+void >+_initialize_fbsdnat (void) >+{ >+ wait_beneath = inf_ptrace_wait; >+ detach_beneath = child_ops.to_detach; >+ mourn_inferior_beneath = child_ops.to_mourn_inferior; >+ child_ops.to_wait = fbsd_wait; >+ child_ops.to_detach = fbsd_detach; >+ child_ops.to_mourn_inferior = fbsd_mourn_inferior; >+ child_ops.to_post_attach = fbsd_post_attach; >+ child_ops.to_post_startup_inferior = fbsd_post_startup_inferior; >+ child_ops.to_follow_fork = fbsd_follow_fork; >+ child_ops.to_follow_exec = fbsd_follow_exec; >+} >diff --git a/gnu/usr.bin/gdb/libgdb/fbsd-threads.c b/gnu/usr.bin/gdb/libgdb/fbsd-threads.c >index 78819a7..86bb4f0 100644 >--- a/gnu/usr.bin/gdb/libgdb/fbsd-threads.c >+++ b/gnu/usr.bin/gdb/libgdb/fbsd-threads.c >@@ -68,6 +68,9 @@ extern struct target_ops core_ops; > 
> /* Pointer to the next function on the objfile event chain. */ > static void (*target_new_objfile_chain) (struct objfile *objfile); >+ >+/* Non-zero while processing thread library re-activation after fork() */ >+static int fbsd_forking; > > /* Non-zero if there is a thread module */ > static int fbsd_thread_present; >@@ -154,6 +157,10 @@ static int fbsd_thread_alive (ptid_t ptid); > static void attach_thread (ptid_t ptid, const td_thrhandle_t *th_p, > const td_thrinfo_t *ti_p, int verbose); > static void fbsd_thread_detach (char *args, int from_tty); >+extern void (*reactivate_threads) (char*); >+extern void (*disable_threads) (void); >+static void fbsd_thread_activate (void); >+static void fbsd_thread_deactivate (void); > > /* Building process ids. */ > >@@ -405,15 +412,50 @@ disable_thread_event_reporting (void) > td_death_bp_addr = 0; > } > >+static void >+fbsd_thread_reactivate_after_fork (char *pathname) >+{ >+ fbsd_forking = 1; >+ >+ /* That a.out is now the one to use. */ >+ exec_file_attach (pathname, 0); >+ >+ /* And also is where symbols can be found. 
*/ >+ symbol_file_add_main (pathname, 0); >+ push_target (&fbsd_thread_ops); >+ >+#ifdef SOLIB_CREATE_INFERIOR_HOOK >+ SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid)); >+#else >+ solib_create_inferior_hook (); >+#endif >+ fbsd_forking = 0; >+} >+ >+static void >+fbsd_thread_disable_after_exec (void) >+{ >+ if (fbsd_thread_active) >+ fbsd_thread_deactivate (); >+ >+ unpush_target (&fbsd_thread_ops); >+} >+ > static void > fbsd_thread_activate (void) > { > fbsd_thread_active = 1; >+ reactivate_threads = fbsd_thread_reactivate_after_fork; >+ disable_threads = fbsd_thread_disable_after_exec; > init_thread_list(); > if (fbsd_thread_core == 0) > enable_thread_event_reporting (); >- fbsd_thread_find_new_threads (); >- get_current_thread (); >+ >+ if (!fbsd_forking) >+ { >+ fbsd_thread_find_new_threads (); >+ get_current_thread (); >+ } > } > > static void >@@ -626,7 +668,7 @@ fbsd_thread_resume (ptid_t ptid, int step, enum target_signal signo) > } > > lwp = GET_LWP (work_ptid); >- if (lwp == 0) >+ if (lwp == 0 && GET_THREAD (work_ptid) != 0) > { > /* check user thread */ > ret = td_ta_map_id2thr_p (thread_agent, GET_THREAD(work_ptid), &th); >@@ -790,6 +832,9 @@ fbsd_thread_wait (ptid_t ptid, struct target_waitstatus *ourstatus) > ret = child_ops.to_wait (ptid, ourstatus); > if (GET_PID(ret) >= 0 && ourstatus->kind == TARGET_WAITKIND_STOPPED) > { >+ if (thread_list_empty ()) >+ fbsd_thread_find_new_threads (); >+ > lwp = get_current_lwp (GET_PID(ret)); > ret = thread_from_lwp (BUILD_LWP(lwp, GET_PID(ret)), > &th, &ti); >@@ -1065,6 +1110,9 @@ fbsd_thread_create_inferior (char *exec_file, char *allargs, char **env) > static void > fbsd_thread_post_startup_inferior (ptid_t ptid) > { >+ if (child_ops.to_post_startup_inferior) >+ child_ops.to_post_startup_inferior (ptid); >+ > if (fbsd_thread_present && !fbsd_thread_active) > { > /* The child process is now the actual multi-threaded >diff --git a/lib/libc/amd64/sys/__vdso_gettc.c b/lib/libc/amd64/sys/__vdso_gettc.c >index 
c6f2dfb..f64b94d 100644 >--- a/lib/libc/amd64/sys/__vdso_gettc.c >+++ b/lib/libc/amd64/sys/__vdso_gettc.c >@@ -43,7 +43,6 @@ __vdso_gettc_low(const struct vdso_timehands *th) > return (rv); > } > >-#pragma weak __vdso_gettc > u_int > __vdso_gettc(const struct vdso_timehands *th) > { >@@ -51,7 +50,6 @@ __vdso_gettc(const struct vdso_timehands *th) > return (th->th_x86_shift > 0 ? __vdso_gettc_low(th) : rdtsc32()); > } > >-#pragma weak __vdso_gettimekeep > int > __vdso_gettimekeep(struct vdso_timekeep **tk) > { >diff --git a/lib/libc/i386/sys/__vdso_gettc.c b/lib/libc/i386/sys/__vdso_gettc.c >index c6f2dfb..f64b94d 100644 >--- a/lib/libc/i386/sys/__vdso_gettc.c >+++ b/lib/libc/i386/sys/__vdso_gettc.c >@@ -43,7 +43,6 @@ __vdso_gettc_low(const struct vdso_timehands *th) > return (rv); > } > >-#pragma weak __vdso_gettc > u_int > __vdso_gettc(const struct vdso_timehands *th) > { >@@ -51,7 +50,6 @@ __vdso_gettc(const struct vdso_timehands *th) > return (th->th_x86_shift > 0 ? __vdso_gettc_low(th) : rdtsc32()); > } > >-#pragma weak __vdso_gettimekeep > int > __vdso_gettimekeep(struct vdso_timekeep **tk) > { >diff --git a/lib/libc/sys/__vdso_gettimeofday.c b/lib/libc/sys/__vdso_gettimeofday.c >index a305173..97626a1 100644 >--- a/lib/libc/sys/__vdso_gettimeofday.c >+++ b/lib/libc/sys/__vdso_gettimeofday.c >@@ -79,7 +79,6 @@ binuptime(struct bintime *bt, struct vdso_timekeep *tk, int abs) > > static struct vdso_timekeep *tk; > >-#pragma weak __vdso_gettimeofday > int > __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) > { >@@ -102,7 +101,6 @@ __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) > return (0); > } > >-#pragma weak __vdso_clock_gettime > int > __vdso_clock_gettime(clockid_t clock_id, struct timespec *ts) > { >diff --git a/lib/libc/sys/trivial-vdso_tc.c b/lib/libc/sys/trivial-vdso_tc.c >index b99bbc4..51d2081 100644 >--- a/lib/libc/sys/trivial-vdso_tc.c >+++ b/lib/libc/sys/trivial-vdso_tc.c >@@ -31,7 +31,6 @@ __FBSDID("$FreeBSD$"); > #include 
<sys/vdso.h> > #include <errno.h> > >-#pragma weak __vdso_gettc > u_int > __vdso_gettc(const struct vdso_timehands *th) > { >@@ -39,7 +38,6 @@ __vdso_gettc(const struct vdso_timehands *th) > return (0); > } > >-#pragma weak __vdso_gettimekeep > int > __vdso_gettimekeep(struct vdso_timekeep **tk) > { >diff --git a/share/man/man9/fpu_kern.9 b/share/man/man9/fpu_kern.9 >index 748e6fc..c861073 100644 >--- a/share/man/man9/fpu_kern.9 >+++ b/share/man/man9/fpu_kern.9 >@@ -120,6 +120,16 @@ could be used from both kernel thread and syscall contexts. > The > .Fn fpu_kern_leave > function correctly handles such contexts. >+.It Dv FPU_KERN_NOCTX >+Avoid nesting save area. >+If the flag is specified, the >+.Fa ctx >+must be passed as >+.Va NULL . >+The flag should only be used for really short code blocks >+which can be executed in a critical section. >+It avoids the need to allocate the FPU context by the cost >+of increased system latency. > .El > .El > .Pp >diff --git a/sys/amd64/amd64/fpu.c b/sys/amd64/amd64/fpu.c >index f30c073..1a480ee 100644 >--- a/sys/amd64/amd64/fpu.c >+++ b/sys/amd64/amd64/fpu.c >@@ -631,6 +631,8 @@ fpudna(void) > */ > critical_enter(); > >+ KASSERT((curpcb->pcb_flags & PCB_FPUNOSAVE) == 0, >+ ("fpudna while in fpu_kern_enter(FPU_KERN_NOCTX)")); > if (PCPU_GET(fpcurthread) == curthread) { > printf("fpudna: fpcurthread == curthread\n"); > stop_emulating(); >@@ -661,7 +663,8 @@ fpudna(void) > * fpu_initialstate, to ignite the XSAVEOPT > * tracking engine. 
> */ >- bcopy(fpu_initialstate, curpcb->pcb_save, cpu_max_ext_state_size); >+ bcopy(fpu_initialstate, curpcb->pcb_save, >+ cpu_max_ext_state_size); > fpurestore(curpcb->pcb_save); > if (curpcb->pcb_initial_fpucw != __INITIAL_FPUCW__) > fldcw(curpcb->pcb_initial_fpucw); >@@ -959,11 +962,36 @@ fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags) > { > struct pcb *pcb; > >+ KASSERT((flags & FPU_KERN_NOCTX) != 0 || ctx != NULL, >+ ("ctx is required when !FPU_KERN_NOCTX")); >+ pcb = td->td_pcb; >+ KASSERT((pcb->pcb_flags & PCB_FPUNOSAVE) == 0, >+ ("recursive fpu_kern_enter while in PCB_FPUNOSAVE state")); >+ if ((flags & FPU_KERN_NOCTX) != 0) { >+ critical_enter(); >+ stop_emulating(); >+ if (curthread == PCPU_GET(fpcurthread)) { >+ fpusave(curpcb->pcb_save); >+ PCPU_SET(fpcurthread, NULL); >+ } else { >+ KASSERT(PCPU_GET(fpcurthread) == NULL, >+ ("invalid fpcurthread")); >+ } >+ >+ /* >+ * This breaks XSAVEOPT tracker, but >+ * PCB_FPUNOSAVE state is supposed to never need to >+ * save FPU context at all. 
>+ */ >+ fpurestore(fpu_initialstate); >+ set_pcb_flags(pcb, PCB_KERNFPU | PCB_FPUNOSAVE | >+ PCB_FPUINITDONE); >+ return (0); >+ } > if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) { > ctx->flags = FPU_KERN_CTX_DUMMY; > return (0); > } >- pcb = td->td_pcb; > KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save == > get_pcb_user_save_pcb(pcb), ("mangled pcb_save")); > ctx->flags = 0; >@@ -982,15 +1010,26 @@ fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx) > { > struct pcb *pcb; > >+ pcb = td->td_pcb; >+ if ((pcb->pcb_flags & PCB_FPUNOSAVE) != 0) { >+ KASSERT(ctx == NULL, ("non-null ctx after FPU_KERN_NOCTX")); >+ KASSERT(PCPU_GET(fpcurthread) == NULL, >+ ("non-NULL fpcurthread for PCB_FPUNOSAVE")); >+ CRITICAL_ASSERT(td); >+ clear_pcb_flags(pcb, PCB_FPUNOSAVE | PCB_FPUINITDONE); >+ start_emulating(); >+ critical_exit(); >+ goto restore_flags; >+ } > if (is_fpu_kern_thread(0) && (ctx->flags & FPU_KERN_CTX_DUMMY) != 0) > return (0); > KASSERT((ctx->flags & FPU_KERN_CTX_DUMMY) == 0, ("dummy ctx")); >- pcb = td->td_pcb; > critical_enter(); > if (curthread == PCPU_GET(fpcurthread)) > fpudrop(); > critical_exit(); > pcb->pcb_save = ctx->prev; >+restore_flags: > if (pcb->pcb_save == get_pcb_user_save_pcb(pcb)) { > if ((pcb->pcb_flags & PCB_USERFPUINITDONE) != 0) { > set_pcb_flags(pcb, PCB_FPUINITDONE); >diff --git a/sys/amd64/amd64/initcpu.c b/sys/amd64/amd64/initcpu.c >index 36f2d0f..4966cb7 100644 >--- a/sys/amd64/amd64/initcpu.c >+++ b/sys/amd64/amd64/initcpu.c >@@ -90,6 +90,11 @@ static void > init_amd(void) > { > >+ if (CPUID_TO_FAMILY(cpu_id) == 0x9) { >+ if ((cpu_feature2 & CPUID2_HV) == 0) >+ wrmsr(MSR_HWCR, rdmsr(MSR_HWCR) | (1 << 6)); >+ } >+ > /* > * Work around Erratum 721 for Family 10h and 12h processors. 
> * These processors may incorrectly update the stack pointer >diff --git a/sys/amd64/include/fpu.h b/sys/amd64/include/fpu.h >index 78d2fee..433457f 100644 >--- a/sys/amd64/include/fpu.h >+++ b/sys/amd64/include/fpu.h >@@ -86,6 +86,7 @@ void fpu_save_area_reset(struct savefpu *fsa); > #define FPU_KERN_NORMAL 0x0000 > #define FPU_KERN_NOWAIT 0x0001 > #define FPU_KERN_KTHR 0x0002 >+#define FPU_KERN_NOCTX 0x0004 > > #endif > >diff --git a/sys/amd64/include/pcb.h b/sys/amd64/include/pcb.h >index 153393f..e3a64df 100644 >--- a/sys/amd64/include/pcb.h >+++ b/sys/amd64/include/pcb.h >@@ -79,6 +79,7 @@ struct pcb { > #define PCB_FPUINITDONE 0x08 /* fpu state is initialized */ > #define PCB_USERFPUINITDONE 0x10 /* fpu user state is initialized */ > #define PCB_32BIT 0x40 /* process has 32 bit context (segs etc) */ >+#define PCB_FPUNOSAVE 0x80 /* no save area for current FPU ctx */ > > uint16_t pcb_initial_fpucw; > >diff --git a/sys/dev/drm2/i915/intel_iic.c b/sys/dev/drm2/i915/intel_iic.c >index 36a5b9e..89a870b 100644 >--- a/sys/dev/drm2/i915/intel_iic.c >+++ b/sys/dev/drm2/i915/intel_iic.c >@@ -142,6 +142,7 @@ intel_iic_reset(struct drm_device *dev) > > dev_priv = dev->dev_private; > I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); >+ I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0); > } > > static int >diff --git a/sys/fs/msdosfs/msdosfs_fat.c b/sys/fs/msdosfs/msdosfs_fat.c >index cf03e00..b65aa74 100644 >--- a/sys/fs/msdosfs/msdosfs_fat.c >+++ b/sys/fs/msdosfs/msdosfs_fat.c >@@ -689,8 +689,11 @@ chainalloc(struct msdosfsmount *pmp, u_long start, u_long count, > pmp->pm_nxtfree = CLUST_FIRST; > pmp->pm_flags |= MSDOSFS_FSIMOD; > error = fatchain(pmp, start, count, fillwith); >- if (error != 0) >+ if (error != 0) { >+ for (cl = start, n = count; n-- > 0;) >+ usemap_free(pmp, cl++); > return (error); >+ } > #ifdef MSDOSFS_DEBUG > printf("clusteralloc(): allocated cluster chain at %lu (%lu clusters)\n", > start, count); >diff --git a/sys/i386/i386/pmap.c 
b/sys/i386/i386/pmap.c >index a642d10..f3e1f5c 100644 >--- a/sys/i386/i386/pmap.c >+++ b/sys/i386/i386/pmap.c >@@ -3387,11 +3387,19 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, > PMAP_LOCK(pmap); > sched_pin(); > >- /* >- * In the case that a page table page is not >- * resident, we are creating it here. >- */ >- if (va < VM_MAXUSER_ADDRESS) { >+ pde = pmap_pde(pmap, va); >+ if ((*pde & PG_PS) != 0) { >+ /* PG_V is asserted by pmap_demote_pde */ >+ pmap_demote_pde(pmap, pde, va); >+ if (va < VM_MAXUSER_ADDRESS) { >+ mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME); >+ mpte->wire_count++; >+ } >+ } else if (va < VM_MAXUSER_ADDRESS) { >+ /* >+ * In the case that a page table page is not resident, >+ * we are creating it here. >+ */ > mpte = pmap_allocpte(pmap, va, flags); > if (mpte == NULL) { > KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0, >@@ -3402,10 +3410,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, > return (KERN_RESOURCE_SHORTAGE); > } > } >- >- pde = pmap_pde(pmap, va); >- if ((*pde & PG_PS) != 0) >- panic("pmap_enter: attempted pmap_enter on 4MB page"); > pte = pmap_pte_quick(pmap, va); > > /* >diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c >index 27c6f40..21c82f3 100644 >--- a/sys/kern/kern_proc.c >+++ b/sys/kern/kern_proc.c >@@ -3001,6 +3001,7 @@ resume_all_proc(void) > > cp = curproc; > sx_xlock(&allproc_lock); >+again: > LIST_REMOVE(cp, p_list); > LIST_INSERT_HEAD(&allproc, cp, p_list); > for (;;) { >@@ -3021,6 +3022,12 @@ resume_all_proc(void) > PROC_UNLOCK(p); > } > } >+ /* Did the loop above miss any stopped process? */ >+ LIST_FOREACH(p, &allproc, p_list) { >+ /* No need for proc lock. 
*/ >+ if ((p->p_flag & P_TOTAL_STOP) != 0) >+ goto again; >+ } > sx_xunlock(&allproc_lock); > } > >diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c >index 01da596..06b5abe 100644 >--- a/sys/kern/kern_timeout.c >+++ b/sys/kern/kern_timeout.c >@@ -1237,7 +1237,7 @@ again: > CC_UNLOCK(cc); > if (sq_locked) > sleepq_release(&cc_exec_waiting(cc, direct)); >- return (0); >+ return (not_on_a_list); > } > > if (safe) { >@@ -1352,13 +1352,13 @@ again: > CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p", > c, c->c_func, c->c_arg); > CC_UNLOCK(cc); >- return (0); >+ return (not_on_a_list); > } > CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", > c, c->c_func, c->c_arg); > CC_UNLOCK(cc); > KASSERT(!sq_locked, ("sleepqueue chain still locked")); >- return (0); >+ return (not_on_a_list); > } > if (sq_locked) > sleepq_release(&cc_exec_waiting(cc, direct)); >diff --git a/sys/nlm/nlm_advlock.c b/sys/nlm/nlm_advlock.c >index 003a43d..456af87 100644 >--- a/sys/nlm/nlm_advlock.c >+++ b/sys/nlm/nlm_advlock.c >@@ -210,7 +210,7 @@ nlm_advlock_internal(struct vnode *vp, void *id, int op, struct flock *fl, > struct rpc_callextra ext; > struct nlm_feedback_arg nf; > AUTH *auth; >- struct ucred *cred; >+ struct ucred *cred, *cred1; > struct nlm_file_svid *ns; > int svid; > int error; >@@ -240,15 +240,17 @@ nlm_advlock_internal(struct vnode *vp, void *id, int op, struct flock *fl, > else > retries = INT_MAX; > >- if (unlock_vp) >- VOP_UNLOCK(vp, 0); >- > /* > * We need to switch to mount-point creds so that we can send >- * packets from a privileged port. >+ * packets from a privileged port. Reference mnt_cred and >+ * switch to them before unlocking the vnode, since mount >+ * point could be unmounted right after unlock. 
> */ > cred = td->td_ucred; > td->td_ucred = vp->v_mount->mnt_cred; >+ crhold(td->td_ucred); >+ if (unlock_vp) >+ VOP_UNLOCK(vp, 0); > > host = nlm_find_host_by_name(servername, sa, vers); > auth = authunix_create(cred); >@@ -373,7 +375,9 @@ nlm_advlock_internal(struct vnode *vp, void *id, int op, struct flock *fl, > if (ns) > nlm_free_svid(ns); > >+ cred1 = td->td_ucred; > td->td_ucred = cred; >+ crfree(cred1); > AUTH_DESTROY(auth); > > nlm_host_release(host); >diff --git a/sys/sys/vdso.h b/sys/sys/vdso.h >index d905304..9484f32 100644 >--- a/sys/sys/vdso.h >+++ b/sys/sys/vdso.h >@@ -60,6 +60,11 @@ struct timespec; > struct timeval; > struct timezone; > >+#pragma weak __vdso_clock_gettime >+#pragma weak __vdso_gettimeofday >+#pragma weak __vdso_gettc >+#pragma weak __vdso_gettimekeep >+ > int __vdso_clock_gettime(clockid_t clock_id, struct timespec *ts); > int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); > u_int __vdso_gettc(const struct vdso_timehands *vdso_th); >diff --git a/sys/ufs/ffs/ffs_vnops.c b/sys/ufs/ffs/ffs_vnops.c >index 618ed8e..d051956 100644 >--- a/sys/ufs/ffs/ffs_vnops.c >+++ b/sys/ufs/ffs/ffs_vnops.c >@@ -730,10 +730,12 @@ ffs_write(ap) > vnode_pager_setsize(vp, uio->uio_offset + xfersize); > > /* >- * We must perform a read-before-write if the transfer size >- * does not cover the entire buffer. >+ * We must perform a read-before-write if the transfer >+ * size does not cover the entire buffer or the valid >+ * part of the last buffer for the file. 
> */ >- if (fs->fs_bsize > xfersize) >+ if (fs->fs_bsize > xfersize && (blkoffset != 0 || >+ uio->uio_offset + xfersize < ip->i_size)) > flags |= BA_CLRBUF; > else > flags &= ~BA_CLRBUF; >diff --git a/sys/vm/memguard.c b/sys/vm/memguard.c >index d4efc2b..d8f2b17 100644 >--- a/sys/vm/memguard.c >+++ b/sys/vm/memguard.c >@@ -505,6 +505,10 @@ memguard_cmp_zone(uma_zone_t zone) > zone->uz_flags & UMA_ZONE_NOFREE) > return (0); > >+ if (zone->uz_link.kl_keg != NULL && >+ (zone->uz_link.kl_keg->uk_flags & UMA_ZFLAG_CACHEONLY) != 0) >+ return (0); >+ > if (memguard_cmp(zone->uz_size)) > return (1); > >diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c >index 7391465..6761c7d 100644 >--- a/sys/vm/vm_fault.c >+++ b/sys/vm/vm_fault.c >@@ -1294,30 +1294,59 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map, > src_object = src_entry->object.vm_object; > src_pindex = OFF_TO_IDX(src_entry->offset); > >+ KASSERT(upgrade || dst_entry->object.vm_object == NULL, >+ ("vm_fault_copy_entry: vm_object not NULL")); > if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { > dst_object = src_object; > vm_object_reference(dst_object); > } else { > /* >- * Create the top-level object for the destination entry. (Doesn't >- * actually shadow anything - we copy the pages directly.) >+ * Create the top-level object for the destination >+ * entry. (Doesn't actually shadow anything - we copy >+ * the pages directly.) 
> */ >- dst_object = vm_object_allocate(OBJT_DEFAULT, >- OFF_TO_IDX(dst_entry->end - dst_entry->start)); >+ vm_object_shadow(&dst_entry->object.vm_object, >+ &dst_entry->offset, OFF_TO_IDX(dst_entry->end - >+ dst_entry->start)); >+ dst_object = dst_entry->object.vm_object; > #if VM_NRESERVLEVEL > 0 >- dst_object->flags |= OBJ_COLORED; >- dst_object->pg_color = atop(dst_entry->start); >+ if (dst_object != src_object) { >+ dst_object->flags |= OBJ_COLORED; >+ dst_object->pg_color = atop(dst_entry->start); >+ } > #endif >+ >+ /* >+ * If not an upgrade, then enter the mappings in the >+ * pmap as read and/or execute accesses. Otherwise, >+ * enter them as write accesses. >+ * >+ * A writeable large page mapping is only created if >+ * all of the constituent small page mappings are >+ * modified. Marking PTEs as modified on inception >+ * allows promotion to happen without taking >+ * potentially large number of soft faults. >+ */ >+ access &= ~VM_PROT_WRITE; > } >+ /* >+ * dst_entry->offset is either left unchanged in the upgrade >+ * case, or vm_object_shadow takes care of recalculating the >+ * offset depending on creation of the new object. >+ */ > >- VM_OBJECT_WLOCK(dst_object); >- KASSERT(upgrade || dst_entry->object.vm_object == NULL, >- ("vm_fault_copy_entry: vm_object not NULL")); >- if (src_object != dst_object) { >- dst_entry->object.vm_object = dst_object; >- dst_entry->offset = 0; >- dst_object->charge = dst_entry->end - dst_entry->start; >+ /* >+ * This can only happen for upgrade case, due to src_object >+ * reference bump above, and it means that all pages are >+ * private already. 
>+ */ >+ if (dst_object == src_object && >+ (src_entry->protection & VM_PROT_WRITE) == 0) { >+ KASSERT(upgrade, ("XXX")); >+ goto uncow; > } >+ >+ VM_OBJECT_WLOCK(dst_object); > if (fork_charge != NULL) { > KASSERT(dst_entry->cred == NULL, > ("vm_fault_copy_entry: leaked swp charge")); >@@ -1332,19 +1361,6 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map, > } > > /* >- * If not an upgrade, then enter the mappings in the pmap as >- * read and/or execute accesses. Otherwise, enter them as >- * write accesses. >- * >- * A writeable large page mapping is only created if all of >- * the constituent small page mappings are modified. Marking >- * PTEs as modified on inception allows promotion to happen >- * without taking potentially large number of soft faults. >- */ >- if (!upgrade) >- access &= ~VM_PROT_WRITE; >- >- /* > * Loop through all of the virtual pages within the entry's > * range, copying each page from the source object to the > * destination object. Since the source is wired, those pages >@@ -1451,6 +1467,7 @@ again: > } > VM_OBJECT_WUNLOCK(dst_object); > if (upgrade) { >+uncow: > dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY); > vm_object_deallocate(src_object); > } >diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c >index c7f3153..4947b63 100644 >--- a/sys/vm/vm_object.c >+++ b/sys/vm/vm_object.c >@@ -2102,15 +2102,18 @@ boolean_t > vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, > vm_size_t prev_size, vm_size_t next_size, boolean_t reserved) > { >- vm_pindex_t next_pindex; >+ vm_object_t shadow_object; >+ vm_page_t m; >+ vm_pindex_t next_pindex, pi; >+ boolean_t ret; > > if (prev_object == NULL) > return (TRUE); >+ ret = FALSE; > VM_OBJECT_WLOCK(prev_object); > if ((prev_object->type != OBJT_DEFAULT && > prev_object->type != OBJT_SWAP) || > (prev_object->flags & OBJ_TMPFS_NODE) != 0) { >- VM_OBJECT_WUNLOCK(prev_object); > return (FALSE); > } > >@@ -2120,24 +2123,61 @@ vm_object_coalesce(vm_object_t 
prev_object, vm_ooffset_t prev_offset, > vm_object_collapse(prev_object); > > /* >- * Can't coalesce if: . more than one reference . paged out . shadows >- * another object . has a copy elsewhere (any of which mean that the >- * pages not mapped to prev_entry may be in use anyway) >+ * Can't coalesce if shadows another object, which means that >+ * the pages not mapped to prev_entry may be in use anyway. > */ >- if (prev_object->backing_object != NULL) { >- VM_OBJECT_WUNLOCK(prev_object); >- return (FALSE); >- } >+ if (prev_object->backing_object != NULL) >+ goto out; > > prev_size >>= PAGE_SHIFT; > next_size >>= PAGE_SHIFT; > next_pindex = OFF_TO_IDX(prev_offset) + prev_size; > >- if ((prev_object->ref_count > 1) && >- (prev_object->size != next_pindex)) { >- VM_OBJECT_WUNLOCK(prev_object); >- return (FALSE); >+ /* >+ * If object has more than one reference or is larger than the >+ * end of the previous mapping, still allow coalescing map >+ * entries for the case when this is due to other mappings of >+ * the object into the current address space. >+ */ >+ if (prev_object->ref_count > 1 && prev_object->size != next_pindex) { >+ /* >+ * Only one mapping allowed, otherwise coalesce could >+ * result in the contradictory content in the regions. >+ */ >+ if ((prev_object->flags & OBJ_ONEMAPPING) == 0) >+ goto out; >+ >+ /* No pages in the region, either resident ... */ >+ m = vm_page_find_least(prev_object, next_pindex); >+ if (m != NULL && m->pindex < next_pindex + next_size) >+ goto out; >+ /* ... or swapped out. */ >+ if (prev_object->type == OBJT_SWAP) { >+ for (pi = next_pindex; pi < next_pindex + next_size; >+ pi++) { >+ if (vm_pager_has_page(prev_object, pi, NULL, >+ NULL)) >+ goto out; >+ } >+ } >+ >+ /* >+ * Region must not be shadowed, otherwise the >+ * instantiated page in our (backing) object could >+ * leak to the shadow. 
>+ */ >+ LIST_FOREACH(shadow_object, &prev_object->shadow_head, >+ shadow_list) { >+ KASSERT(shadow_object->backing_object == prev_object, >+ ("corrupted shadow")); >+ if (shadow_object->backing_object_offset < >+ next_pindex + next_size && >+ shadow_object->backing_object_offset + >+ shadow_object->size > next_pindex) >+ goto out; >+ } > } >+ ret = TRUE; > > /* > * Account for the charge. >@@ -2189,8 +2229,9 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, > if (next_pindex + next_size > prev_object->size) > prev_object->size = next_pindex + next_size; > >+out: > VM_OBJECT_WUNLOCK(prev_object); >- return (TRUE); >+ return (ret); > } > > void >diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h >index cd30772..bb1b165 100644 >--- a/sys/vm/vm_page.h >+++ b/sys/vm/vm_page.h >@@ -227,6 +227,7 @@ struct vm_domain { > long vmd_segs; /* bitmask of the segments */ > boolean_t vmd_oom; > int vmd_pass; /* local pagedaemon pass */ >+ int vmd_oom_seq; > struct vm_page vmd_marker; /* marker for pagedaemon private use */ > }; > >diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c >index 26eda8c..e5f6d91 100644 >--- a/sys/vm/vm_pageout.c >+++ b/sys/vm/vm_pageout.c >@@ -121,7 +121,8 @@ static void vm_pageout_init(void); > static int vm_pageout_clean(vm_page_t m); > static int vm_pageout_cluster(vm_page_t m); > static void vm_pageout_scan(struct vm_domain *vmd, int pass); >-static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass); >+static void vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage, >+ int starting_page_shortage); > > SYSINIT(pagedaemon_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_FIRST, vm_pageout_init, > NULL); >@@ -158,6 +159,7 @@ int vm_pages_needed; /* Event on which pageout daemon sleeps */ > int vm_pageout_deficit; /* Estimated number of pages deficit */ > int vm_pageout_pages_needed; /* flag saying that the pageout daemon needs pages */ > int vm_pageout_wakeup_thresh; >+static int vm_pageout_oom_seq = 24; > > #if 
!defined(NO_SWAPPING) > static int vm_pageout_req_swapout; /* XXX */ >@@ -223,6 +225,10 @@ static int pageout_lock_miss; > SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss, > CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout"); > >+SYSCTL_INT(_vm, OID_AUTO, pageout_oom_seq, >+ CTLFLAG_RW, &vm_pageout_oom_seq, 0, >+ "side-to-side calls to oom detector to start OOM"); >+ > #define VM_PAGEOUT_PAGE_COUNT 16 > int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT; > >@@ -1028,7 +1034,8 @@ vm_pageout_scan(struct vm_domain *vmd, int pass) > vm_page_t m, next; > struct vm_pagequeue *pq; > vm_object_t object; >- int act_delta, addl_page_shortage, deficit, maxscan, page_shortage; >+ int act_delta, addl_page_shortage, deficit, maxscan; >+ int page_shortage, starting_page_shortage; > int vnodes_skipped = 0; > int maxlaunder; > boolean_t queues_locked; >@@ -1069,6 +1076,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass) > page_shortage = vm_paging_target() + deficit; > } else > page_shortage = deficit = 0; >+ starting_page_shortage = page_shortage; > > /* > * maxlaunder limits the number of dirty pages we flush per scan. >@@ -1337,6 +1345,15 @@ relock_queues: > (void)speedup_syncer(); > > /* >+ * If we are critically low on one of RAM or swap and low on >+ * the other, kill the largest process. However, we avoid >+ * doing this on the first pass in order to give ourselves a >+ * chance to flush out dirty vnode-backed pages and to allow >+ * active pages to be moved to the inactive queue and reclaimed. >+ */ >+ vm_pageout_mightbe_oom(vmd, page_shortage, starting_page_shortage); >+ >+ /* > * Compute the number of pages we want to try to move from the > * active queue to the inactive queue. > */ >@@ -1445,15 +1462,6 @@ relock_queues: > } > } > #endif >- >- /* >- * If we are critically low on one of RAM or swap and low on >- * the other, kill the largest process. 
However, we avoid >- * doing this on the first pass in order to give ourselves a >- * chance to flush out dirty vnode-backed pages and to allow >- * active pages to be moved to the inactive queue and reclaimed. >- */ >- vm_pageout_mightbe_oom(vmd, pass); > } > > static int vm_pageout_oom_vote; >@@ -1464,18 +1472,36 @@ static int vm_pageout_oom_vote; > * failed to reach free target is premature. > */ > static void >-vm_pageout_mightbe_oom(struct vm_domain *vmd, int pass) >+vm_pageout_mightbe_oom(struct vm_domain *vmd, int page_shortage, >+ int starting_page_shortage) > { > int old_vote; > >- if (pass <= 1 || !((swap_pager_avail < 64 && vm_page_count_min()) || >- (swap_pager_full && vm_paging_target() > 0))) { >+ if (starting_page_shortage <= 0 || starting_page_shortage != >+ page_shortage) { >+#if 0 >+ if (vmd->vmd_oom_seq != 0) >+ printf("CLR oom_seq %d ps %d sps %d\n", vmd->vmd_oom_seq, page_shortage, starting_page_shortage); >+#endif >+ vmd->vmd_oom_seq = 0; >+ } else >+ vmd->vmd_oom_seq++; >+ if (vmd->vmd_oom_seq < vm_pageout_oom_seq) { > if (vmd->vmd_oom) { > vmd->vmd_oom = FALSE; > atomic_subtract_int(&vm_pageout_oom_vote, 1); > } > return; > } >+#if 0 >+printf("OOM oom_seq %d ps %d sps %d\n", vmd->vmd_oom_seq, page_shortage, starting_page_shortage); >+#endif >+ >+ /* >+ * Do not follow the call sequence until OOM condition is >+ * cleared. >+ */ >+ vmd->vmd_oom_seq = 0; > > if (vmd->vmd_oom) > return; >diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c >index 2cfc630..9a3f5c7 100644 >--- a/sys/vm/vm_reserv.c >+++ b/sys/vm/vm_reserv.c >@@ -762,6 +762,7 @@ vm_reserv_break(vm_reserv_t rv, vm_page_t m) > } while (i < NPOPMAP); > KASSERT(rv->popcnt == 0, > ("vm_reserv_break: reserv %p's popcnt is corrupted", rv)); >+ rv->pages->psind = 0; > vm_reserv_broken++; > } >
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Diff
Attachments on
bug 200992
:
157913
|
157915