This is mostly a place holder until I can gather more data and perhaps do a full rebuild from current and test again. For the moment however the following was seen on a QEMU test RISC-V virtual machine for rv64gimafdc arch : ganymede# ganymede# uname -a FreeBSD ganymede 13.0-CURRENT FreeBSD 13.0-CURRENT r341837 QEMU riscv ganymede# ganymede# sysctl -a | grep '^Free' Free Memory: 8039816K ganymede# ganymede# sysctl -a | grep '\.smp\.' kern.smp.maxid: 3 kern.smp.maxcpus: 16 kern.smp.active: 1 kern.smp.disabled: 0 kern.smp.cpus: 4 kern.smp.topology: 0 kern.smp.forward_signal_enabled: 1 ganymede# ganymede# ganymede# ls /dev/d* /dev/devctl /dev/devctl2 /dev/devstat ganymede# ganymede# df -h Filesystem Size Used Avail Capacity Mounted on /dev/vtbd0 32G 2.2G 27G 7% / devfs 1.0K 1.0K 0B 100% /dev ganymede# ganymede# ls /dev/vt* /dev/vtbd0 /dev/vtbd1 ganymede# ganymede# zpool list ZFS filesystem version: 5 ZFS storage pool version: features support (5000) no pools available ganymede# ganymede# zpool create -m none -o failmode=continue \ ? -O sharenfs=off -O checksum=sha256 -O compression=on \ ? -O atime=on -O devices=off -O exec=on -O setuid=on \ ? -O xattr=on rv64imafdc /dev/vtbd1 ganymede# ganymede# zpool list NAME SIZE ALLOC FREE CKPOINT EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT rv64imafdc 3.75G 100K 3.75G - - 0% 0% 1.00x ONLINE - ganymede# Sorry about the poor choice of pool name but it was a toy test. ganymede# zpool status rv64imafdc pool: rv64imafdc state: ONLINE scan: none requested config: NAME STATE READ WRITE CKSUM rv64imafdc ONLINE 0 0 0 vtbd1 ONLINE 0 0 0 errors: No known data errors ganymede# ganymede# zfs create -o quota=2G -o mountpoint=/z \ ? -o sharenfs=off -o checksum=sha256 -o compression=on \ ? -o atime=on -o devices=off -o exec=off -o setuid=on \ ? 
-o xattr=on rv64imafdc/z ganymede# ganymede# zfs list NAME USED AVAIL REFER MOUNTPOINT rv64imafdc 142K 3.62G 23K none rv64imafdc/z 23K 2.00G 23K /z ganymede# ganymede# zfs create -o quota=1G -o mountpoint=/z/000 \ ? -o sharenfs=off -o checksum=sha256 -o compression=on \ ? -o atime=on -o devices=off -o exec=on -o setuid=on \ ? -o xattr=on rv64imafdc/z/000 ganymede# ganymede# zfs list NAME USED AVAIL REFER MOUNTPOINT rv64imafdc 182K 3.62G 23K none rv64imafdc/z 46K 2.00G 23K /z rv64imafdc/z/000 23K 1024M 23K /z/000 ganymede# ganymede# zfs create -o quota=1G -o mountpoint=/z/001 \ ? -o sharenfs=off -o checksum=sha256 -o compression=on \ ? -o atime=on -o devices=off -o exec=on -o setuid=on \ ? -o xattr=on rv64imafdc/z/001 ganymede# ganymede# ganymede# zfs snapshot -r rv64imafdc@`( /bin/date '+%Y%m%d%H%M%S' )` ganymede# ganymede# ganymede# zfs list -t all NAME USED AVAIL REFER MOUNTPOINT rv64imafdc 239K 3.62G 23K none rv64imafdc@20190523135048 0 - 23K - rv64imafdc/z 69K 2.00G 23K /z rv64imafdc/z@20190523135048 0 - 23K - rv64imafdc/z/000 23K 1024M 23K /z/000 rv64imafdc/z/000@20190523135048 0 - 23K - rv64imafdc/z/001 23K 1024M 23K /z/001 rv64imafdc/z/001@20190523135048 0 - 23K - ganymede# Other trivial things done with the toy zpool. Then at shutdown : ganymede# ganymede# shutdown -h 'now' Shutdown NOW! shutdown: [pid 770] ganymede# *** FINAL System shutdown message from root@ganymede *** System going down IMMEDIATELY System shutdown time has arrived May 23 14:32:03 ganymede shutdown[770]: halt by root: Stopping cron. Waiting for PIDS: 552. Stopping sshd. Waiting for PIDS: 542. Stopping devd. Waiting for PIDS: 286. Writing entropy file:. Writing early boot entropy file:. . Terminated May 23 14:32:39 ganymede syslogd: exiting on signal 15 Waiting (max 60 seconds) for system process `vnlru' to stop... done Waiting (max 60 seconds) for system process `syncer' to stop... Syncing disks, vnodes remaining... 
0 0 0 done Waiting (max 60 seconds) for system thread `bufdaemon' to stop... done Waiting (max 60 seconds) for system thread `bufspacedaemon-0' to stop... done Waiting (max 60 seconds) for system thread `bufspacedaemon-1' to stop... done Waiting (max 60 seconds) for system thread `bufspacedaemon-2' to stop... done Waiting (max 60 seconds) for system thread `bufspacedaemon-3' to stop... done All buffers synced. timeout stopping cpus panic: solaris assert: zrl->zr_refcount == 0 (0x10 == 0x0), file: /usr/src/head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zrlock.c, line: 65 cpuid = 2 time = 1558621965 KDB: stack backtrace: db_trace_self() at db_read_token+0x5e2 pc = 0xffffffc00046539a ra = 0xffffffc0000c1d52 sp = 0xffffffc041ef37a0 fp = 0xffffffc041ef39c0 db_read_token() at kdb_backtrace+0x2e pc = 0xffffffc0000c1d52 ra = 0xffffffc0002156ea sp = 0xffffffc041ef39c0 fp = 0xffffffc041ef39d0 kdb_backtrace() at vpanic+0x15a pc = 0xffffffc0002156ea ra = 0xffffffc0001d9f8c sp = 0xffffffc041ef39d0 fp = 0xffffffc041ef3a10 vpanic() at panic+0x22 pc = 0xffffffc0001d9f8c ra = 0xffffffc0001d9fc4 sp = 0xffffffc041ef3a10 fp = 0xffffffc041ef3a30 panic() at assfail3+0x18 pc = 0xffffffc0001d9fc4 ra = 0xffffffc001651590 sp = 0xffffffc041ef3a30 fp = 0xffffffd005c86368 KDB: enter: panic [ thread pid 0 tid 100098 ] Stopped at kdb_enter+0x3a: c.ebreak db> db> bt Tracing pid 0 tid 100098 td 0xffffffd005a1f000 db_trace_self() at db_trace_thread+0x3c pc = 0xffffffc00046539a ra = 0xffffffc00046542c sp = 0xffffffc041ef3448 fp = 0xffffffc041ef3478 db_trace_thread() at db_skip_to_eol+0x300 pc = 0xffffffc00046542c ra = 0xffffffc0000bf8e4 sp = 0xffffffc041ef3478 fp = 0xffffffc041ef34a8 db_skip_to_eol() at db_skip_to_eol+0x414 pc = 0xffffffc0000bf8e4 ra = 0xffffffc0000bf9f8 sp = 0xffffffc041ef34a8 fp = 0xffffffc041ef3598 db_skip_to_eol() at db_command_loop+0x74 pc = 0xffffffc0000bf9f8 ra = 0xffffffc0000bff28 sp = 0xffffffc041ef3598 fp = 0xffffffc041ef35d8 db_command_loop() at db_read_token+0x4ba 
pc = 0xffffffc0000bff28 ra = 0xffffffc0000c1c2a sp = 0xffffffc041ef35d8 fp = 0xffffffc041ef3818 db_read_token() at kdb_trap+0x122 pc = 0xffffffc0000c1c2a ra = 0xffffffc000215e1c sp = 0xffffffc041ef3818 fp = 0xffffffc041ef3878 kdb_trap() at do_trap_supervisor+0x96 pc = 0xffffffc000215e1c ra = 0xffffffc00046f064 sp = 0xffffffc041ef3878 fp = 0xffffffc041ef3898 do_trap_supervisor() at cpu_exception_handler_supervisor+0x68 pc = 0xffffffc00046f064 ra = 0xffffffc000465a28 sp = 0xffffffc041ef3898 fp = 0xffffffc041ef39d0 cpu_exception_handler_supervisor() at vpanic+0x16a pc = 0xffffffc000465a28 ra = 0xffffffc0001d9f9c sp = 0xffffffc041ef39d0 fp = 0xffffffc041ef3a10 vpanic() at panic+0x22 pc = 0xffffffc0001d9f9c ra = 0xffffffc0001d9fc4 sp = 0xffffffc041ef3a10 fp = 0xffffffc041ef3a30 panic() at assfail3+0x18 pc = 0xffffffc0001d9fc4 ra = 0xffffffc001651590 sp = 0xffffffc041ef3a30 fp = 0xffffffd005c86368 db> ps pid ppid pgrp uid state wmesg wchan cmd 625 0 0 0 DL (threaded) [zfskern] 100078 D arc_rec 0xffffffc0015c31a0 [arc_reclaim_thread] 100097 D arc_dnl 0xffffffc0015c3258 [arc_dnlc_evicts_thr] 100099 D dbuf_ev 0xffffffc0015c8ae8 [dbuf_evict_thread] 100116 D l2arc_f 0xffffffc0015c32e0 [l2arc_feed_thread] 100346 D spa->sp 0xffffffc00169dd18 [trim rv64imafdc] 100357 D tx->tx_ 0xffffffd0017f3aa8 [txg_thread_enter] 100358 D tx->tx_ 0xffffffd0017f3a88 [txg_thread_enter] 100359 D t->zthr 0xffffffd0017c9228 [solthread 0xffffffc] 100360 D t->zthr 0xffffffd0039f9ba8 [solthread 0xffffffc] 21 0 0 0 DL - 0xffffffc00063dc24 [soaiod4] 20 0 0 0 DL - 0xffffffc00063dc24 [soaiod3] 19 0 0 0 DL - 0xffffffc00063dc24 [soaiod2] 18 0 0 0 DL - 0xffffffc00063dc24 [soaiod1] 17 0 0 0 DL kpsusp 0xffffffd0013e86c0 [vnlru] 16 0 0 0 DL kpsusp 0xffffffd0013e8bd8 [syncer] 15 0 0 0 DL (threaded) [bufdaemon] 100044 D ktsusp 0xffffffd0017a665c [bufdaemon] 100053 D ktsusp 0xffffffd0038bf65c [bufspacedaemon-0] 100058 D ktsusp 0xffffffd0038bf0fc [bufspacedaemon-1] 100059 D ktsusp 0xffffffd0038be65c 
[bufspacedaemon-2] 100060 D ktsusp 0xffffffd0038be0fc [bufspacedaemon-3] 14 0 0 0 DL psleep 0xffffffc00063e9d8 [vmdaemon] 9 0 0 0 DL (threaded) [pagedaemon] 100042 D psleep 0xffffffc0006a7e18 [dom0] 100051 D launds 0xffffffc0006a7e24 [laundry: dom0] 100054 D umarcl 0xffffffc000437524 [uma] 8 0 0 0 DL - 0xffffffc000642a38 [rand_harvestq] 7 0 0 0 DL waiting 0xffffffc0006a38d0 [sctp_iterator] 6 0 0 0 DL crypto_ 0xffffffd0013e1150 [crypto returns 3] 5 0 0 0 DL crypto_ 0xffffffd0013e10f0 [crypto returns 2] 4 0 0 0 DL crypto_ 0xffffffd0013e1090 [crypto returns 1] 3 0 0 0 DL crypto_ 0xffffffd0013e1030 [crypto returns 0] 2 0 0 0 DL crypto_ 0xffffffc0006a58b0 [crypto] 13 0 0 0 DL (threaded) [geom] 100020 D - 0xffffffc00063d298 [g_event] 100021 D - 0xffffffc00063d290 [g_up] 100022 D - 0xffffffc00063d288 [g_down] 12 0 0 0 WL (threaded) [intr] 100007 I [swi6: Giant taskq] 100008 I [swi6: task queue] 100012 I [swi5: fast taskq] 100014 I [swi1: netisr 0] 100015 I [swi3: vm] 100016 I [swi4: clock (0)] 100017 I [swi4: clock (1)] 100018 I [swi4: clock (2)] 100019 I [swi4: clock (3)] 100033 I [swi0: uart] 100034 I [plic0,7: +] 100035 I [plic0,6: +] 100036 I [plic0,5: +] 11 0 0 0 RL (threaded) [idle] 100003 CanRun [idle: cpu0] 100004 Run CPU 1 [idle: cpu1] 100005 CanRun [idle: cpu2] 100006 Run CPU 3 [idle: cpu3] 1 0 1 0 RLs CPU 0 [init] 10 0 0 0 DL audit_w 0xffffffc0006a5dd0 [audit] 0 0 0 0 RLs (threaded) [kernel] 100000 D swapin 0xffffffc00065dea0 [swapper] 100009 D - 0xffffffd00136ad00 [aiod_kick taskq] 100010 D - 0xffffffd00136fd00 [config_0] 100011 D - 0xffffffd00136ac00 [kqueue_ctx taskq] 100013 D - 0xffffffd00136aa00 [thread taskq] 100023 D - 0xffffffd00136a800 [firmware taskq] 100024 D - 0xffffffd00136a700 [crypto_0] 100025 D - 0xffffffd00136a700 [crypto_1] 100026 D - 0xffffffd00136a700 [crypto_2] 100027 D - 0xffffffd00136a700 [crypto_3] 100037 D - 0xffffffd00136a500 [vtnet0 rxq 0] 100038 D - 0xffffffd00136a400 [vtnet0 txq 0] 100039 D - 0xffffffc00067093a [deadlkres] 100047 D 
- 0xffffffd00136e900 [softirq_0] 100048 D - 0xffffffd00136e800 [softirq_1] 100049 D - 0xffffffd00136e700 [softirq_2] 100050 D - 0xffffffd00136e600 [softirq_3] 100089 D - 0xffffffd0059ed200 [system_taskq_0] 100090 D - 0xffffffd0059ed200 [system_taskq_1] 100091 D - 0xffffffd0059ed200 [system_taskq_2] 100092 D - 0xffffffd0059ed200 [system_taskq_3] 100093 D - 0xffffffd0059ed100 [arc_prune_0] 100094 D - 0xffffffd0059ed100 [arc_prune_1] 100095 D - 0xffffffd0059ed100 [arc_prune_2] 100096 D - 0xffffffd0059ed100 [arc_prune_3] 100098 Run CPU 2 [dbu_evict] 100100 D - 0xffffffd0059ece00 [z_vdev_file_0] 100101 D - 0xffffffd0059ece00 [z_vdev_file_1] 100102 D - 0xffffffd0059ece00 [z_vdev_file_2] 100103 D - 0xffffffd0059ece00 [z_vdev_file_3] 100104 D - 0xffffffd0059ece00 [z_vdev_file_4] 100105 D - 0xffffffd0059ece00 [z_vdev_file_5] 100106 D - 0xffffffd0059ece00 [z_vdev_file_6] 100107 D - 0xffffffd0059ece00 [z_vdev_file_7] 100108 D - 0xffffffd0059ece00 [z_vdev_file_8] 100109 D - 0xffffffd0059ece00 [z_vdev_file_9] 100110 D - 0xffffffd0059ece00 [z_vdev_file_10] 100111 D - 0xffffffd0059ece00 [z_vdev_file_11] 100112 D - 0xffffffd0059ece00 [z_vdev_file_12] 100113 D - 0xffffffd0059ece00 [z_vdev_file_13] 100114 D - 0xffffffd0059ece00 [z_vdev_file_14] 100115 D - 0xffffffd0059ece00 [z_vdev_file_15] 100117 D - 0xffffffd0059ecd00 [zfsvfs] 100118 D - 0xffffffd0059ecc00 [zio_null_issue] 100119 D - 0xffffffd0059ecb00 [zio_null_intr] 100120 D - 0xffffffd0059eca00 [zio_read_issue_0] 100121 D - 0xffffffd0059eca00 [zio_read_issue_1] 100122 D - 0xffffffd0059eca00 [zio_read_issue_2] 100123 D - 0xffffffd0059eca00 [zio_read_issue_3] 100124 D - 0xffffffd0059eca00 [zio_read_issue_4] 100125 D - 0xffffffd0059eca00 [zio_read_issue_5] 100126 D - 0xffffffd0059eca00 [zio_read_issue_6] 100127 D - 0xffffffd0059eca00 [zio_read_issue_7] 100128 D - 0xffffffd0059ec900 [zio_read_intr_0_0] 100129 D - 0xffffffd0059ec900 [zio_read_intr_0_1] 100130 D - 0xffffffd0059ec900 [zio_read_intr_0_2] 100131 D - 0xffffffd0059ec900 
[zio_read_intr_0_3] 100132 D - 0xffffffd0059ec900 [zio_read_intr_0_4] 100133 D - 0xffffffd0059ec900 [zio_read_intr_0_5] 100134 D - 0xffffffd0059ec900 [zio_read_intr_0_6] 100135 D - 0xffffffd0059ec900 [zio_read_intr_0_7] 100136 D - 0xffffffd0059ec900 [zio_read_intr_0_8] 100137 D - 0xffffffd0059ec900 [zio_read_intr_0_9] 100138 D - 0xffffffd0059ec900 [zio_read_intr_0_10] 100139 D - 0xffffffd0059ec900 [zio_read_intr_0_11] 100140 D - 0xffffffd0059ec800 [zio_read_intr_1_0] 100141 D - 0xffffffd0059ec800 [zio_read_intr_1_1] 100142 D - 0xffffffd0059ec800 [zio_read_intr_1_2] 100143 D - 0xffffffd0059ec800 [zio_read_intr_1_3] 100144 D - 0xffffffd0059ec800 [zio_read_intr_1_4] 100145 D - 0xffffffd0059ec800 [zio_read_intr_1_5] 100146 D - 0xffffffd0059ec800 [zio_read_intr_1_6] 100147 D - 0xffffffd0059ec800 [zio_read_intr_1_7] 100148 D - 0xffffffd0059ec800 [zio_read_intr_1_8] 100149 D - 0xffffffd0059ec800 [zio_read_intr_1_9] 100150 D - 0xffffffd0059ec800 [zio_read_intr_1_10] 100151 D - 0xffffffd0059ec800 [zio_read_intr_1_11] 100152 D - 0xffffffd0059ec700 [zio_read_intr_2_0] 100153 D - 0xffffffd0059ec700 [zio_read_intr_2_1] 100154 D - 0xffffffd0059ec700 [zio_read_intr_2_2] 100155 D - 0xffffffd0059ec700 [zio_read_intr_2_3] 100156 D - 0xffffffd0059ec700 [zio_read_intr_2_4] 100157 D - 0xffffffd0059ec700 [zio_read_intr_2_5] 100158 D - 0xffffffd0059ec700 [zio_read_intr_2_6] 100159 D - 0xffffffd0059ec700 [zio_read_intr_2_7] 100160 D - 0xffffffd0059ec700 [zio_read_intr_2_8] 100161 D - 0xffffffd0059ec700 [zio_read_intr_2_9] 100162 D - 0xffffffd0059ec700 [zio_read_intr_2_10] 100163 D - 0xffffffd0059ec700 [zio_read_intr_2_11] 100164 D - 0xffffffd0059ec600 [zio_read_intr_3_0] 100165 D - 0xffffffd0059ec600 [zio_read_intr_3_1] 100166 D - 0xffffffd0059ec600 [zio_read_intr_3_2] 100167 D - 0xffffffd0059ec600 [zio_read_intr_3_3] 100168 D - 0xffffffd0059ec600 [zio_read_intr_3_4] 100169 D - 0xffffffd0059ec600 [zio_read_intr_3_5] 100170 D - 0xffffffd0059ec600 [zio_read_intr_3_6] 100171 D - 
0xffffffd0059ec600 [zio_read_intr_3_7] 100172 D - 0xffffffd0059ec600 [zio_read_intr_3_8] 100173 D - 0xffffffd0059ec600 [zio_read_intr_3_9] 100174 D - 0xffffffd0059ec600 [zio_read_intr_3_10] 100175 D - 0xffffffd0059ec600 [zio_read_intr_3_11] 100176 D - 0xffffffd0059ec500 [zio_read_intr_4_0] 100177 D - 0xffffffd0059ec500 [zio_read_intr_4_1] 100178 D - 0xffffffd0059ec500 [zio_read_intr_4_2] 100179 D - 0xffffffd0059ec500 [zio_read_intr_4_3] 100180 D - 0xffffffd0059ec500 [zio_read_intr_4_4] 100181 D - 0xffffffd0059ec500 [zio_read_intr_4_5] 100182 D - 0xffffffd0059ec500 [zio_read_intr_4_6] 100183 D - 0xffffffd0059ec500 [zio_read_intr_4_7] 100184 D - 0xffffffd0059ec500 [zio_read_intr_4_8] 100185 D - 0xffffffd0059ec500 [zio_read_intr_4_9] 100186 D - 0xffffffd0059ec500 [zio_read_intr_4_10] 100187 D - 0xffffffd0059ec500 [zio_read_intr_4_11] 100188 D - 0xffffffd0059ec400 [zio_read_intr_5_0] 100189 D - 0xffffffd0059ec400 [zio_read_intr_5_1] 100190 D - 0xffffffd0059ec400 [zio_read_intr_5_2] 100191 D - 0xffffffd0059ec400 [zio_read_intr_5_3] 100192 D - 0xffffffd0059ec400 [zio_read_intr_5_4] 100193 D - 0xffffffd0059ec400 [zio_read_intr_5_5] 100194 D - 0xffffffd0059ec400 [zio_read_intr_5_6] 100195 D - 0xffffffd0059ec400 [zio_read_intr_5_7] 100196 D - 0xffffffd0059ec400 [zio_read_intr_5_8] 100197 D - 0xffffffd0059ec400 [zio_read_intr_5_9] 100198 D - 0xffffffd0059ec400 [zio_read_intr_5_10] 100199 D - 0xffffffd0059ec400 [zio_read_intr_5_11] 100200 D - 0xffffffd0059ec300 [zio_read_intr_6_0] 100201 D - 0xffffffd0059ec300 [zio_read_intr_6_1] 100202 D - 0xffffffd0059ec300 [zio_read_intr_6_2] 100203 D - 0xffffffd0059ec300 [zio_read_intr_6_3] 100204 D - 0xffffffd0059ec300 [zio_read_intr_6_4] 100205 D - 0xffffffd0059ec300 [zio_read_intr_6_5] 100206 D - 0xffffffd0059ec300 [zio_read_intr_6_6] 100207 D - 0xffffffd0059ec300 [zio_read_intr_6_7] 100208 D - 0xffffffd0059ec300 [zio_read_intr_6_8] 100209 D - 0xffffffd0059ec300 [zio_read_intr_6_9] 100210 D - 0xffffffd0059ec300 [zio_read_intr_6_10] 
100211 D - 0xffffffd0059ec300 [zio_read_intr_6_11] 100212 D - 0xffffffd0059ec200 [zio_read_intr_7_0] 100213 D - 0xffffffd0059ec200 [zio_read_intr_7_1] 100214 D - 0xffffffd0059ec200 [zio_read_intr_7_2] 100215 D - 0xffffffd0059ec200 [zio_read_intr_7_3] 100216 D - 0xffffffd0059ec200 [zio_read_intr_7_4] 100217 D - 0xffffffd0059ec200 [zio_read_intr_7_5] 100218 D - 0xffffffd0059ec200 [zio_read_intr_7_6] 100219 D - 0xffffffd0059ec200 [zio_read_intr_7_7] 100220 D - 0xffffffd0059ec200 [zio_read_intr_7_8] 100221 D - 0xffffffd0059ec200 [zio_read_intr_7_9] 100222 D - 0xffffffd0059ec200 [zio_read_intr_7_10] 100223 D - 0xffffffd0059ec200 [zio_read_intr_7_11] 100224 D - 0xffffffd0059ec100 [zio_write_issue_0] 100225 D - 0xffffffd0059ec100 [zio_write_issue_1] 100226 D - 0xffffffd0059ec100 [zio_write_issue_2] 100227 D - 0xffffffd0059ec000 [zio_write_issue_hig] 100228 D - 0xffffffd0059ec000 [zio_write_issue_hig] 100229 D - 0xffffffd0059ec000 [zio_write_issue_hig] 100230 D - 0xffffffd0059ec000 [zio_write_issue_hig] 100231 D - 0xffffffd0059ec000 [zio_write_issue_hig] 100232 D - 0xffffffd0059ebe00 [zio_write_intr_0] 100233 D - 0xffffffd0059ebe00 [zio_write_intr_1] 100234 D - 0xffffffd0059ebe00 [zio_write_intr_2] 100235 D - 0xffffffd0059ebe00 [zio_write_intr_3] 100236 D - 0xffffffd0059ebe00 [zio_write_intr_4] 100237 D - 0xffffffd0059ebe00 [zio_write_intr_5] 100238 D - 0xffffffd0059ebe00 [zio_write_intr_6] 100239 D - 0xffffffd0059ebe00 [zio_write_intr_7] 100240 D - 0xffffffd0059ebd00 [zio_write_intr_high] 100241 D - 0xffffffd0059ebd00 [zio_write_intr_high] 100242 D - 0xffffffd0059ebd00 [zio_write_intr_high] 100243 D - 0xffffffd0059ebd00 [zio_write_intr_high] 100244 D - 0xffffffd0059ebd00 [zio_write_intr_high] 100245 D - 0xffffffd0059ebc00 [zio_free_issue_0_0] 100246 D - 0xffffffd0059ebc00 [zio_free_issue_0_1] 100247 D - 0xffffffd0059ebc00 [zio_free_issue_0_2] 100248 D - 0xffffffd0059ebc00 [zio_free_issue_0_3] 100249 D - 0xffffffd0059ebc00 [zio_free_issue_0_4] 100250 D - 0xffffffd0059ebc00 
[zio_free_issue_0_5] 100251 D - 0xffffffd0059ebc00 [zio_free_issue_0_6] 100252 D - 0xffffffd0059ebc00 [zio_free_issue_0_7] 100253 D - 0xffffffd0059ebc00 [zio_free_issue_0_8] 100254 D - 0xffffffd0059ebc00 [zio_free_issue_0_9] 100255 D - 0xffffffd0059ebc00 [zio_free_issue_0_10] 100256 D - 0xffffffd0059ebc00 [zio_free_issue_0_11] 100257 D - 0xffffffd0059ebb00 [zio_free_issue_1_0] 100258 D - 0xffffffd0059ebb00 [zio_free_issue_1_1] 100259 D - 0xffffffd0059ebb00 [zio_free_issue_1_2] 100260 D - 0xffffffd0059ebb00 [zio_free_issue_1_3] 100261 D - 0xffffffd0059ebb00 [zio_free_issue_1_4] 100262 D - 0xffffffd0059ebb00 [zio_free_issue_1_5] 100263 D - 0xffffffd0059ebb00 [zio_free_issue_1_6] 100264 D - 0xffffffd0059ebb00 [zio_free_issue_1_7] 100265 D - 0xffffffd0059ebb00 [zio_free_issue_1_8] 100266 D - 0xffffffd0059ebb00 [zio_free_issue_1_9] 100267 D - 0xffffffd0059ebb00 [zio_free_issue_1_10] 100268 D - 0xffffffd0059ebb00 [zio_free_issue_1_11] 100269 D - 0xffffffd0059eba00 [zio_free_issue_2_0] 100270 D - 0xffffffd0059eba00 [zio_free_issue_2_1] 100271 D - 0xffffffd0059eba00 [zio_free_issue_2_2] 100272 D - 0xffffffd0059eba00 [zio_free_issue_2_3] 100273 D - 0xffffffd0059eba00 [zio_free_issue_2_4] 100274 D - 0xffffffd0059eba00 [zio_free_issue_2_5] 100275 D - 0xffffffd0059eba00 [zio_free_issue_2_6] 100276 D - 0xffffffd0059eba00 [zio_free_issue_2_7] 100277 D - 0xffffffd0059eba00 [zio_free_issue_2_8] 100278 D - 0xffffffd0059eba00 [zio_free_issue_2_9] 100279 D - 0xffffffd0059eba00 [zio_free_issue_2_10] 100280 D - 0xffffffd0059eba00 [zio_free_issue_2_11] 100281 D - 0xffffffd0059eb900 [zio_free_issue_3_0] 100282 D - 0xffffffd0059eb900 [zio_free_issue_3_1] 100283 D - 0xffffffd0059eb900 [zio_free_issue_3_2] 100284 D - 0xffffffd0059eb900 [zio_free_issue_3_3] 100285 D - 0xffffffd0059eb900 [zio_free_issue_3_4] 100286 D - 0xffffffd0059eb900 [zio_free_issue_3_5] 100287 D - 0xffffffd0059eb900 [zio_free_issue_3_6] 100288 D - 0xffffffd0059eb900 [zio_free_issue_3_7] 100289 D - 0xffffffd0059eb900 
[zio_free_issue_3_8] 100290 D - 0xffffffd0059eb900 [zio_free_issue_3_9] 100291 D - 0xffffffd0059eb900 [zio_free_issue_3_10] 100292 D - 0xffffffd0059eb900 [zio_free_issue_3_11] 100293 D - 0xffffffd0059eb800 [zio_free_issue_4_0] 100294 D - 0xffffffd0059eb800 [zio_free_issue_4_1] 100295 D - 0xffffffd0059eb800 [zio_free_issue_4_2] 100296 D - 0xffffffd0059eb800 [zio_free_issue_4_3] 100297 D - 0xffffffd0059eb800 [zio_free_issue_4_4] 100298 D - 0xffffffd0059eb800 [zio_free_issue_4_5] 100299 D - 0xffffffd0059eb800 [zio_free_issue_4_6] 100300 D - 0xffffffd0059eb800 [zio_free_issue_4_7] 100301 D - 0xffffffd0059eb800 [zio_free_issue_4_8] 100302 D - 0xffffffd0059eb800 [zio_free_issue_4_9] 100303 D - 0xffffffd0059eb800 [zio_free_issue_4_10] 100304 D - 0xffffffd0059eb800 [zio_free_issue_4_11] 100305 D - 0xffffffd0059eb700 [zio_free_issue_5_0] 100306 D - 0xffffffd0059eb700 [zio_free_issue_5_1] 100307 D - 0xffffffd0059eb700 [zio_free_issue_5_2] 100308 D - 0xffffffd0059eb700 [zio_free_issue_5_3] 100309 D - 0xffffffd0059eb700 [zio_free_issue_5_4] 100310 D - 0xffffffd0059eb700 [zio_free_issue_5_5] 100311 D - 0xffffffd0059eb700 [zio_free_issue_5_6] 100312 D - 0xffffffd0059eb700 [zio_free_issue_5_7] 100313 D - 0xffffffd0059eb700 [zio_free_issue_5_8] 100314 D - 0xffffffd0059eb700 [zio_free_issue_5_9] 100315 D - 0xffffffd0059eb700 [zio_free_issue_5_10] 100316 D - 0xffffffd0059eb700 [zio_free_issue_5_11] 100317 D - 0xffffffd0059eb600 [zio_free_issue_6_0] 100318 D - 0xffffffd0059eb600 [zio_free_issue_6_1] 100319 D - 0xffffffd0059eb600 [zio_free_issue_6_2] 100320 D - 0xffffffd0059eb600 [zio_free_issue_6_3] 100321 D - 0xffffffd0059eb600 [zio_free_issue_6_4] 100322 D - 0xffffffd0059eb600 [zio_free_issue_6_5] 100323 D - 0xffffffd0059eb600 [zio_free_issue_6_6] 100324 D - 0xffffffd0059eb600 [zio_free_issue_6_7] 100325 D - 0xffffffd0059eb600 [zio_free_issue_6_8] 100326 D - 0xffffffd0059eb600 [zio_free_issue_6_9] 100327 D - 0xffffffd0059eb600 [zio_free_issue_6_10] 100328 D - 0xffffffd0059eb600 
[zio_free_issue_6_11] 100329 D - 0xffffffd0059eb500 [zio_free_issue_7_0] 100330 D - 0xffffffd0059eb500 [zio_free_issue_7_1] 100331 D - 0xffffffd0059eb500 [zio_free_issue_7_2] 100332 D - 0xffffffd0059eb500 [zio_free_issue_7_3] 100333 D - 0xffffffd0059eb500 [zio_free_issue_7_4] 100334 D - 0xffffffd0059eb500 [zio_free_issue_7_5] 100335 D - 0xffffffd0059eb500 [zio_free_issue_7_6] 100336 D - 0xffffffd0059eb500 [zio_free_issue_7_7] 100337 D - 0xffffffd0059eb500 [zio_free_issue_7_8] 100338 D - 0xffffffd0059eb500 [zio_free_issue_7_9] 100339 D - 0xffffffd0059eb500 [zio_free_issue_7_10] 100340 D - 0xffffffd0059eb500 [zio_free_issue_7_11] 100341 D - 0xffffffd0059eb400 [zio_free_intr] 100342 D - 0xffffffd0059eb300 [zio_claim_issue] 100343 D - 0xffffffd0059eb200 [zio_claim_intr] 100344 D - 0xffffffd0059eb100 [zio_ioctl_issue] 100345 D - 0xffffffd0059eb000 [zio_ioctl_intr] 100347 D - 0xffffffd0059eae00 [metaslab_group_task] 100348 D - 0xffffffd0059eae00 [metaslab_group_task] 100349 D - 0xffffffd001369400 [dp_sync_taskq_0] 100350 D - 0xffffffd001369400 [dp_sync_taskq_1] 100351 D - 0xffffffd001369400 [dp_sync_taskq_2] 100352 D - 0xffffffd001369300 [dp_zil_clean_taskq_] 100353 D - 0xffffffd001369300 [dp_zil_clean_taskq_] 100354 D - 0xffffffd001369300 [dp_zil_clean_taskq_] 100355 D - 0xffffffd001369300 [dp_zil_clean_taskq_] 100356 D - 0xffffffd001369200 [zfs_vn_rele_taskq] db> db> halt panic: cpu_halt cpuid = 2 time = 1558621965 KDB: stack backtrace: db_trace_self() at db_read_token+0x5e2 pc = 0xffffffc00046539a ra = 0xffffffc0000c1d52 sp = 0xffffffc041ef31b8 fp = 0xffffffc041ef33d8 db_read_token() at kdb_backtrace+0x2e pc = 0xffffffc0000c1d52 ra = 0xffffffc0002156ea sp = 0xffffffc041ef33d8 fp = 0xffffffc041ef33e8 kdb_backtrace() at vpanic+0x15a pc = 0xffffffc0002156ea ra = 0xffffffc0001d9f8c sp = 0xffffffc041ef33e8 fp = 0xffffffc041ef3428 vpanic() at panic+0x22 pc = 0xffffffc0001d9f8c ra = 0xffffffc0001d9fc4 sp = 0xffffffc041ef3428 fp = 0xffffffc041ef3448 panic() at cpu_halt+0x14 
pc = 0xffffffc0001d9fc4 ra = 0xffffffc0004668f0 sp = 0xffffffc041ef3448 fp = 0xffffffc041ef3498 cpu_halt() at db_capture_cmd+0x296 pc = 0xffffffc0004668f0 ra = 0xffffffc0000bf47a sp = 0xffffffc041ef3498 fp = 0xffffffc041ef34a8 db_capture_cmd() at db_skip_to_eol+0x414 pc = 0xffffffc0000bf47a ra = 0xffffffc0000bf9f8 sp = 0xffffffc041ef34a8 fp = 0xffffffc041ef3598 db_skip_to_eol() at db_command_loop+0x74 pc = 0xffffffc0000bf9f8 ra = 0xffffffc0000bff28 sp = 0xffffffc041ef3598 fp = 0xffffffc041ef35d8 db_command_loop() at db_read_token+0x4ba pc = 0xffffffc0000bff28 ra = 0xffffffc0000c1c2a sp = 0xffffffc041ef35d8 fp = 0xffffffc041ef3818 db_read_token() at kdb_trap+0x122 pc = 0xffffffc0000c1c2a ra = 0xffffffc000215e1c sp = 0xffffffc041ef3818 fp = 0xffffffc041ef3878 kdb_trap() at do_trap_supervisor+0x96 pc = 0xffffffc000215e1c ra = 0xffffffc00046f064 sp = 0xffffffc041ef3878 fp = 0xffffffc041ef3898 do_trap_supervisor() at cpu_exception_handler_supervisor+0x68 pc = 0xffffffc00046f064 ra = 0xffffffc000465a28 sp = 0xffffffc041ef3898 fp = 0xffffffc041ef39d0 cpu_exception_handler_supervisor() at vpanic+0x16a pc = 0xffffffc000465a28 ra = 0xffffffc0001d9f9c sp = 0xffffffc041ef39d0 fp = 0xffffffc041ef3a10 vpanic() at panic+0x22 pc = 0xffffffc0001d9f9c ra = 0xffffffc0001d9fc4 sp = 0xffffffc041ef3a10 fp = 0xffffffc041ef3a30 panic() at assfail3+0x18 pc = 0xffffffc0001d9fc4 ra = 0xffffffc001651590 sp = 0xffffffc041ef3a30 fp = 0xffffffd005c86368 Uptime: 59m54s Rebooting... Power off So I will come back here if there is a need after a full rebuild with the CURRENT sources. -- Dennis Clarke RISC-V/SPARC/PPC/ARM/CISC UNIX and Linux spoken GreyBeard and suspenders optional
I tried to push this a little further and could not get a panic again. Once is usually enough. However I did add some more devices to the zpool and tried to toss some activity at it. No panic seen on a reboot. ganymede# ganymede# zpool list NAME SIZE ALLOC FREE CKPOINT EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT rv64imafdc 3.75G 863M 2.91G - - 6% 22% 1.00x ONLINE - ganymede# ganymede# zpool status pool: rv64imafdc state: ONLINE scan: scrub repaired 0 in 0 days 00:00:45 with 0 errors on Thu May 23 14:22:53 2019 config: NAME STATE READ WRITE CKSUM rv64imafdc ONLINE 0 0 0 vtbd1 ONLINE 0 0 0 errors: No known data errors ganymede# I added on a mirror. ganymede# /bin/date '+%Y%m%d%H%M%S' ; zpool status 20190523151823 pool: rv64imafdc state: ONLINE status: One or more devices is currently being resilvered. The pool will continue to function, possibly in a degraded state. action: Wait for the resilver to complete. scan: resilver in progress since Thu May 23 15:15:35 2019 861M scanned at 5.04M/s, 474M issued at 2.77M/s, 862M total 472M resilvered, 54.98% done, no estimated completion time config: NAME STATE READ WRITE CKSUM rv64imafdc ONLINE 0 0 0 mirror-0 ONLINE 0 0 0 vtbd1 ONLINE 0 0 0 vtbd2 ONLINE 0 0 0 errors: No known data errors ganymede# ganymede# /bin/date '+%Y%m%d%H%M%S' ; zpool status 20190523152328 pool: rv64imafdc state: ONLINE scan: resilvered 861M in 0 days 00:05:14 with 0 errors on Thu May 23 15:20:49 2019 config: NAME STATE READ WRITE CKSUM rv64imafdc ONLINE 0 0 0 mirror-0 ONLINE 0 0 0 vtbd1 ONLINE 0 0 0 vtbd2 ONLINE 0 0 0 errors: No known data errors ganymede# Add on a hot spare. 
ganymede# zpool add rv64imafdc spare vtbd3 ganymede# /bin/date '+%Y%m%d%H%M%S' ; zpool status 20190523152443 pool: rv64imafdc state: ONLINE scan: resilvered 861M in 0 days 00:05:14 with 0 errors on Thu May 23 15:20:49 2019 config: NAME STATE READ WRITE CKSUM rv64imafdc ONLINE 0 0 0 mirror-0 ONLINE 0 0 0 vtbd1 ONLINE 0 0 0 vtbd2 ONLINE 0 0 0 spares vtbd3 AVAIL errors: No known data errors ganymede# ganymede# ganymede# sysctl -a | grep '^Free' Free Memory: 12044436K ganymede# sysctl -a | grep '\.smp\.' kern.smp.maxid: 7 kern.smp.maxcpus: 16 kern.smp.active: 1 kern.smp.disabled: 0 kern.smp.cpus: 8 kern.smp.topology: 0 kern.smp.forward_signal_enabled: 1 ganymede# zpool list NAME SIZE ALLOC FREE CKPOINT EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT rv64imafdc 3.75G 862M 2.91G - - 6% 22% 1.00x ONLINE - ganymede# zpool status pool: rv64imafdc state: ONLINE scan: resilvered 861M in 0 days 00:05:14 with 0 errors on Thu May 23 15:20:49 2019 config: NAME STATE READ WRITE CKSUM rv64imafdc ONLINE 0 0 0 mirror-0 ONLINE 0 0 0 vtbd1 ONLINE 0 0 0 vtbd2 ONLINE 0 0 0 spares vtbd3 AVAIL errors: No known data errors ganymede# ganymede# uname -a FreeBSD ganymede 13.0-CURRENT FreeBSD 13.0-CURRENT r341837 QEMU riscv ganymede# ganymede# zfs list NAME USED AVAIL REFER MOUNTPOINT rv64imafdc 862M 2.78G 23K none rv64imafdc/z 861M 1.16G 23K /z rv64imafdc/z/000 861M 163M 844M /z/000 rv64imafdc/z/001 23K 1024M 23K /z/001 ganymede# Did a shutdown and then boot. No problem seen. Really a full fresh build is needed. -- Dennis Clarke RISC-V/SPARC/PPC/ARM/CISC UNIX and Linux spoken GreyBeard and suspenders optional
Merely a follow-up comment. Here is how I bring up the qemu virtual machine with a few extra disks in order to test zpool and zfs functions: qemu-system-riscv64 -nographic \ -machine virt -smp 8 -m 12G -kernel bbl \ -object rng-random,filename=/dev/urandom,id=rng0 \ -device virtio-rng-device,rng=rng0 \ -drive file=./20190107183100/riscv64_freebsd/disk.img,format=raw,index=0,media=disk,id=hd0 \ -device virtio-blk-device,drive=hd0 \ -drive file=/usr/local/build/qemu_qcow2_test/d0.img,format=raw,index=1,media=disk,id=hd1 \ -device virtio-blk-device,drive=hd1 \ -drive file=/usr/local/build/qemu_qcow2_test/d1.img,format=raw,index=2,media=disk,id=hd2 \ -device virtio-blk-device,drive=hd2 \ -drive file=/usr/local/build/qemu_qcow2_test/d2.img,format=raw,index=3,media=disk,id=hd3 \ -device virtio-blk-device,drive=hd3 \ -device virtio-net-device,netdev=usernet \ -netdev user,id=usernet,hostfwd=tcp::10000-:22
The panic is repeatable. Seems to need a few tries to hit it on shutdown ganymede# uname -a FreeBSD ganymede 13.0-CURRENT FreeBSD 13.0-CURRENT r341837 QEMU riscv ganymede# zpool status pool: rv64imafdc state: ONLINE scan: resilvered 861M in 0 days 00:05:14 with 0 errors on Thu May 23 15:20:49 2019 config: NAME STATE READ WRITE CKSUM rv64imafdc ONLINE 0 0 0 mirror-0 ONLINE 0 0 0 vtbd1 ONLINE 0 0 0 vtbd2 ONLINE 0 0 0 spares vtbd3 AVAIL errors: No known data errors ganymede# sysctl hw.machine hw.ncpu hw.machine_arch kern.smp.maxcpus kern.smp.active kern.smp.disabled kern.smp.cpus hw.machine: riscv hw.ncpu: 8 hw.machine_arch: riscv64 kern.smp.maxcpus: 16 kern.smp.active: 1 kern.smp.disabled: 0 kern.smp.cpus: 8 ganymede# ganymede# uptime 12:07PM up 1 day, 21:03, 1 user, load averages: 0.41, 0.25, 0.18 ganymede# ganymede# ganymede# ganymede# shutdown -p 'now' Shutdown NOW! shutdown: [pid 6028] ganymede# *** FINAL System shutdown message from root@ganymede *** System going down IMMEDIATELY System shutdown time has arrived May 25 12:07:57 ganymede shutdown[6028]: power-down by root: Stopping cron. Waiting for PIDS: 561. Stopping sshd. Waiting for PIDS: 551. 90 second watchdog timeout expired. Shutdown terminated. Sat May 25 12:09:34 UTC 2019 May 25 12:09:35 ganymede syslogd: exiting on signal 15 Waiting (max 60 seconds) for system process `vnlru' to stop... done Waiting (max 60 seconds) for system process `syncer' to stop... Syncing disks, vnodes remaining... 0 0 0 done Waiting (max 60 seconds) for system thread `bufdaemon' to stop... done Waiting (max 60 seconds) for system thread `bufspacedaemon-0' to stop... done Waiting (max 60 seconds) for system thread `bufspacedaemon-1' to stop... done Waiting (max 60 seconds) for system thread `bufspacedaemon-2' to stop... done Waiting (max 60 seconds) for system thread `bufspacedaemon-3' to stop... done Waiting (max 60 seconds) for system thread `bufspacedaemon-4' to stop... done All buffers synced. 
Uptime: 1d21h5m29s panic: solaris assert: zrl->zr_refcount == 0 (0x3 == 0x0), file: /usr/src/head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zrlock.c, line: 65 cpuid = 1 time = 1558786186 KDB: stack backtrace: db_trace_self() at db_read_token+0x5e2 pc = 0xffffffc00046539a ra = 0xffffffc0000c1d52 sp = 0xffffffc06f85c7a0 fp = 0xffffffc06f85c9c0 db_read_token() at kdb_backtrace+0x2e pc = 0xffffffc0000c1d52 ra = 0xffffffc0002156ea sp = 0xffffffc06f85c9c0 fp = 0xffffffc06f85c9d0 kdb_backtrace() at vpanic+0x15a pc = 0xffffffc0002156ea ra = 0xffffffc0001d9f8c sp = 0xffffffc06f85c9d0 fp = 0xffffffc06f85ca10 vpanic() at panic+0x22 pc = 0xffffffc0001d9f8c ra = 0xffffffc0001d9fc4 sp = 0xffffffc06f85ca10 fp = 0xffffffc06f85ca30 panic() at assfail3+0x18 pc = 0xffffffc0001d9fc4 ra = 0xffffffc0013ec590 sp = 0xffffffc06f85ca30 fp = 0xffffffd005bb10a8 KDB: enter: panic [ thread pid 0 tid 100108 ] Stopped at kdb_enter+0x3a: c.ebreak db> bt Tracing pid 0 tid 100108 td 0xffffffd0058d0000 db_trace_self() at db_trace_thread+0x3c pc = 0xffffffc00046539a ra = 0xffffffc00046542c sp = 0xffffffc06f85c448 fp = 0xffffffc06f85c478 db_trace_thread() at db_skip_to_eol+0x300 pc = 0xffffffc00046542c ra = 0xffffffc0000bf8e4 sp = 0xffffffc06f85c478 fp = 0xffffffc06f85c4a8 db_skip_to_eol() at db_skip_to_eol+0x414 pc = 0xffffffc0000bf8e4 ra = 0xffffffc0000bf9f8 sp = 0xffffffc06f85c4a8 fp = 0xffffffc06f85c598 db_skip_to_eol() at db_command_loop+0x74 pc = 0xffffffc0000bf9f8 ra = 0xffffffc0000bff28 sp = 0xffffffc06f85c598 fp = 0xffffffc06f85c5d8 db_command_loop() at db_read_token+0x4ba pc = 0xffffffc0000bff28 ra = 0xffffffc0000c1c2a sp = 0xffffffc06f85c5d8 fp = 0xffffffc06f85c818 db_read_token() at kdb_trap+0x122 pc = 0xffffffc0000c1c2a ra = 0xffffffc000215e1c sp = 0xffffffc06f85c818 fp = 0xffffffc06f85c878 kdb_trap() at do_trap_supervisor+0x96 pc = 0xffffffc000215e1c ra = 0xffffffc00046f064 sp = 0xffffffc06f85c878 fp = 0xffffffc06f85c898 do_trap_supervisor() at cpu_exception_handler_supervisor+0x68 
pc = 0xffffffc00046f064 ra = 0xffffffc000465a28 sp = 0xffffffc06f85c898 fp = 0xffffffc06f85c9d0 cpu_exception_handler_supervisor() at vpanic+0x16a pc = 0xffffffc000465a28 ra = 0xffffffc0001d9f9c sp = 0xffffffc06f85c9d0 fp = 0xffffffc06f85ca10 vpanic() at panic+0x22 pc = 0xffffffc0001d9f9c ra = 0xffffffc0001d9fc4 sp = 0xffffffc06f85ca10 fp = 0xffffffc06f85ca30 panic() at assfail3+0x18 pc = 0xffffffc0001d9fc4 ra = 0xffffffc0013ec590 sp = 0xffffffc06f85ca30 fp = 0xffffffd005bb10a8 db> halt panic: cpu_halt cpuid = 1 time = 1558786186 KDB: stack backtrace: db_trace_self() at db_read_token+0x5e2 pc = 0xffffffc00046539a ra = 0xffffffc0000c1d52 sp = 0xffffffc06f85c1b8 fp = 0xffffffc06f85c3d8 db_read_token() at kdb_backtrace+0x2e pc = 0xffffffc0000c1d52 ra = 0xffffffc0002156ea sp = 0xffffffc06f85c3d8 fp = 0xffffffc06f85c3e8 kdb_backtrace() at vpanic+0x15a pc = 0xffffffc0002156ea ra = 0xffffffc0001d9f8c sp = 0xffffffc06f85c3e8 fp = 0xffffffc06f85c428 vpanic() at panic+0x22 pc = 0xffffffc0001d9f8c ra = 0xffffffc0001d9fc4 sp = 0xffffffc06f85c428 fp = 0xffffffc06f85c448 panic() at cpu_halt+0x14 pc = 0xffffffc0001d9fc4 ra = 0xffffffc0004668f0 sp = 0xffffffc06f85c448 fp = 0xffffffc06f85c498 cpu_halt() at db_capture_cmd+0x296 pc = 0xffffffc0004668f0 ra = 0xffffffc0000bf47a sp = 0xffffffc06f85c498 fp = 0xffffffc06f85c4a8 db_capture_cmd() at db_skip_to_eol+0x414 pc = 0xffffffc0000bf47a ra = 0xffffffc0000bf9f8 sp = 0xffffffc06f85c4a8 fp = 0xffffffc06f85c598 db_skip_to_eol() at db_command_loop+0x74 pc = 0xffffffc0000bf9f8 ra = 0xffffffc0000bff28 sp = 0xffffffc06f85c598 fp = 0xffffffc06f85c5d8 db_command_loop() at db_read_token+0x4ba pc = 0xffffffc0000bff28 ra = 0xffffffc0000c1c2a sp = 0xffffffc06f85c5d8 fp = 0xffffffc06f85c818 db_read_token() at kdb_trap+0x122 pc = 0xffffffc0000c1c2a ra = 0xffffffc000215e1c sp = 0xffffffc06f85c818 fp = 0xffffffc06f85c878 kdb_trap() at do_trap_supervisor+0x96 pc = 0xffffffc000215e1c ra = 0xffffffc00046f064 sp = 0xffffffc06f85c878 fp = 
0xffffffc06f85c898 do_trap_supervisor() at cpu_exception_handler_supervisor+0x68 pc = 0xffffffc00046f064 ra = 0xffffffc000465a28 sp = 0xffffffc06f85c898 fp = 0xffffffc06f85c9d0 cpu_exception_handler_supervisor() at vpanic+0x16a pc = 0xffffffc000465a28 ra = 0xffffffc0001d9f9c sp = 0xffffffc06f85c9d0 fp = 0xffffffc06f85ca10 vpanic() at panic+0x22 pc = 0xffffffc0001d9f9c ra = 0xffffffc0001d9fc4 sp = 0xffffffc06f85ca10 fp = 0xffffffc06f85ca30 panic() at assfail3+0x18 pc = 0xffffffc0001d9fc4 ra = 0xffffffc0013ec590 sp = 0xffffffc06f85ca30 fp = 0xffffffd005bb10a8 Uptime: 1d21h5m29s Rebooting... Power off titan$
123456789+123456789+123456789+123456789+123456789+123456789+123456789+12 I have been running a recent kernel and version of OpenZFS with a three-way mirror plus a hot spare. It has been running for well over a month under continuous load, including buildkernel and buildworld within the ZFS filesystems. I also turn on features such as sha256 and sha512 hash and test with various compression options. I never touch dedupe. That has never worked anyway — at least not ever with Sun/Oracle ZFS. Add on top of that computational testing and other mixed loads, and I cannot reproduce this bug. In fact, I have seen not a single flaw in the ZFS implementation on RISC-V thus far. Also snapshots. Who doesn't love to have snapshots? There is really no disk load overhead to a snapshot. ijiraq# uname -apKU FreeBSD ijiraq 13.0-ALPHA1 FreeBSD 13.0-ALPHA1 #0 main-c256037-gf3ea417f96b0: Sun Jan 17 21:27:09 GMT 2021 root@ijiraq:/usr/obj/usr/src/freebsd-src/riscv.riscv64/sys/GENERIC riscv riscv64 1300136 1300133 ijiraq# ijiraq# zpool status pool: z0 state: ONLINE scan: scrub repaired 0B in 00:10:17 with 0 errors on Wed Jan 27 21:10:09 2021 config: NAME STATE READ WRITE CKSUM z0 ONLINE 0 0 0 mirror-0 ONLINE 0 0 0 vtbd1p1 ONLINE 0 0 0 vtbd2p1 ONLINE 0 0 0 vtbd3p1 ONLINE 0 0 0 spares vtbd4p1 AVAIL errors: No known data errors ijiraq# zpool get all z0 NAME PROPERTY VALUE SOURCE z0 size 31.5G - z0 capacity 32% - z0 altroot - default z0 health ONLINE - z0 guid 5951082463239387854 - z0 version - default z0 bootfs - default z0 delegation on default z0 autoreplace off default z0 cachefile - default z0 failmode continue local z0 listsnapshots off default z0 autoexpand off default z0 dedupratio 1.00x - z0 free 21.4G - z0 allocated 10.1G - z0 readonly off - z0 ashift 0 default z0 comment - default z0 expandsize - - z0 freeing 0 - z0 fragmentation 18% - z0 leaked 0 - z0 multihost off default z0 checkpoint - - z0 load_guid 3913557725721910272 - z0 autotrim off default z0 feature@async_destroy enabled 
local z0 feature@empty_bpobj active local z0 feature@lz4_compress active local z0 feature@multi_vdev_crash_dump enabled local z0 feature@spacemap_histogram active local z0 feature@enabled_txg active local z0 feature@hole_birth active local z0 feature@extensible_dataset active local z0 feature@embedded_data active local z0 feature@bookmarks enabled local z0 feature@filesystem_limits enabled local z0 feature@large_blocks enabled local z0 feature@large_dnode enabled local z0 feature@sha512 active local z0 feature@skein enabled local z0 feature@userobj_accounting active local z0 feature@encryption enabled local z0 feature@project_quota active local z0 feature@device_removal enabled local z0 feature@obsolete_counts enabled local z0 feature@zpool_checkpoint enabled local z0 feature@spacemap_v2 active local z0 feature@allocation_classes enabled local z0 feature@resilver_defer enabled local z0 feature@bookmark_v2 enabled local z0 feature@redaction_bookmarks enabled local z0 feature@redacted_datasets enabled local z0 feature@bookmark_written enabled local z0 feature@log_spacemap active local z0 feature@livelist enabled local z0 feature@device_rebuild enabled local z0 feature@zstd_compress enabled local ijiraq# ijiraq# ijiraq# zfs list -o name,used,avail,atime,checksum,compression,compressratio,exec,setuid,mounted,mountpoint -t all NAME USED AVAIL ATIME CHECKSUM COMPRESS RATIO EXEC SETUID MOUNTED MOUNTPOINT z0 10.1G 20.4G on sha256 lz4 2.10x on on no none z0@20210117024244 0B - - - - 1.00x on on - - z0@20210117024620 0B - - - - 1.00x on on - - z0@20210118015828 0B - - - - 1.00x on on - - z0/local 471M 20.4G on sha256 lz4 2.23x on on yes /usr/local z0/local@20201128001413 2.36M - - - - 2.27x on on - - z0/local@20210117024244 435K - - - - 2.26x on on - - z0/local@20210117024620 223K - - - - 2.24x on on - - z0/local@20210118015828 305K - - - - 2.24x on on - - z0/obj 3.55G 8.45G on sha512 lz4 2.89x on on yes /usr/obj z0/opt 2.02G 20.4G on sha256 lz4 2.26x on on yes /opt 
z0/opt@20210117024244 34K - - - - 2.09x on on - - z0/opt@20210117024620 34K - - - - 2.09x on on - - z0/opt@20210118015828 34K - - - - 2.09x on on - - z0/opt/bw 2.01G 20.4G on sha256 lz4 2.26x on on yes /opt/bw z0/opt/bw@20210117024244 0B - - - - 2.26x on on - - z0/opt/bw@20210117024620 0B - - - - 2.26x on on - - z0/opt/bw@20210118015828 0B - - - - 2.26x on on - - z0/ports 1.68G 20.4G on sha512 lz4 1.35x on on yes /usr/ports z0/ports@20210117010525 22K - - - - 1.00x on on - - z0/ports@20210117024244 436K - - - - 2.24x on on - - z0/ports@20210117024620 602K - - - - 2.25x on on - - z0/ports@20210118015828 34.8M - - - - 1.19x on on - - z0/src 2.39G 20.4G on sha256 lz4 1.29x on on yes /usr/src z0/src@20210127232344 15.1M - - - - 1.29x on on - - ijiraq# ijiraq# uptime 12:26AM up 8 days, 5:35, 2 users, load averages: 2.81, 2.47, 2.38 ijiraq# It may be best to close this as simply "never seen again". -- Dennis Clarke RISC-V/SPARC/PPC/ARM/CISC UNIX and Linux spoken GreyBeard and suspenders optional