$OpenBSD: patch-cpu_ctrl_xfer_pro_cc,v 1.2 2001/03/30 18:53:25 todd Exp $
--- cpu/ctrl_xfer_pro.cc.orig	Sat Mar 25 21:39:07 2000
+++ cpu/ctrl_xfer_pro.cc	Fri Mar 30 11:05:29 2001
@@ -40,7 +40,7 @@ BX_CPU_C::jump_protected(BxInstruction_t
 
   /* destination selector is not null else #GP(0) */
   if ((cs_raw & 0xfffc) == 0) {
-    bx_panic("jump_protected: cs == 0\n");
+    bio->panic("jump_protected: cs == 0\n");
     exception(BX_GP_EXCEPTION, 0, 0);
     return;
     }
@@ -57,7 +57,7 @@ BX_CPU_C::jump_protected(BxInstruction_t
 
   if ( descriptor.segment ) {
     if ( descriptor.u.segment.executable==0 ) {
-      bx_printf("jump_protected: S=1: descriptor not executable\n");
+      bio->printd("[CPU%u] jump_protected: S=1: descriptor not executable\n", BX_SIM_ID);
       exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
       return;
       }
@@ -65,21 +65,21 @@ BX_CPU_C::jump_protected(BxInstruction_t
     if ( descriptor.u.segment.c_ed ) {
       // descripor DPL must be <= CPL else #GP(selector)
       if (descriptor.dpl > CPL) {
-        bx_printf("jump_protected: dpl > CPL\n");
+        bio->printd("[CPU%u] jump_protected: dpl > CPL\n", BX_SIM_ID);
         exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
         return;
         }
 
       /* segment must be PRESENT else #NP(selector) */
       if (descriptor.p == 0) {
-        bx_printf("jump_protected: p == 0\n");
+        bio->printd("[CPU%u] jump_protected: p == 0\n", BX_SIM_ID);
         exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
         return;
         }
 
       /* instruction pointer must be in code segment limit else #GP(0) */
       if (disp32 > descriptor.u.segment.limit_scaled) {
-        bx_panic("jump_protected: IP > limit\n");
+        bio->panic("jump_protected: IP > limit\n");
         exception(BX_GP_EXCEPTION, 0, 0);
         return;
         }
@@ -96,28 +96,28 @@ BX_CPU_C::jump_protected(BxInstruction_t
     else {
       /* RPL of destination selector must be <= CPL else #GP(selector) */
       if (selector.rpl > CPL) {
-        bx_panic("jump_protected: rpl > CPL\n");
+        bio->panic("jump_protected: rpl > CPL\n");
         exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
         return;
         }
 
       // descriptor DPL must = CPL else #GP(selector)
       if (descriptor.dpl != CPL) {
-        bx_printf("jump_protected: dpl != CPL\n");
+        bio->printd("[CPU%u] jump_protected: dpl != CPL\n", BX_SIM_ID);
         exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
         return;
         }
 
       /* segment must be PRESENT else #NP(selector) */
       if (descriptor.p == 0) {
-        bx_printf("jump_protected: p == 0\n");
+        bio->printd("[CPU%u] jump_protected: p == 0\n", BX_SIM_ID);
         exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
         return;
         }
 
       /* IP must be in code segment limit else #GP(0) */
       if (disp32 > descriptor.u.segment.limit_scaled) {
-        bx_panic("jump_protected: IP > limit\n");
+        bio->panic("jump_protected: IP > limit\n");
         exception(BX_GP_EXCEPTION, 0, 0);
         return;
         }
@@ -129,7 +129,7 @@ BX_CPU_C::jump_protected(BxInstruction_t
       BX_CPU_THIS_PTR eip = disp32;
       return;
       }
-    bx_panic("jump_protected: segment=1\n");
+    bio->panic("jump_protected: segment=1\n");
     }
 
   else {
@@ -144,20 +144,20 @@ BX_CPU_C::jump_protected(BxInstruction_t
       case  1: // 286 available TSS
       case  9: // 386 available TSS
         //if ( descriptor.type==1 )
-        //  bx_printf("jump to 286 TSS\n");
+        //  bio->printd("[CPU%u] jump to 286 TSS\n", BX_SIM_ID);
         //else
-        //  bx_printf("jump to 386 TSS\n");
+        //  bio->printd("[CPU%u] jump to 386 TSS\n", BX_SIM_ID);
 
         // TSS DPL must be >= CPL, else #GP(TSS selector)
         if (descriptor.dpl < CPL) {
-          bx_panic("jump_protected: TSS.dpl < CPL\n");
+          bio->panic("jump_protected: TSS.dpl < CPL\n");
           exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
           return;
           }
 
         // TSS DPL must be >= TSS selector RPL, else #GP(TSS selector)
         if (descriptor.dpl < selector.rpl) {
-          bx_panic("jump_protected: TSS.dpl < selector.rpl\n");
+          bio->panic("jump_protected: TSS.dpl < selector.rpl\n");
           exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
           return;
           }
@@ -175,7 +175,7 @@ BX_CPU_C::jump_protected(BxInstruction_t
 
         // IP must be in code seg limit, else #GP(0)
         if (EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
-          bx_printf("jump_protected: TSS.p == 0\n");
+          bio->printd("[CPU%u] jump_protected: TSS.p == 0\n", BX_SIM_ID);
           exception(BX_GP_EXCEPTION, 0, 0);
           return;
           }
@@ -183,30 +183,30 @@ BX_CPU_C::jump_protected(BxInstruction_t
         break;
 
       case  3: // Busy 286 TSS
-        bx_panic("jump_protected: JUMP to busy 286 TSS unsupported.\n");
+        bio->panic("jump_protected: JUMP to busy 286 TSS unsupported.\n");
         return;
         break;
 
       case  4: // 286 call gate
-        //bx_printf("jump_protected: JUMP TO 286 CALL GATE:\n");
+        //bio->printd("[CPU%u] jump_protected: JUMP TO 286 CALL GATE:\n", BX_SIM_ID);
 
         // descriptor DPL must be >= CPL else #GP(gate selector)
         if (descriptor.dpl < CPL) {
-          bx_printf("jump_protected: gate.dpl < CPL\n");
+          bio->printd("[CPU%u] jump_protected: gate.dpl < CPL\n", BX_SIM_ID);
           exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
           return;
           }
 
         // descriptor DPL must be >= gate selector RPL else #GP(gate selector)
         if (descriptor.dpl < selector.rpl) {
-          bx_printf("jump_protected: gate.dpl < selector.rpl\n");
+          bio->printd("[CPU%u] jump_protected: gate.dpl < selector.rpl\n", BX_SIM_ID);
           exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
           return;
           }
 
         // gate must be present else #NP(gate selector)
         if (descriptor.p==0) {
-          bx_panic("jump_protected: task gate.p == 0\n");
+          bio->panic("jump_protected: task gate.p == 0\n");
           exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
           return;
           }
@@ -215,7 +215,7 @@ BX_CPU_C::jump_protected(BxInstruction_t
         // selector must not be null, else #GP(0)
         gate_cs_raw = descriptor.u.gate286.dest_selector;
         if ( (gate_cs_raw & 0xfffc) == 0 ) {
-          bx_panic("jump_protected: CS selector null\n");
+          bio->panic("jump_protected: CS selector null\n");
           exception(BX_GP_EXCEPTION, 0x0000, 0);
           }
         parse_selector(gate_cs_raw, &gate_cs_selector);
@@ -228,35 +228,35 @@ BX_CPU_C::jump_protected(BxInstruction_t
         if ( (gate_cs_descriptor.valid==0) ||
              (gate_cs_descriptor.segment==0) ||
              (gate_cs_descriptor.u.segment.executable==0) ) {
-          bx_printf("jump_protected: AR byte: not code segment.\n");
+          bio->printd("[CPU%u] jump_protected: AR byte: not code segment.\n", BX_SIM_ID);
           exception(BX_GP_EXCEPTION, gate_cs_raw & 0xfffc, 0);
           }
 
         // if non-conforming, code segment descriptor DPL must = CPL else #GP(CS selector)
         if (gate_cs_descriptor.u.segment.c_ed==0) {
           if (gate_cs_descriptor.dpl != CPL) {
-            bx_printf("jump_protected: non-conform: code seg des DPL != CPL.\n");
+            bio->printd("[CPU%u] jump_protected: non-conform: code seg des DPL != CPL.\n", BX_SIM_ID);
             exception(BX_GP_EXCEPTION, gate_cs_raw & 0xfffc, 0);
             }
           }
         // if conforming, then code segment descriptor DPL must <= CPL else #GP(CS selector)
         else {
           if (gate_cs_descriptor.dpl > CPL) {
-            bx_printf("jump_protected: conform: code seg des DPL > CPL.\n");
+            bio->printd("[CPU%u] jump_protected: conform: code seg des DPL > CPL.\n", BX_SIM_ID);
             exception(BX_GP_EXCEPTION, gate_cs_raw & 0xfffc, 0);
             }
           }
 
         // code segment must be present else #NP(CS selector)
         if (gate_cs_descriptor.p==0) {
-          bx_printf("jump_protected: code seg not present.\n");
+          bio->printd("[CPU%u] jump_protected: code seg not present.\n", BX_SIM_ID);
           exception(BX_NP_EXCEPTION, gate_cs_raw & 0xfffc, 0);
           }
 
         // IP must be in code segment limit else #GP(0)
         if ( descriptor.u.gate286.dest_offset >
              gate_cs_descriptor.u.segment.limit_scaled ) {
-          bx_panic("jump_protected: IP > limit\n");
+          bio->panic("jump_protected: IP > limit\n");
           exception(BX_GP_EXCEPTION, 0x0000, 0);
           }
 
@@ -270,11 +270,11 @@ BX_CPU_C::jump_protected(BxInstruction_t
 
 
       case  5: // task gate
-//bx_printf("jump_pro: task gate\n");
+//bio->printd("[CPU%u] jump_pro: task gate\n", BX_SIM_ID);
 
         // gate descriptor DPL must be >= CPL else #GP(gate selector)
         if (descriptor.dpl < CPL) {
-          bx_panic("jump_protected: gate.dpl < CPL\n");
+          bio->panic("jump_protected: gate.dpl < CPL\n");
           exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
           return;
           }
@@ -282,14 +282,14 @@ BX_CPU_C::jump_protected(BxInstruction_t
         // gate descriptor DPL must be >= gate selector RPL
         //   else #GP(gate selector)
         if (descriptor.dpl < selector.rpl) {
-          bx_panic("jump_protected: gate.dpl < selector.rpl\n");
+          bio->panic("jump_protected: gate.dpl < selector.rpl\n");
           exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
           return;
           }
 
         // task gate must be present else #NP(gate selector)
         if (descriptor.p==0) {
-          bx_panic("jump_protected: task gate.p == 0\n");
+          bio->panic("jump_protected: task gate.p == 0\n");
           exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
           return;
           }
@@ -300,7 +300,7 @@ BX_CPU_C::jump_protected(BxInstruction_t
         raw_tss_selector = descriptor.u.taskgate.tss_selector;
         parse_selector(raw_tss_selector, &tss_selector);
         if (tss_selector.ti) {
-          bx_panic("jump_protected: tss_selector.ti=1\n");
+          bio->panic("jump_protected: tss_selector.ti=1\n");
           exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
           return;
           }
@@ -313,18 +313,18 @@ BX_CPU_C::jump_protected(BxInstruction_t
         //   else #GP(TSS selector)
         parse_descriptor(dword1, dword2, &tss_descriptor);
         if (tss_descriptor.valid==0 || tss_descriptor.segment) {
-          bx_printf("jump_protected: TSS selector points to bad TSS\n");
+          bio->printd("[CPU%u] jump_protected: TSS selector points to bad TSS\n", BX_SIM_ID);
           exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
           }
         if (tss_descriptor.type!=9 && tss_descriptor.type!=1) {
-          bx_printf("jump_protected: TSS selector points to bad TSS\n");
+          bio->printd("[CPU%u] jump_protected: TSS selector points to bad TSS\n", BX_SIM_ID);
           exception(BX_GP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
           }
 
 
         // task state segment must be present, else #NP(tss selector)
         if (tss_descriptor.p==0) {
-          bx_panic("jump_protected: task descriptor.p == 0\n");
+          bio->panic("jump_protected: task descriptor.p == 0\n");
           exception(BX_NP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
           }
 
@@ -338,37 +338,37 @@ BX_CPU_C::jump_protected(BxInstruction_t
         else
           temp_eIP =  IP;
         if (temp_eIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
-          bx_panic("jump_protected: eIP > cs.limit\n");
+          bio->panic("jump_protected: eIP > cs.limit\n");
           exception(BX_GP_EXCEPTION, 0x0000, 0);
           }
 
         break;
 
       case 11: // Busy 386 TSS
-        bx_panic("jump_protected: JUMP to busy 386 TSS unsupported.\n");
+        bio->panic("jump_protected: JUMP to busy 386 TSS unsupported.\n");
         return;
         break;
 
       case 12: // 386 call gate
-        //bx_printf("jump_protected: JUMP TO 386 CALL GATE:\n");
+        //bio->printd("[CPU%u] jump_protected: JUMP TO 386 CALL GATE:\n", BX_SIM_ID);
 
         // descriptor DPL must be >= CPL else #GP(gate selector)
         if (descriptor.dpl < CPL) {
-          bx_panic("jump_protected: gate.dpl < CPL\n");
+          bio->panic("jump_protected: gate.dpl < CPL\n");
           exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
           return;
           }
 
         // descriptor DPL must be >= gate selector RPL else #GP(gate selector)
         if (descriptor.dpl < selector.rpl) {
-          bx_panic("jump_protected: gate.dpl < selector.rpl\n");
+          bio->panic("jump_protected: gate.dpl < selector.rpl\n");
           exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
           return;
           }
 
         // gate must be present else #NP(gate selector)
         if (descriptor.p==0) {
-          bx_panic("jump_protected: task gate.p == 0\n");
+          bio->panic("jump_protected: task gate.p == 0\n");
           exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
           return;
           }
@@ -377,7 +377,7 @@ BX_CPU_C::jump_protected(BxInstruction_t
         // selector must not be null, else #GP(0)
         gate_cs_raw = descriptor.u.gate386.dest_selector;
         if ( (gate_cs_raw & 0xfffc) == 0 ) {
-          bx_panic("jump_protected: CS selector null\n");
+          bio->panic("jump_protected: CS selector null\n");
           exception(BX_GP_EXCEPTION, 0x0000, 0);
           }
         parse_selector(gate_cs_raw, &gate_cs_selector);
@@ -390,35 +390,35 @@ BX_CPU_C::jump_protected(BxInstruction_t
         if ( (gate_cs_descriptor.valid==0) ||
              (gate_cs_descriptor.segment==0) ||
              (gate_cs_descriptor.u.segment.executable==0) ) {
-          bx_panic("jump_protected: AR byte: not code segment.\n");
+          bio->panic("jump_protected: AR byte: not code segment.\n");
           exception(BX_GP_EXCEPTION, gate_cs_raw & 0xfffc, 0);
           }
 
         // if non-conforming, code segment descriptor DPL must = CPL else #GP(CS selector)
         if (gate_cs_descriptor.u.segment.c_ed==0) {
           if (gate_cs_descriptor.dpl != CPL) {
-            bx_panic("jump_protected: non-conform: code seg des DPL != CPL.\n");
+            bio->panic("jump_protected: non-conform: code seg des DPL != CPL.\n");
             exception(BX_GP_EXCEPTION, gate_cs_raw & 0xfffc, 0);
             }
           }
         // if conforming, then code segment descriptor DPL must <= CPL else #GP(CS selector)
         else {
           if (gate_cs_descriptor.dpl > CPL) {
-            bx_panic("jump_protected: conform: code seg des DPL > CPL.\n");
+            bio->panic("jump_protected: conform: code seg des DPL > CPL.\n");
             exception(BX_GP_EXCEPTION, gate_cs_raw & 0xfffc, 0);
             }
           }
 
         // code segment must be present else #NP(CS selector)
         if (gate_cs_descriptor.p==0) {
-          bx_panic("jump_protected: code seg not present.\n");
+          bio->panic("jump_protected: code seg not present.\n");
           exception(BX_NP_EXCEPTION, gate_cs_raw & 0xfffc, 0);
           }
 
         // IP must be in code segment limit else #GP(0)
         if ( descriptor.u.gate386.dest_offset >
              gate_cs_descriptor.u.segment.limit_scaled ) {
-          bx_panic("jump_protected: IP > limit\n");
+          bio->panic("jump_protected: IP > limit\n");
           exception(BX_GP_EXCEPTION, 0x0000, 0);
           }
 
@@ -431,7 +431,7 @@ BX_CPU_C::jump_protected(BxInstruction_t
         break;
 
       default:
-        bx_printf("jump_protected: gate type %u unsupported\n",
+        bio->printd("[CPU%u] jump_protected: gate type %u unsupported\n", BX_SIM_ID,
           (unsigned) descriptor.type);
         exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
         return;
@@ -460,7 +460,7 @@ BX_CPU_C::call_protected(BxInstruction_t
 
   /* new cs selector must not be null, else #GP(0) */
   if ( (cs_raw & 0xfffc) == 0 ) {
-    bx_panic("call_protected: CS selector null\n");
+    bio->printd("call_protected: CS selector null\n");
     exception(BX_GP_EXCEPTION, 0, 0);
     }
 
@@ -475,7 +475,7 @@ BX_CPU_C::call_protected(BxInstruction_t
 
   // examine AR byte of selected descriptor for various legal values
   if (cs_descriptor.valid==0) {
-    bx_panic("call_protected: invalid CS descriptor\n");
+    bio->printd("call_protected: invalid CS descriptor\n");
     exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
     }
 
@@ -483,7 +483,7 @@ BX_CPU_C::call_protected(BxInstruction_t
     Bit32u temp_ESP;
 
     if (cs_descriptor.u.segment.executable==0) {
-      bx_panic("call_protected: non executable segment\n");
+      bio->printd("call_protected: non executable segment\n");
       exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
       return;
       }
@@ -491,7 +491,7 @@ BX_CPU_C::call_protected(BxInstruction_t
     if (cs_descriptor.u.segment.c_ed) { // conforming code segment
       // DPL must be <= CPL, else #GP(code seg selector)
       if (cs_descriptor.dpl > CPL) {
-        bx_panic("call_protected: cs.dpl > CPL\n");
+        bio->printd("call_protected: cs.dpl > CPL\n");
         exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
         return;
         }
@@ -501,14 +501,14 @@ BX_CPU_C::call_protected(BxInstruction_t
       // DPL must be = CPL, else #GP(code seg selector)
       if ( (cs_selector.rpl > CPL) ||
            (cs_descriptor.dpl != CPL) ) {
-        bx_panic("call_protected: cs.rpl > CPL\n");
+        bio->printd("call_protected: cs.rpl > CPL\n");
         exception(BX_GP_EXCEPTION, cs_raw & 0xfffc, 0);
         }
       }
 
     // segment must be present, else #NP(code seg selector)
     if (cs_descriptor.p == 0) {
-      bx_printf("call_protected: cs.p = 0\n");
+      bio->printd("[CPU%u] call_protected: cs.p = 0\n", BX_SIM_ID);
       exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
       }
 
@@ -520,13 +520,13 @@ BX_CPU_C::call_protected(BxInstruction_t
     // stack must be big enough for return addr, else #SS(0)
     if (i->os_32) {
       if ( !can_push(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache, temp_ESP, 8) ) {
-        bx_panic("call_protected: stack doesn't have room for ret addr\n");
+        bio->panic("call_protected: stack doesn't have room for ret addr\n");
         exception(BX_SS_EXCEPTION, 0, 0);
         }
 
       // IP must be in code seg limit, else #GP(0)
       if (disp32 > cs_descriptor.u.segment.limit_scaled) {
-        bx_panic("call_protected: IP not in code seg limit\n");
+        bio->panic("call_protected: IP not in code seg limit\n");
         exception(BX_GP_EXCEPTION, 0, 0);
         }
 
@@ -536,13 +536,13 @@ BX_CPU_C::call_protected(BxInstruction_t
       }
     else { // 16bit opsize
       if ( !can_push(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache, temp_ESP, 4) ) {
-        bx_panic("call_protected: stack doesn't have room for ret addr\n");
+        bio->panic("call_protected: stack doesn't have room for ret addr\n");
         exception(BX_SS_EXCEPTION, 0, 0);
         }
 
       // IP must be in code seg limit, else #GP(0)
       if (disp32 > cs_descriptor.u.segment.limit_scaled) {
-        bx_panic("call_protected: IP not in code seg limit\n");
+        bio->panic("call_protected: IP not in code seg limit\n");
         exception(BX_GP_EXCEPTION, 0, 0);
         }
 
@@ -577,21 +577,21 @@ BX_CPU_C::call_protected(BxInstruction_t
     switch (gate_descriptor.type) {
       case 1: // available 16bit TSS
       case 9: // available 32bit TSS
-        //if (gate_descriptor.type==1)
-        //  bx_printf("call_protected: 16bit available TSS\n");
-        //else
-        //  bx_printf("call_protected: 32bit available TSS\n");
+        if (gate_descriptor.type==1)
+          bio->printd("[CPU%u] call_protected: 16bit available TSS\n", BX_SIM_ID);
+        else
+          bio->printd("[CPU%u] call_protected: 32bit available TSS\n", BX_SIM_ID);
 
         // TSS DPL must be >= CPL, else #TS(TSS selector)
         if (gate_descriptor.dpl < CPL) {
-          bx_panic("call_protected: TSS.dpl < CPL\n");
+          bio->panic("call_protected: TSS.dpl < CPL\n");
           exception(BX_TS_EXCEPTION, cs_raw & 0xfffc, 0);
           return;
           }
 
         // TSS DPL must be >= TSS selector RPL, else #TS(TSS selector)
         if (gate_descriptor.dpl < gate_selector.rpl) {
-          bx_panic("call_protected: TSS.dpl < selector.rpl\n");
+          bio->panic("call_protected: TSS.dpl < selector.rpl\n");
           exception(BX_TS_EXCEPTION, cs_raw & 0xfffc, 0);
           return;
           }
@@ -609,7 +609,7 @@ BX_CPU_C::call_protected(BxInstruction_t
 
         // IP must be in code seg limit, else #TS(0)
         if (EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
-          bx_printf("call_protected: TSS.p == 0\n");
+          bio->printd("[CPU%u] call_protected: TSS.p == 0\n", BX_SIM_ID);
           exception(BX_TS_EXCEPTION, 0, 0);
           return;
           }
@@ -617,10 +617,10 @@ BX_CPU_C::call_protected(BxInstruction_t
         break;
 
       case 5: // TASK GATE
-        //bx_printf("call_protected: task gate\n");
+        bio->printd("[CPU%u] call_protected: task gate\n", BX_SIM_ID);
         // gate descriptor DPL must be >= CPL else #TS(gate selector)
         if (gate_descriptor.dpl < CPL) {
-          bx_panic("call_protected: gate.dpl < CPL\n");
+          bio->printd("call_protected: gate.dpl < CPL\n");
           exception(BX_TS_EXCEPTION, cs_raw & 0xfffc, 0);
           return;
           }
@@ -628,14 +628,14 @@ BX_CPU_C::call_protected(BxInstruction_t
         // gate descriptor DPL must be >= gate selector RPL
         //   else #TS(gate selector)
         if (gate_descriptor.dpl < gate_selector.rpl) {
-          bx_panic("call_protected: gate.dpl < selector.rpl\n");
+          bio->panic("call_protected: gate.dpl < selector.rpl\n");
           exception(BX_TS_EXCEPTION, cs_raw & 0xfffc, 0);
           return;
           }
 
         // task gate must be present else #NP(gate selector)
         if (gate_descriptor.p==0) {
-          bx_panic("call_protected: task gate.p == 0\n");
+          bio->panic("call_protected: task gate.p == 0\n");
           exception(BX_NP_EXCEPTION, cs_raw & 0xfffc, 0);
           return;
           }
@@ -646,7 +646,7 @@ BX_CPU_C::call_protected(BxInstruction_t
         raw_tss_selector = gate_descriptor.u.taskgate.tss_selector;
         parse_selector(raw_tss_selector, &tss_selector);
         if (tss_selector.ti) {
-          bx_panic("call_protected: tss_selector.ti=1\n");
+          bio->panic("call_protected: tss_selector.ti=1\n");
           exception(BX_TS_EXCEPTION, raw_tss_selector & 0xfffc, 0);
           return;
           }
@@ -659,18 +659,18 @@ BX_CPU_C::call_protected(BxInstruction_t
         //   else #TS(TSS selector)
         parse_descriptor(dword1, dword2, &tss_descriptor);
         if (tss_descriptor.valid==0 || tss_descriptor.segment) {
-          bx_panic("call_protected: TSS selector points to bad TSS\n");
+          bio->panic("call_protected: TSS selector points to bad TSS\n");
           exception(BX_TS_EXCEPTION, raw_tss_selector & 0xfffc, 0);
           }
         if (tss_descriptor.type!=9 && tss_descriptor.type!=1) {
-          bx_panic("call_protected: TSS selector points to bad TSS\n");
+          bio->panic("call_protected: TSS selector points to bad TSS\n");
           exception(BX_TS_EXCEPTION, raw_tss_selector & 0xfffc, 0);
           }
 
 
         // task state segment must be present, else #NP(tss selector)
         if (tss_descriptor.p==0) {
-          bx_panic("call_protected: task descriptor.p == 0\n");
+          bio->panic("call_protected: task descriptor.p == 0\n");
           exception(BX_NP_EXCEPTION, raw_tss_selector & 0xfffc, 0);
           }
 
@@ -684,7 +684,7 @@ BX_CPU_C::call_protected(BxInstruction_t
         else
           temp_eIP =  IP;
         if (temp_eIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
-          bx_panic("call_protected: eIP > cs.limit\n");
+          bio->panic("call_protected: eIP > cs.limit\n");
           exception(BX_TS_EXCEPTION, 0x0000, 0);
           }
 
@@ -693,22 +693,22 @@ BX_CPU_C::call_protected(BxInstruction_t
 
       case  4: // 16bit CALL GATE
       case 12: // 32bit CALL GATE
-//if (gate_descriptor.type==4)
-//  bx_printf("CALL: 16bit call gate\n");
-//else
-//  bx_printf("CALL: 32bit call gate\n");
+if (gate_descriptor.type==4)
+  bio->printd("[CPU%u] CALL: 16bit call gate\n", BX_SIM_ID);
+else
+  bio->printd("[CPU%u] CALL: 32bit call gate\n", BX_SIM_ID);
 
         // call gate DPL must be >= CPL, else #GP(call gate selector)
         // call gate DPL must be >= RPL, else #GP(call gate selector)
         if ( (gate_descriptor.dpl < CPL) ||
              (gate_descriptor.dpl < gate_selector.rpl) ) {
-          bx_panic("call_protected: DPL < CPL or RPL\n");
+          bio->panic("call_protected: DPL < CPL or RPL\n");
           exception(BX_GP_EXCEPTION, gate_selector.value & 0xfffc, 0);
           }
 
         // call gate must be present, else #NP(call gate selector)
         if (gate_descriptor.p==0) {
-          bx_panic("call_protected: not present\n");
+          bio->panic("call_protected: not present\n");
           exception(BX_NP_EXCEPTION, gate_selector.value & 0xfffc, 0);
           }
 
@@ -725,7 +725,7 @@ BX_CPU_C::call_protected(BxInstruction_t
 
         // selector must not be null else #GP(0)
         if ( (dest_selector & 0xfffc) == 0 ) {
-          bx_panic("call_protected: selector in gate null\n");
+          bio->panic("call_protected: selector in gate null\n");
           exception(BX_GP_EXCEPTION, 0, 0);
           }
 
@@ -745,7 +745,7 @@ BX_CPU_C::call_protected(BxInstruction_t
             cs_descriptor.segment==0 ||
             cs_descriptor.u.segment.executable==0 ||
             cs_descriptor.dpl > CPL) {
-          bx_panic("call_protected: selected desciptor not code\n");
+          bio->panic("call_protected: selected descriptor not code\n");
           exception(BX_GP_EXCEPTION, cs_selector.value & 0xfffc, 0);
           }
 
@@ -769,7 +769,7 @@ BX_CPU_C::call_protected(BxInstruction_t
           Bit32u   parameter_dword[32];
           Bit32u   temp_ESP;
 
-//bx_printf("CALL: Call Gate: to more priviliged level\n");
+bio->printd("[CPU%u] CALL: Call Gate: to more privileged level\n", BX_SIM_ID);
 
           // get new SS selector for new privilege level from TSS
           get_SS_ESP_from_TSS(cs_descriptor.dpl,
@@ -780,7 +780,7 @@ BX_CPU_C::call_protected(BxInstruction_t
           // check selector & descriptor for new SS:
           // selector must not be null, else #TS(0)
           if ( (SS_for_cpl_x & 0xfffc) == 0 ) {
-            bx_panic("call_protected: new SS null\n");
+            bio->panic("call_protected: new SS null\n");
             exception(BX_TS_EXCEPTION, 0, 0);
             return;
             }
@@ -796,7 +796,7 @@ BX_CPU_C::call_protected(BxInstruction_t
           // selector's RPL must equal DPL of code segment,
           //   else #TS(SS selector)
           if (ss_selector.rpl != cs_descriptor.dpl) {
-            bx_panic("call_protected: SS selector.rpl != CS descr.dpl\n");
+            bio->panic("call_protected: SS selector.rpl != CS descr.dpl\n");
             exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc, 0);
             return;
             }
@@ -804,7 +804,7 @@ BX_CPU_C::call_protected(BxInstruction_t
           // stack segment DPL must equal DPL of code segment,
           //   else #TS(SS selector)
           if (ss_descriptor.dpl != cs_descriptor.dpl) {
-            bx_panic("call_protected: SS descr.rpl != CS descr.dpl\n");
+            bio->panic("call_protected: SS descr.rpl != CS descr.dpl\n");
             exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc, 0);
             return;
             }
@@ -815,14 +815,14 @@ BX_CPU_C::call_protected(BxInstruction_t
               ss_descriptor.segment==0  ||
               ss_descriptor.u.segment.executable ||
               ss_descriptor.u.segment.r_w==0) {
-            bx_printf("call_protected: ss descriptor not writable data seg\n");
+            bio->printd("[CPU%u] call_protected: ss descriptor not writable data seg\n", BX_SIM_ID);
             exception(BX_TS_EXCEPTION, SS_for_cpl_x & 0xfffc, 0);
             return;
             }
 
           // segment must be present, else #SS(SS selector)
           if (ss_descriptor.p==0) {
-            bx_panic("call_protected: ss descriptor not present.\n");
+            bio->panic("call_protected: ss descriptor not present.\n");
             exception(BX_SS_EXCEPTION, SS_for_cpl_x & 0xfffc, 0);
             return;
             }
@@ -849,14 +849,14 @@ BX_CPU_C::call_protected(BxInstruction_t
           //   else #SS(SS selector)
 
           if ( !can_push(&ss_descriptor, ESP_for_cpl_x, room_needed) ) {
-            bx_printf("call_protected: stack doesn't have room\n");
+            bio->printd("[CPU%u] call_protected: stack doesn't have room\n", BX_SIM_ID);
             exception(BX_SS_EXCEPTION, SS_for_cpl_x & 0xfffc, 0);
             return;
             }
 
           // new eIP must be in code segment limit else #GP(0)
           if ( new_EIP > cs_descriptor.u.segment.limit_scaled ) {
-            bx_panic("call_protected: IP not within CS limits\n");
+            bio->panic("call_protected: IP not within CS limits\n");
             exception(BX_GP_EXCEPTION, 0, 0);
             return;
             }
@@ -955,7 +955,7 @@ BX_CPU_C::call_protected(BxInstruction_t
         else {
           Bit32u temp_ESP;
 
-//bx_printf("CALL: Call Gate: to same priviliged level\n");
+bio->printd("[CPU%u] CALL: Call Gate: to same privileged level\n", BX_SIM_ID);
           if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
             temp_ESP = ESP;
           else
@@ -966,7 +966,7 @@ BX_CPU_C::call_protected(BxInstruction_t
             // stack must room for 8-byte return address (2 are padding)
             //   else #SS(0)
             if ( !can_push(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache, temp_ESP, 8) ) {
-              bx_panic("call_protected: stack doesn't have room for 8 bytes\n");
+              bio->panic("call_protected: stack doesn't have room for 8 bytes\n");
               exception(BX_SS_EXCEPTION, 0, 0);
               }
             }
@@ -974,14 +974,14 @@ BX_CPU_C::call_protected(BxInstruction_t
             // stack must room for 4-byte return address
             //   else #SS(0)
             if ( !can_push(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache, temp_ESP, 4) ) {
-              bx_panic("call_protected: stack doesn't have room for 4 bytes\n");
+              bio->panic("call_protected: stack doesn't have room for 4 bytes\n");
               exception(BX_SS_EXCEPTION, 0, 0);
               }
             }
 
           // EIP must be within code segment limit, else #GP(0)
           if ( new_EIP > cs_descriptor.u.segment.limit_scaled ) {
-            bx_panic("call_protected: IP not within code segment limits\n");
+            bio->panic("call_protected: IP not within code segment limits\n");
             exception(BX_GP_EXCEPTION, 0, 0);
             }
 
@@ -1005,18 +1005,18 @@ BX_CPU_C::call_protected(BxInstruction_t
           return;
           }
 
-        bx_panic("call_protected: call gate: should not get here\n");
+        bio->panic("call_protected: call gate: should not get here\n");
         return;
 
       default:
-        bx_panic("call_protected: type = %d\n",
+        bio->panic("call_protected: type = %d\n",
           (unsigned) cs_descriptor.type);
         return;
       }
-    bx_panic("call_protected: gate segment unfinished\n");
+    bio->panic("call_protected: gate segment unfinished\n");
     }
 
-  bx_panic("call_protected: shouldn't get here!\n");
+  bio->panic("call_protected: shouldn't get here!\n");
   return;
 }
 #endif /* 286+ */
@@ -1049,7 +1049,7 @@ BX_CPU_C::return_protected(BxInstruction
     /* operand size=32: third word on stack must be within stack limits,
      *   else #SS(0); */
     if (!can_pop(6)) {
-      bx_panic("return_protected: 3rd word not in stack limits\n");
+      bio->panic("return_protected: 3rd word not in stack limits\n");
       /* #SS(0) */
       return;
       }
@@ -1063,7 +1063,7 @@ BX_CPU_C::return_protected(BxInstruction
      *   else #SS(0);
      */
     if ( !can_pop(4) ) {
-      bx_panic("return_protected: 2nd word not in stack limits\n");
+      bio->panic("return_protected: 2nd word not in stack limits\n");
       /* #SS(0) */
       return;
       }
@@ -1079,8 +1079,8 @@ BX_CPU_C::return_protected(BxInstruction
                        stack_cs_offset, 2, CPL==3, BX_READ, &raw_cs_selector);
   parse_selector(raw_cs_selector, &cs_selector);
   if ( cs_selector.rpl < CPL ) {
-    bx_printf("return_protected: CS.rpl < CPL\n");
-    bx_printf("  CS.rpl=%u CPL=%u\n", (unsigned) cs_selector.rpl,
+    bio->printd("[CPU%u] return_protected: CS.rpl < CPL\n", BX_SIM_ID);
+    bio->printd("[CPU%u]  CS.rpl=%u CPL=%u\n", BX_SIM_ID, (unsigned) cs_selector.rpl,
       (unsigned) CPL);
     exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
     return;
@@ -1089,13 +1089,13 @@ BX_CPU_C::return_protected(BxInstruction
   // if return selector RPL == CPL then
   // RETURN TO SAME LEVEL
   if ( cs_selector.rpl == CPL ) {
-    //bx_printf("return: to same level %04x:%08x\n",
-    //   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value,
-    //   BX_CPU_THIS_PTR prev_eip);
+    bio->printd("[CPU%u] return: to same level %04x:%08x\n", BX_SIM_ID,
+       BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value,
+       BX_CPU_THIS_PTR prev_eip);
     // return selector must be non-null, else #GP(0)
     if ( (raw_cs_selector & 0xfffc) == 0 ) {
-      bx_panic("return_protected: CS null\n");
-      /* #GP(0) */
+      bio->printd("return_protected: CS null\n");
+      exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
       return;
       }
 
@@ -1109,14 +1109,14 @@ BX_CPU_C::return_protected(BxInstruction
     if (cs_descriptor.valid==0 ||
         cs_descriptor.segment==0 ||
         cs_descriptor.u.segment.executable==0) {
-      bx_printf("return_protected: same: AR byte not code\n");
+      bio->printd("return_protected: same: AR byte not code\n");
       exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
       }
 
     // if non-conforming then code segment DPL must = CPL,
     // else #GP(selector)
     if ((cs_descriptor.u.segment.c_ed==0)  && (cs_descriptor.dpl!=CPL)) {
-      bx_panic("return_protected: non-conforming, DPL!=CPL\n");
+      bio->panic("return_protected: non-conforming, DPL!=CPL\n");
       /* #GP(selector) */
       return;
       }
@@ -1124,20 +1124,20 @@ BX_CPU_C::return_protected(BxInstruction
     // if conforming then code segment DPL must be <= CPL,
     // else #GP(selector)
     if (cs_descriptor.u.segment.c_ed  && (cs_descriptor.dpl>CPL)) {
-      bx_printf("return_protected: conforming, DPL>CPL\n");
+      bio->printd("[CPU%u] return_protected: conforming, DPL>CPL\n", BX_SIM_ID);
       exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
       }
 
     // code segment must be present, else #NP(selector)
     if (cs_descriptor.p==0) {
-      bx_printf("return_protected: not present\n");
+      bio->printd("[CPU%u] return_protected: not present\n", BX_SIM_ID);
       exception(BX_NP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
       return;
       }
 
     // top word on stack must be within stack limits, else #SS(0)
     if ( !can_pop(stack_param_offset + pop_bytes) ) {
-      bx_panic("return_protected: top word not in stack limits\n");
+      bio->panic("return_protected: top word not in stack limits\n");
       /* #SS(0) */
       return;
       }
@@ -1157,7 +1157,7 @@ BX_CPU_C::return_protected(BxInstruction
       }
 
     if ( return_EIP > cs_descriptor.u.segment.limit_scaled ) {
-      bx_panic("return_protected: return IP > CS.limit\n");
+      bio->panic("return_protected: return IP > CS.limit\n");
       /* #GP(0) */
       return;
       }
@@ -1186,14 +1186,14 @@ BX_CPU_C::return_protected(BxInstruction
     /* + 2:     CS      | + 4:         CS */
     /* + 0:     IP      | + 0:        EIP */
 
-//bx_printf("return: to outer level %04x:%08x\n",
+//bio->printd("[CPU%u] return: to outer level %04x:%08x\n", BX_SIM_ID,
 //  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value,
 //  BX_CPU_THIS_PTR prev_eip);
 
     if (i->os_32) {
       /* top 16+immediate bytes on stack must be within stack limits, else #SS(0) */
       if ( !can_pop(16 + pop_bytes) ) {
-        bx_panic("return_protected: 8 bytes not within stack limits\n");
+        bio->panic("return_protected: 8 bytes not within stack limits\n");
         /* #SS(0) */
         return;
         }
@@ -1201,7 +1201,7 @@ BX_CPU_C::return_protected(BxInstruction
     else {
       /* top 8+immediate bytes on stack must be within stack limits, else #SS(0) */
       if ( !can_pop(8 + pop_bytes) ) {
-        bx_panic("return_protected: 8 bytes not within stack limits\n");
+        bio->panic("return_protected: 8 bytes not within stack limits\n");
         /* #SS(0) */
         return;
         }
@@ -1211,7 +1211,7 @@ BX_CPU_C::return_protected(BxInstruction
 
     /* selector must be non-null else #GP(0) */
     if ( (raw_cs_selector & 0xfffc) == 0 ) {
-      bx_panic("return_protected: CS selector null\n");
+      bio->panic("return_protected: CS selector null\n");
       /* #GP(0) */
       return;
       }
@@ -1226,7 +1226,7 @@ BX_CPU_C::return_protected(BxInstruction
     if (cs_descriptor.valid==0 ||
         cs_descriptor.segment==0  ||
         cs_descriptor.u.segment.executable==0) {
-      bx_panic("return_protected: AR byte not code\n");
+      bio->panic("return_protected: AR byte not code\n");
       /* #GP(selector) */
       return;
       }
@@ -1235,7 +1235,7 @@ BX_CPU_C::return_protected(BxInstruction
      * else #GP(selector) */
     if (cs_descriptor.u.segment.c_ed==0 &&
         cs_descriptor.dpl!=cs_selector.rpl) {
-      bx_panic("return_protected: non-conforming seg DPL != selector.rpl\n");
+      bio->panic("return_protected: non-conforming seg DPL != selector.rpl\n");
       /* #GP(selector) */
       return;
       }
@@ -1244,14 +1244,14 @@ BX_CPU_C::return_protected(BxInstruction
      * else #GP(selector) */
     if (cs_descriptor.u.segment.c_ed &&
         cs_descriptor.dpl>cs_selector.rpl) {
-      bx_panic("return_protected: conforming seg DPL > selector.rpl\n");
+      bio->panic("return_protected: conforming seg DPL > selector.rpl\n");
       /* #GP(selector) */
       return;
       }
 
     /* segment must be present else #NP(selector) */
     if (cs_descriptor.p==0) {
-      bx_panic("return_protected: segment not present\n");
+      bio->panic("return_protected: segment not present\n");
       /* #NP(selector) */
       return;
       }
@@ -1280,7 +1280,7 @@ BX_CPU_C::return_protected(BxInstruction
 
     /* selector must be non-null else #GP(0) */
     if ( (raw_ss_selector & 0xfffc) == 0 ) {
-      bx_panic("return_protected: SS selector null\n");
+      bio->panic("return_protected: SS selector null\n");
       /* #GP(0) */
       return;
       }
@@ -1295,7 +1295,7 @@ BX_CPU_C::return_protected(BxInstruction
     /* selector RPL must = RPL of the return CS selector,
      * else #GP(selector) */
     if (ss_selector.rpl != cs_selector.rpl) {
-      bx_printf("return_protected: ss.rpl != cs.rpl\n");
+      bio->printd("[CPU%u] return_protected: ss.rpl != cs.rpl\n", BX_SIM_ID);
       exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
       return;
       }
@@ -1306,7 +1306,7 @@ BX_CPU_C::return_protected(BxInstruction
         ss_descriptor.segment==0 ||
         ss_descriptor.u.segment.executable ||
         ss_descriptor.u.segment.r_w==0) {
-      bx_panic("return_protected: SS.AR byte not writable data\n");
+      bio->panic("return_protected: SS.AR byte not writable data\n");
       /* #GP(selector) */
       return;
       }
@@ -1314,21 +1314,21 @@ BX_CPU_C::return_protected(BxInstruction
     /* descriptor dpl must = RPL of the return CS selector,
      * else #GP(selector) */
     if (ss_descriptor.dpl != cs_selector.rpl) {
-      bx_panic("return_protected: SS.dpl != cs.rpl\n");
+      bio->panic("return_protected: SS.dpl != cs.rpl\n");
       /* #GP(selector) */
       return;
       }
 
     /* segment must be present else #SS(selector) */
     if (ss_descriptor.p==0) {
-      bx_panic("ss.p == 0\n");
+      bio->panic("ss.p == 0\n");
       /* #NP(selector) */
       return;
       }
 
     /* eIP must be in code segment limit, else #GP(0) */
     if (return_EIP > cs_descriptor.u.segment.limit_scaled) {
-      bx_panic("return_protected: eIP > cs.limit\n");
+      bio->panic("return_protected: eIP > cs.limit\n");
       /* #GP(0) */
       return;
       }
@@ -1377,20 +1377,20 @@ BX_CPU_C::iret_protected(BxInstruction_t
     bx_descriptor_t tss_descriptor;
 
     if (BX_CPU_THIS_PTR eflags.vm)
-      bx_panic("IRET: vm set?\n");
+      bio->panic("IRET: vm set?\n");
 
     // TASK_RETURN:
 
-    //bx_printf("IRET: nested task return\n");
+    //bio->printd("[CPU%u] IRET: nested task return\n", BX_SIM_ID);
 
     if (BX_CPU_THIS_PTR tr.cache.valid==0)
-      bx_panic("IRET: TR not valid\n");
+      bio->panic("IRET: TR not valid\n");
     if (BX_CPU_THIS_PTR tr.cache.type == 1)
       base32 = BX_CPU_THIS_PTR tr.cache.u.tss286.base;
     else if (BX_CPU_THIS_PTR tr.cache.type == 9)
       base32 = BX_CPU_THIS_PTR tr.cache.u.tss386.base;
     else {
-      bx_panic("IRET: TR not valid\n");
+      bio->panic("IRET: TR not valid\n");
       base32 = 0; // keep compiler happy
       }
 
@@ -1400,7 +1400,7 @@ BX_CPU_C::iret_protected(BxInstruction_t
     // must specify global, else #TS(new TSS selector)
     parse_selector(raw_link_selector, &link_selector);
     if (link_selector.ti) {
-      bx_panic("iret: link selector.ti=1\n");
+      bio->panic("iret: link selector.ti=1\n");
       exception(BX_TS_EXCEPTION, raw_link_selector & 0xfffc, 0);
       }
 
@@ -1411,18 +1411,18 @@ BX_CPU_C::iret_protected(BxInstruction_t
     // new TSS must be busy, else #TS(new TSS selector)
     parse_descriptor(dword1, dword2, &tss_descriptor);
     if (tss_descriptor.valid==0 || tss_descriptor.segment) {
-      bx_printf("iret: TSS selector points to bad TSS\n");
+      bio->printd("[CPU%u] iret: TSS selector points to bad TSS\n", BX_SIM_ID);
       exception(BX_TS_EXCEPTION, raw_link_selector & 0xfffc, 0);
       }
     if ((tss_descriptor.type!=11) && (tss_descriptor.type!=3)) {
-      bx_printf("iret: TSS selector points to bad TSS\n");
+      bio->printd("[CPU%u] iret: TSS selector points to bad TSS\n", BX_SIM_ID);
       exception(BX_TS_EXCEPTION, raw_link_selector & 0xfffc, 0);
       }
 
 
     // TSS must be present, else #NP(new TSS selector)
     if (tss_descriptor.p==0) {
-      bx_printf("iret: task descriptor.p == 0\n");
+      bio->printd("[CPU%u] iret: task descriptor.p == 0\n", BX_SIM_ID);
       exception(BX_NP_EXCEPTION, raw_link_selector & 0xfffc, 0);
       }
 
@@ -1434,7 +1434,7 @@ BX_CPU_C::iret_protected(BxInstruction_t
 
     // eIP must be within code seg limit, else #GP(0)
     if (EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
-      bx_panic("iret: eIP > cs.limit\n");
+      bio->panic("iret: eIP > cs.limit\n");
       exception(BX_GP_EXCEPTION, 0x0000, 0);
       }
     return;
@@ -1472,7 +1472,7 @@ BX_CPU_C::iret_protected(BxInstruction_t
 
     /* CS on stack must be within stack limits, else #SS(0) */
     if ( !can_pop(top_nbytes_same) ) {
-      bx_panic("iret: CS not within stack limits\n");
+      bio->panic("iret: CS not within stack limits\n");
       exception(BX_SS_EXCEPTION, 0, 0);
       return;
       }
@@ -1494,7 +1494,7 @@ BX_CPU_C::iret_protected(BxInstruction_t
       // if VM=1 in flags image on stack then STACK_RETURN_TO_V86
       if (new_eflags & 0x00020000) {
         if (CPL != 0)
-          bx_panic("iret: VM set on stack, CPL!=0\n");
+          bio->panic("iret: VM set on stack, CPL!=0\n");
         BX_CPU_THIS_PTR stack_return_to_v86(new_eip, raw_cs_selector, new_eflags);
         return;
         }
@@ -1510,7 +1510,7 @@ BX_CPU_C::iret_protected(BxInstruction_t
 
     // return CS selector must be non-null, else #GP(0)
     if ( (raw_cs_selector & 0xfffc) == 0 ) {
-      bx_panic("iret: return CS selector null\n");
+      bio->panic("iret: return CS selector null\n");
       exception(BX_GP_EXCEPTION, 0, 0);
       return;
       }
@@ -1526,14 +1526,14 @@ BX_CPU_C::iret_protected(BxInstruction_t
     if ( cs_descriptor.valid==0 ||
          cs_descriptor.segment==0  ||
          cs_descriptor.u.segment.executable==0 ) {
-      bx_panic("iret: AR byte indicated non code segment\n");
+      bio->panic("iret: AR byte indicated non code segment\n");
       exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
       return;
       }
 
     // return CS selector RPL must be >= CPL, else #GP(return selector)
     if (cs_selector.rpl < CPL) {
-      bx_panic("iret: return selector RPL < CPL\n");
+      bio->panic("iret: return selector RPL < CPL\n");
       exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
       return;
       }
@@ -1543,7 +1543,7 @@ BX_CPU_C::iret_protected(BxInstruction_t
     //     then #GP(return selector)
     if ( cs_descriptor.u.segment.c_ed  &&
          cs_descriptor.dpl > cs_selector.rpl ) {
-      bx_panic("iret: conforming, DPL > cs_selector.RPL\n");
+      bio->panic("iret: conforming, DPL > cs_selector.RPL\n");
       exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
       return;
       }
@@ -1553,14 +1553,14 @@ BX_CPU_C::iret_protected(BxInstruction_t
     //     then #GP(return selector)
     if ( cs_descriptor.u.segment.c_ed==0 &&
          cs_descriptor.dpl != cs_selector.rpl ) {
-      bx_printf("(mch) iret: Return with DPL != RPL. #GP(selector)\n");
+      bio->printd("[CPU%u] (mch) iret: Return with DPL != RPL. #GP(selector)\n", BX_SIM_ID);
       exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
       return;
       }
 
     // segment must be present else #NP(return selector)
     if ( cs_descriptor.p==0 ) {
-      bx_panic("iret: not present\n");
+      bio->panic("iret: not present\n");
       exception(BX_NP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
       return;
       }
@@ -1572,7 +1572,7 @@ BX_CPU_C::iret_protected(BxInstruction_t
       if (i->os_32) {
         /* return EIP must be in code segment limit else #GP(0) */
         if ( new_eip > cs_descriptor.u.segment.limit_scaled ) {
-          bx_panic("iret: IP > descriptor limit\n");
+          bio->panic("iret: IP > descriptor limit\n");
           exception(BX_GP_EXCEPTION, 0, 0);
           return;
           }
@@ -1587,7 +1587,7 @@ BX_CPU_C::iret_protected(BxInstruction_t
       else {
         /* return IP must be in code segment limit else #GP(0) */
         if ( new_ip > cs_descriptor.u.segment.limit_scaled ) {
-          bx_panic("iret: IP > descriptor limit\n");
+          bio->panic("iret: IP > descriptor limit\n");
           exception(BX_GP_EXCEPTION, 0, 0);
           return;
           }
@@ -1619,7 +1619,7 @@ BX_CPU_C::iret_protected(BxInstruction_t
 
       /* top 10/20 bytes on stack must be within limits else #SS(0) */
       if ( !can_pop(top_nbytes_outer) ) {
-        bx_panic("iret: top 10/20 bytes not within stack limits\n");
+        bio->panic("iret: top 10/20 bytes not within stack limits\n");
         exception(BX_SS_EXCEPTION, 0, 0);
         return;
         }
@@ -1630,7 +1630,7 @@ BX_CPU_C::iret_protected(BxInstruction_t
 
       /* selector must be non-null, else #GP(0) */
       if ( (raw_ss_selector & 0xfffc) == 0 ) {
-        bx_panic("iret: SS selector null\n");
+        bio->panic("iret: SS selector null\n");
         exception(BX_GP_EXCEPTION, 0, 0);
         return;
         }
@@ -1640,7 +1640,7 @@ BX_CPU_C::iret_protected(BxInstruction_t
       /* selector RPL must = RPL of return CS selector,
        * else #GP(SS selector) */
       if ( ss_selector.rpl != cs_selector.rpl) {
-        bx_panic("iret: SS.rpl != CS.rpl\n");
+        bio->panic("iret: SS.rpl != CS.rpl\n");
         exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
         return;
         }
@@ -1658,7 +1658,7 @@ BX_CPU_C::iret_protected(BxInstruction_t
            ss_descriptor.segment==0  ||
            ss_descriptor.u.segment.executable  ||
            ss_descriptor.u.segment.r_w==0 ) {
-        bx_panic("iret: SS AR byte not writable code segment\n");
+        bio->panic("iret: SS AR byte not writable code segment\n");
         exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
         return;
         }
@@ -1666,14 +1666,14 @@ BX_CPU_C::iret_protected(BxInstruction_t
       /* stack segment DPL must equal the RPL of the return CS selector,
        * else #GP(SS selector) */
       if ( ss_descriptor.dpl != cs_selector.rpl ) {
-        bx_panic("iret: SS.dpl != CS selector RPL\n");
+        bio->panic("iret: SS.dpl != CS selector RPL\n");
         exception(BX_GP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
         return;
         }
 
       /* SS must be present, else #NP(SS selector) */
       if ( ss_descriptor.p==0 ) {
-        bx_panic("iret: SS not present!\n");
+        bio->panic("iret: SS not present!\n");
         exception(BX_NP_EXCEPTION, raw_ss_selector & 0xfffc, 0);
         return;
         }
@@ -1701,7 +1701,7 @@ BX_CPU_C::iret_protected(BxInstruction_t
 
       /* EIP must be in code segment limit, else #GP(0) */
       if ( new_eip > cs_descriptor.u.segment.limit_scaled ) {
-        bx_panic("iret: IP > descriptor limit\n");
+        bio->panic("iret: IP > descriptor limit\n");
         exception(BX_GP_EXCEPTION, 0, 0);
         return;
         }
@@ -1734,7 +1734,7 @@ BX_CPU_C::iret_protected(BxInstruction_t
       return;
       }
     }
-  bx_panic("IRET: shouldn't get here!\n");
+  bio->panic("IRET: shouldn't get here!\n");
 }
 #endif
 
