Stackdb
Stackdb is a stackable, multi-target and -level source debugger and memory forensics library.
target_xen_vm.c
1 /*
2  * Copyright (c) 2012, 2013, 2014 The University of Utah
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License as
6  * published by the Free Software Foundation; either version 2 of
7  * the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
17  */
18 
19 #include "config.h"
20 
21 #include <errno.h>
22 #include <assert.h>
23 #include <ctype.h>
24 #include <unistd.h>
25 #include <getopt.h>
26 #include <sys/types.h>
27 #include <sys/stat.h>
28 #include <fcntl.h>
29 #include <sys/mman.h>
30 #include <sys/socket.h>
31 #include <sys/un.h>
32 #if !defined(UNIX_PATH_MAX)
33 #define UNIX_PATH_MAX (size_t)sizeof(((struct sockaddr_un *) 0)->sun_path)
34 #endif
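/* Illustration (not part of this file): on Linux, sockaddr_un.sun_path
 * is 108 bytes, so the UNIX_PATH_MAX fallback above evaluates to 108. */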
35 #include <libgen.h>
36 #include <endian.h>
37 #include <gelf.h>
38 #include <elf.h>
39 #include <libelf.h>
40 #include <argp.h>
41 
42 #include "common.h"
43 #include "glib_wrapper.h"
44 #include "object.h"
45 #include "arch.h"
46 #include "arch_x86.h"
47 #include "arch_x86_64.h"
48 #include "evloop.h"
49 #include "binfile.h"
50 #include "dwdebug.h"
51 #include "dwdebug_priv.h"
52 #include "target_api.h"
53 #include "target.h"
54 #include "target_event.h"
55 #include "target_arch_x86.h"
56 #include "target_os.h"
57 #include "probe_api.h"
58 
59 #include <xenctrl.h>
60 #include <xs.h>
61 
62 #include "target_xen_vm.h"
63 #include "target_xen_vm_vmp.h"
64 
65 #ifdef ENABLE_XENACCESS
66 extern struct xen_vm_mem_ops xen_vm_mem_ops_xenaccess;
67 #endif
68 #ifdef ENABLE_LIBVMI
69 extern struct xen_vm_mem_ops xen_vm_mem_ops_libvmi;
70 #endif
71 
72 extern struct xen_vm_mem_ops xen_vm_mem_ops_builtin;
73 
74 /*
75  * Prototypes.
76  */
77 static struct target *xen_vm_instantiate(struct target_spec *spec,
78  struct evloop *evloop);
79 
80 static struct target *xen_vm_attach(struct target_spec *spec,
81  struct evloop *evloop);
82 
83 static int xen_vm_snprintf(struct target *target,char *buf,int bufsiz);
84 static int xen_vm_init(struct target *target);
85 static int xen_vm_attach_internal(struct target *target);
86 static int xen_vm_detach(struct target *target,int stay_paused);
87 static int xen_vm_fini(struct target *target);
88 static int xen_vm_kill(struct target *target,int sig);
89 static int xen_vm_loadspaces(struct target *target);
90 static int xen_vm_loadregions(struct target *target,struct addrspace *space);
91 static int xen_vm_loaddebugfiles(struct target *target,struct addrspace *space,
92  struct memregion *region);
93 static int xen_vm_postloadinit(struct target *target);
94 static int xen_vm_postopened(struct target *target);
95 static int xen_vm_set_active_probing(struct target *target,
96  active_probe_flags_t flags);
97 
98 static target_status_t xen_vm_handle_exception(struct target *target,
99  target_exception_flags_t flags,
100  int *again,void *priv);
101 
102 static struct target_spec *xen_vm_build_default_overlay_spec(struct target *target,
103  tid_t tid);
104 static struct target *
105 xen_vm_instantiate_overlay(struct target *target,
106  struct target_thread *tthread,
107  struct target_spec *spec,
108  struct target_thread **ntthread);
109 static struct target_thread *
110 xen_vm_lookup_overlay_thread_by_id(struct target *target,int id);
111 static struct target_thread *
112 xen_vm_lookup_overlay_thread_by_name(struct target *target,char *name);
113 int xen_vm_attach_overlay_thread(struct target *base,struct target *overlay,
114  tid_t newtid);
115 int xen_vm_detach_overlay_thread(struct target *base,struct target *overlay,
116  tid_t tid);
117 static target_status_t xen_vm_status(struct target *target);
118 static int xen_vm_pause(struct target *target,int nowait);
119 static int __xen_vm_resume(struct target *target,int detaching);
120 static int xen_vm_resume(struct target *target);
121 static target_status_t xen_vm_monitor(struct target *target);
122 static target_status_t xen_vm_poll(struct target *target,struct timeval *tv,
123  target_poll_outcome_t *outcome,int *pstatus);
124 int xen_vm_attach_evloop(struct target *target,struct evloop *evloop);
125 int xen_vm_detach_evloop(struct target *target);
126 static unsigned char *xen_vm_read(struct target *target,ADDR addr,
127  unsigned long length,unsigned char *buf);
128 static unsigned long xen_vm_write(struct target *target,ADDR addr,
129  unsigned long length,unsigned char *buf);
130 /*
131  * NB: initially, we will use VM phys addrs here. We could have also
132  * used Xen machine addrs; but for now, given the current
133  * libvmi code, using VM phys addrs is easiest. Later on,
134  * machine addrs will probably be *faster*. The risk with that approach
135  * is if the VM pfn/mfn mapping ever changes out from under us.
136  */
137 static int xen_vm_addr_v2p(struct target *target,tid_t tid,
138  ADDR vaddr,ADDR *paddr);
139 static unsigned char *xen_vm_read_phys(struct target *target,ADDR paddr,
140  unsigned long length,unsigned char *buf);
141 static unsigned long xen_vm_write_phys(struct target *target,ADDR paddr,
142  unsigned long length,unsigned char *buf);
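/*
 * Usage sketch (an illustration, not part of this file): translate a
 * guest virtual address with the prototypes above, then read the
 * backing physical bytes.  The 0-on-success/buf-on-success return
 * conventions are assumptions drawn from how these calls are used
 * elsewhere in this file.
 */
static unsigned char *example_read_backing_phys(struct target *target,
                                                ADDR vaddr,
                                                unsigned long len,
                                                unsigned char *buf) {
    ADDR paddr = 0;

    if (xen_vm_addr_v2p(target,TID_GLOBAL,vaddr,&paddr))
        return NULL;
    return xen_vm_read_phys(target,paddr,len,buf);
}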
143 
144 static tid_t xen_vm_gettid(struct target *target);
145 static void xen_vm_free_thread_state(struct target *target,void *state);
146 static struct array_list *xen_vm_list_available_tids(struct target *target);
147 static struct target_thread *xen_vm_load_thread(struct target *target,tid_t tid,
148  int force);
149 static struct target_thread *xen_vm_load_current_thread(struct target *target,
150  int force);
151 static int xen_vm_load_all_threads(struct target *target,int force);
152 static int xen_vm_load_available_threads(struct target *target,int force);
153 static int xen_vm_pause_thread(struct target *target,tid_t tid,int nowait);
154 static int xen_vm_flush_thread(struct target *target,tid_t tid);
155 static int xen_vm_flush_current_thread(struct target *target);
156 static int xen_vm_flush_all_threads(struct target *target);
157 static int xen_vm_invalidate_thread(struct target *target,
158  struct target_thread *tthread);
159 static int xen_vm_thread_snprintf(struct target *target,
160  struct target_thread *tthread,
161  char *buf,int bufsiz,
162  int detail,char *sep,char *key_val_sep);
163 /*
164 static REGVAL xen_vm_read_reg(struct target *target,tid_t tid,REG reg);
165 static int xen_vm_write_reg(struct target *target,tid_t tid,REG reg,REGVAL value);
166 static GHashTable *xen_vm_copy_registers(struct target *target,tid_t tid);
167 REGVAL xen_vm_read_reg_tidctxt(struct target *target,
168  tid_t tid,thread_ctxt_t tidctxt,REG reg);
169 int xen_vm_write_reg_tidctxt(struct target *target,
170  tid_t tid,thread_ctxt_t tidctxt,
171  REG reg,REGVAL value);
172 */
173 static REG xen_vm_get_unused_debug_reg(struct target *target,tid_t tid);
174 static int xen_vm_set_hw_breakpoint(struct target *target,tid_t tid,REG num,ADDR addr);
175 static int xen_vm_set_hw_watchpoint(struct target *target,tid_t tid,REG num,ADDR addr,
176  probepoint_whence_t whence,
177  probepoint_watchsize_t watchsize);
178 static int xen_vm_unset_hw_breakpoint(struct target *target,tid_t tid,REG num);
179 static int xen_vm_unset_hw_watchpoint(struct target *target,tid_t tid,REG num);
180 int xen_vm_disable_hw_breakpoints(struct target *target,tid_t tid);
181 int xen_vm_enable_hw_breakpoints(struct target *target,tid_t tid);
182 int xen_vm_disable_hw_breakpoint(struct target *target,tid_t tid,REG dreg);
183 int xen_vm_enable_hw_breakpoint(struct target *target,tid_t tid,REG dreg);
184 int xen_vm_notify_sw_breakpoint(struct target *target,ADDR addr,
185  int notification);
186 int xen_vm_singlestep(struct target *target,tid_t tid,int isbp,
187  struct target *overlay);
188 int xen_vm_singlestep_end(struct target *target,tid_t tid,
189  struct target *overlay);
190 
191 uint64_t xen_vm_get_tsc(struct target *target);
192 uint64_t xen_vm_get_time(struct target *target);
193 uint64_t xen_vm_get_counter(struct target *target);
194 
195 int xen_vm_enable_feature(struct target *target,int feature,void *arg);
196 int xen_vm_disable_feature(struct target *target,int feature);
197 
198 int xen_vm_instr_can_switch_context(struct target *target,ADDR addr);
199 
200 /* Internal prototypes. */
201 static int __xen_vm_pgd(struct target *target,tid_t tid,uint64_t *pgd);
202 static int __xen_vm_vcpu_to_thread_regcache(struct target *target,
203  struct vcpu_guest_context *context,
204  struct target_thread *tthread,
205  thread_ctxt_t tctxt);
206 static int __xen_vm_thread_regcache_to_vcpu(struct target *target,
207  struct target_thread *tthread,
208  thread_ctxt_t tctxt,
209  struct vcpu_guest_context *context);
210 static result_t xen_vm_active_memory_handler(struct probe *probe,tid_t tid,
211  void *handler_data,
212  struct probe *trigger,
213  struct probe *base);
214 static result_t xen_vm_active_thread_entry_handler(struct probe *probe,tid_t tid,
215  void *handler_data,
216  struct probe *trigger,
217  struct probe *base);
218 static result_t xen_vm_active_thread_exit_handler(struct probe *probe,tid_t tid,
219  void *handler_data,
220  struct probe *trigger,
221  struct probe *base);
222 
223 /* Format chars to print context registers. */
224 #if __WORDSIZE == 64
225 #define RF "lx"
226 #define DRF "lx"
227 #else
228 #define RF "x"
229 #define DRF "lx"
230 #endif
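/*
 * Illustrative use of the format-char macros above (not part of this
 * file): they are spliced into printf-style format strings by C string
 * concatenation, e.g.
 *
 *   vdebug(9,LA_TARGET,LF_XV,"ip = 0x%"RF", dr7 = 0x%"DRF"\n",ip,dr7);
 */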
231 
232 /*
233  * Globals.
234  *
235  * We support a couple different ways of listening for debug exceptions
236  * from the hypervisor. Exceptions come via the VIRQ_DEBUGGER virq; and
237  * only one consumer may bind to that irq. This is a problem if we want
238  * to have multiple VMI programs each debugging one or more
239  * domains... we have to demultiplex the IRQ signal to the right VMI
240  * program. Unfortunately, it's tricky to figure out which domain the
241  * IRQ was for, because of Xen bugs in the handling of x86 debug
242  * registers. So, the demultiplexer must pass the notification to *all*
243  * clients and let them decide if the signal was for them.
244  *
245  * So... we support a dedicated mode, where only one VMI program can run
246  * at a time; and a "shared" mode, where a demultiplexer process is
247  * spawned (if it doesn't already exist), and the VMI program(s) connect
248  * to it to receive VIRQ notifications.
249  *
250  * The xc_handle variable is always valid. For dedicated mode,
251  * xce_handle and dbg_port are valid; for shared mode,
252  * xen_vm_vmp_client_fd is valid instead.
253  */
254 static int xc_refcnt = 0;
255 
256 #ifdef XENCTRL_HAS_XC_INTERFACE
257 xc_interface *xc_handle = NULL;
258 static xc_interface *xce_handle = NULL;
259 #define XC_IF_INVALID (NULL)
260 #else
261 int xc_handle = -1;
262 static int xce_handle = -1;
263 #define XC_IF_INVALID (-1)
264 #endif
265 int xce_handle_fd = -1;
266 
267 #if !defined(XC_EVTCHN_PORT_T)
268 #error "XC_EVTCHN_PORT_T undefined!"
269 #endif
270 static XC_EVTCHN_PORT_T dbg_port = -1;
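/*
 * Dedicated-mode sketch (an illustration, not part of this file):
 * roughly how a single consumer binds VIRQ_DEBUGGER with libxc's
 * event-channel calls, per the comment above.  It assumes the newer
 * xc_evtchn API (the XENCTRL_HAS_XC_INTERFACE case); exact signatures
 * vary across Xen releases.
 */
static int example_bind_debug_virq(void) {
    xc_evtchn *xce;
    int port;

    if (!(xce = xc_evtchn_open(NULL,0)))
        return -1;
    /* Only one consumer system-wide may bind this virq! */
    if ((port = xc_evtchn_bind_virq(xce,VIRQ_DEBUGGER)) < 0) {
        xc_evtchn_close(xce);
        return -1;
    }
    /*
     * The caller then selects/polls on xc_evtchn_fd(xce); when it
     * fires, consume and re-arm with xc_evtchn_pending(xce) and
     * xc_evtchn_unmask(xce,port).
     */
    return xc_evtchn_fd(xce);
}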
271 
272 /*
273  * Set up the target interface for this library.
274  */
275 struct target_ops xen_vm_ops = {
276  .snprintf = xen_vm_snprintf,
277 
278  .init = xen_vm_init,
279  .fini = xen_vm_fini,
280  .attach = xen_vm_attach_internal,
281  .detach = xen_vm_detach,
282  .kill = xen_vm_kill,
283  .loadspaces = xen_vm_loadspaces,
284  .loadregions = xen_vm_loadregions,
285  .loaddebugfiles = xen_vm_loaddebugfiles,
286  .postloadinit = xen_vm_postloadinit,
287  .postopened = xen_vm_postopened,
288  .set_active_probing = xen_vm_set_active_probing,
289 
290  .handle_exception = xen_vm_handle_exception,
291  .handle_break = probepoint_bp_handler,
292  .handle_step = probepoint_ss_handler,
293  .handle_interrupted_step = NULL,
294 
295  .build_default_overlay_spec = xen_vm_build_default_overlay_spec,
296  .instantiate_overlay = xen_vm_instantiate_overlay,
297  .lookup_overlay_thread_by_id = xen_vm_lookup_overlay_thread_by_id,
298  .lookup_overlay_thread_by_name = xen_vm_lookup_overlay_thread_by_name,
299  .attach_overlay_thread = xen_vm_attach_overlay_thread,
300  .detach_overlay_thread = xen_vm_detach_overlay_thread,
301 
302  .status = xen_vm_status,
303  .pause = xen_vm_pause,
304  .resume = xen_vm_resume,
305  .monitor = xen_vm_monitor,
306  .poll = xen_vm_poll,
307  .read = xen_vm_read,
308  .write = xen_vm_write,
309  .addr_v2p = xen_vm_addr_v2p,
310  .read_phys = xen_vm_read_phys,
311  .write_phys = xen_vm_write_phys,
312 
313  .gettid = xen_vm_gettid,
314  .free_thread_state = xen_vm_free_thread_state,
315  .list_available_tids = xen_vm_list_available_tids,
316  .load_available_threads = xen_vm_load_available_threads,
317  .load_thread = xen_vm_load_thread,
318  .load_current_thread = xen_vm_load_current_thread,
319  .load_all_threads = xen_vm_load_all_threads,
320  .pause_thread = xen_vm_pause_thread,
321  .flush_thread = xen_vm_flush_thread,
322  .flush_current_thread = xen_vm_flush_current_thread,
323  .flush_all_threads = xen_vm_flush_all_threads,
324  .invalidate_thread = xen_vm_invalidate_thread,
325  .thread_snprintf = xen_vm_thread_snprintf,
326 
327  .attach_evloop = xen_vm_attach_evloop,
328  .detach_evloop = xen_vm_detach_evloop,
329 
330  .readreg = target_regcache_readreg,
331  .writereg = target_regcache_writereg,
332  .copy_registers = target_regcache_copy_registers,
333  .readreg_tidctxt = target_regcache_readreg_tidctxt,
334  .writereg_tidctxt = target_regcache_writereg_tidctxt,
335 
336  .get_unused_debug_reg = xen_vm_get_unused_debug_reg,
337  .set_hw_breakpoint = xen_vm_set_hw_breakpoint,
338  .set_hw_watchpoint = xen_vm_set_hw_watchpoint,
339  .unset_hw_breakpoint = xen_vm_unset_hw_breakpoint,
340  .unset_hw_watchpoint = xen_vm_unset_hw_watchpoint,
341  .disable_hw_breakpoints = xen_vm_disable_hw_breakpoints,
342  .enable_hw_breakpoints = xen_vm_enable_hw_breakpoints,
343  .disable_hw_breakpoint = xen_vm_disable_hw_breakpoint,
344  .enable_hw_breakpoint = xen_vm_enable_hw_breakpoint,
345  .notify_sw_breakpoint = xen_vm_notify_sw_breakpoint,
346  .singlestep = xen_vm_singlestep,
347  .singlestep_end = xen_vm_singlestep_end,
348  .instr_can_switch_context = xen_vm_instr_can_switch_context,
349  .get_tsc = xen_vm_get_tsc,
350  .get_time = xen_vm_get_time,
351  .get_counter = xen_vm_get_counter,
352  .enable_feature = xen_vm_enable_feature,
353  .disable_feature = xen_vm_disable_feature,
354 };
355 
356 #define XV_ARGP_USE_XENACCESS 0x550001
357 #define XV_ARGP_USE_LIBVMI 0x550002
358 #define XV_ARGP_CLEAR_MEM_CACHES 0x550003
359 #define XV_ARGP_MEMCACHE_MMAP_SIZE 0x550004
360 #define XV_ARGP_HIUE 0x550005
361 
362 struct argp_option xen_vm_argp_opts[] = {
363  /* These options set a flag. */
364  { "domain",'m',"DOMAIN",0,"The Xen domain ID or name.",-4 },
365  { "kernel-filename",'K',"FILE",0,
366  "Override xenstore kernel filepath for guest.",-4 },
367  { "no-clear-hw-debug-regs",'H',NULL,0,
368  "Don't clear hardware debug registers at target attach.",-4 },
369  { "clear-mem-caches-each-exception",XV_ARGP_CLEAR_MEM_CACHES,NULL,0,
370  "Clear mem caches on each debug exception.",-4 },
371 #ifdef ENABLE_LIBVMI
372  { "use-libvmi",XV_ARGP_USE_LIBVMI,NULL,0,
373  "Clear mem caches on each debug exception.",-4 },
374 #endif
375 #ifdef ENABLE_XENACCESS
376  { "use-xenaccess",XV_ARGP_USE_XENACCESS,NULL,0,
377  "Clear mem caches on each debug exception.",-4 },
378 #endif
379  { "memcache-mmap-size",XV_ARGP_MEMCACHE_MMAP_SIZE,"BYTES",0,
380  "Max size (bytes) of the mmap cache (default 128MB).",-4 },
381  { "no-hvm-setcontext",'V',NULL,0,
382  "Don't use HVM-specific libxc get/set context functions to access"
383  "virtual CPU info.",-4 },
384  { "configfile",'c',"FILE",0,"The Xen config file.",-4 },
385  { "replaydir",'r',"DIR",0,"The XenTT replay directory.",-4 },
386  { "no-use-multiplexer",'M',NULL,0,"Do not spawn/attach to the Xen multiplexer server",-4 },
387  { "dominfo-timeout",'T',"MICROSECONDS",0,"If libxc gets a \"NULL\" dominfo status, the number of microseconds we should keep retrying",-4 },
388  { "hypervisor-ignores-userspace-exceptions",XV_ARGP_HIUE,NULL,0,"If your Xen hypervisor is not a Utah-patched version, make sure to supply this flag!",-4 },
389  { 0,0,0,0,0,0 }
390 };
391 
392 int xen_vm_spec_to_argv(struct target_spec *spec,int *argc,char ***argv) {
393  struct xen_vm_spec *xspec =
394  (struct xen_vm_spec *)spec->backend_spec;
395  char **av = NULL;
396  int ac = 0;
397  int j;
398 
399  if (!xspec) {
400  if (argv)
401  *argv = NULL;
402  if (argc)
403  *argc = 0;
404  return 0;
405  }
406 
407  if (xspec->domain)
408  ac += 2;
409  if (xspec->kernel_filename)
410  ac += 2;
411  if (xspec->no_hw_debug_reg_clear)
412  ac += 1;
413  if (xspec->no_hvm_setcontext)
414  ac += 1;
415  if (xspec->clear_mem_caches_each_exception)
416  ac += 1;
417 #ifdef ENABLE_LIBVMI
418  if (xspec->use_libvmi)
419  ac += 1;
420 #endif
421 #ifdef ENABLE_XENACCESS
422  if (xspec->use_xenaccess)
423  ac += 1;
424 #endif
425  if (xspec->memcache_mmap_size)
426  ac += 2;
427  if (xspec->config_file)
428  ac += 2;
429  if (xspec->replay_dir)
430  ac += 2;
431  if (xspec->no_use_multiplexer)
432  ac += 1;
433  if (xspec->dominfo_timeout > 0)
434  ac += 2;
435  if (xspec->hypervisor_ignores_userspace_exceptions)
436  ac += 1;
437 
438  av = calloc(ac + 1,sizeof(char *));
439  j = 0;
440  if (xspec->domain) {
441  av[j++] = strdup("-m");
442  av[j++] = strdup(xspec->domain);
443  }
444  if (xspec->kernel_filename) {
445  av[j++] = strdup("-K");
446  av[j++] = strdup(xspec->kernel_filename);
447  }
448  if (xspec->no_hw_debug_reg_clear) {
449  av[j++] = strdup("--no-clear-hw-debug-regs");
450  }
451  if (xspec->no_hvm_setcontext) {
452  av[j++] = strdup("--no-hvm-setcontext");
453  }
454  if (xspec->clear_mem_caches_each_exception) {
455  av[j++] = strdup("--clear-mem-caches-each-exception");
456  }
457 #ifdef ENABLE_LIBVMI
458  if (xspec->use_libvmi)
459  av[j++] = strdup("--use-libvmi");
460 #endif
461 #ifdef ENABLE_XENACCESS
462  if (xspec->use_xenaccess)
463  av[j++] = strdup("--use-xenaccess");
464 #endif
465  if (xspec->memcache_mmap_size) {
466  av[j++] = strdup("--memcache-mmap-size");
467  av[j] = malloc(32);
468  snprintf(av[j],32,"%lu",xspec->memcache_mmap_size);
469  j++;
470  }
471  if (xspec->config_file) {
472  av[j++] = strdup("-c");
473  av[j++] = strdup(xspec->config_file);
474  }
475  if (xspec->replay_dir) {
476  av[j++] = strdup("-r");
477  av[j++] = strdup(xspec->replay_dir);
478  }
479  if (xspec->no_use_multiplexer) {
480  av[j++] = strdup("--no-use-multiplexer");
481  }
482  if (xspec->dominfo_timeout > 0) {
483  av[j++] = strdup("-T");
484  av[j] = malloc(16);
485  snprintf(av[j],16,"%d",xspec->dominfo_timeout);
486  j++;
487  }
488  if (xspec->hypervisor_ignores_userspace_exceptions)
489  av[j++] = strdup("--hypervisor-ignores-userspace-exceptions");
490  av[j++] = NULL;
491 
492  if (argv)
493  *argv = av;
494  if (argc)
495  *argc = ac;
496 
497  return 0;
498 }
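/*
 * Usage sketch (an illustration, not part of this file): round-trip a
 * spec into argv form, e.g. to hand a target's configuration to a
 * helper process.  Assumes a spec whose backend_spec was built by
 * xen_vm_build_spec().
 */
static void example_print_backend_argv(struct target_spec *spec) {
    char **argv = NULL;
    int argc = 0,i;

    if (xen_vm_spec_to_argv(spec,&argc,&argv))
        return;
    for (i = 0; i < argc; ++i) {
        printf("argv[%d] = %s\n",i,argv[i]);
        free(argv[i]);
    }
    free(argv);
}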
499 
500 error_t xen_vm_argp_parse_opt(int key,char *arg,struct argp_state *state) {
501  struct target_argp_parser_state *tstate = \
502  (struct target_argp_parser_state *)state->input;
503  struct target_spec *spec;
504  struct xen_vm_spec *xspec;
505  struct argp_option *opti;
506  int ourkey;
507 
508  if (key == ARGP_KEY_INIT)
509  return 0;
510  else if (!state->input)
511  return ARGP_ERR_UNKNOWN;
512 
513  if (tstate)
514  spec = tstate->spec;
515 
516  /*
517  * Check to see if this is really one of our keys. If it is, we
518  * need to see if some other backend has already started parsing
519  * args; if it has, we throw an error. Otherwise, we assume we are
520  * using this backend, and process the arg.
521  */
522  ourkey = 0;
523  for (opti = &xen_vm_argp_opts[0]; opti->key != 0; ++opti) {
524  if (key == opti->key) {
525  ourkey = 1;
526  break;
527  }
528  }
529 
530  if (ourkey) {
531  if (spec->target_type == TARGET_TYPE_NONE) {
532  spec->target_type = TARGET_TYPE_XEN;
533  xspec = xen_vm_build_spec();
534  spec->backend_spec = xspec;
535  }
536  else if (spec->target_type != TARGET_TYPE_XEN) {
537  verror("cannot mix arguments for Xen target (%c) with non-Xen"
538  " target!\n",key);
539  return EINVAL;
540  }
541 
542  /* Only "claim" these args if this is our key. */
545  xspec = calloc(1,sizeof(*xspec));
546  spec->backend_spec = xspec;
547  }
548  else if (spec->target_type != TARGET_TYPE_XEN) {
549  verror("cannot mix arguments for Xen target with non-Xen target!\n");
550  return EINVAL;
551  }
552  }
553 
554  if (spec->target_type == TARGET_TYPE_XEN)
555  xspec = (struct xen_vm_spec *)spec->backend_spec;
556  else
557  xspec = NULL;
558 
559  switch (key) {
560  case ARGP_KEY_ARG:
561  case ARGP_KEY_ARGS:
562  /* Only handle these if you need arguments. */
563  return ARGP_ERR_UNKNOWN;
564  case ARGP_KEY_INIT:
565  case ARGP_KEY_END:
566  case ARGP_KEY_NO_ARGS:
567  /* Nothing to do unless you malloc something in _INIT. */
568  return 0;
569  case ARGP_KEY_SUCCESS:
570  case ARGP_KEY_ERROR:
571  case ARGP_KEY_FINI:
572  /* Check spec for sanity if necessary. */
573  return 0;
574 
575  case 'm':
576  xspec->domain = strdup(arg);
577  break;
578  case 'K':
579  xspec->kernel_filename = strdup(arg);
580  break;
581  case 'H':
582  xspec->no_hw_debug_reg_clear = 1;
583  break;
584  case 'V':
585  xspec->no_hvm_setcontext = 1;
586  break;
587  case 'c':
588  xspec->config_file = strdup(arg);
589  break;
590  case XV_ARGP_CLEAR_MEM_CACHES:
591  xspec->clear_mem_caches_each_exception = 1;
592  break;
593 #ifdef ENABLE_LIBVMI
594  case XV_ARGP_USE_LIBVMI:
595  xspec->use_libvmi = 1;
596  break;
597 #endif
598 #ifdef ENABLE_XENACCESS
599  case XV_ARGP_USE_XENACCESS:
600  xspec->use_xenaccess = 1;
601  break;
602 #endif
604  xspec->memcache_mmap_size = atoi(arg);
605  break;
606  case 'r':
607  xspec->replay_dir = strdup(arg);
608  break;
609  case 'M':
610  xspec->no_use_multiplexer = 1;
611  break;
612  case 'T':
613  xspec->dominfo_timeout = atoi(arg);
614  break;
615  case XV_ARGP_HIUE:
616  xspec->hypervisor_ignores_userspace_exceptions = 1;
617  break;
618  default:
619  return ARGP_ERR_UNKNOWN;
620  }
621 
622  return 0;
623 }
624 
625 struct argp xen_vm_argp = {
626  xen_vm_argp_opts,xen_vm_argp_parse_opt,NULL,NULL,NULL,NULL,NULL
627 };
628 char *xen_vm_argp_header = "Xen Backend Options";
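/*
 * Wiring sketch (an illustration, not part of this file): a frontend
 * can splice these backend options into its own parser as a glibc argp
 * child; argp then routes our option keys to xen_vm_argp_parse_opt()
 * above.
 */
static const struct argp_child example_children[] = {
    { &xen_vm_argp,0,"Xen Backend Options",0 },
    { NULL,0,NULL,0 }
};
static const struct argp example_argp = {
    NULL,NULL,NULL,"Example frontend.",example_children,NULL,NULL
};
/* Then: argp_parse(&example_argp,argc,argv,0,NULL,&parser_state); */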
629 
634 static struct target *xen_vm_instantiate(struct target_spec *spec,
635  struct evloop *evloop) {
636  return xen_vm_attach(spec,evloop);
637 }
638 
639 struct xen_vm_spec *xen_vm_build_spec(void) {
640  struct xen_vm_spec *xspec;
641 
642  xspec = calloc(1,sizeof(*xspec));
643  /* default to 128MB. */
644  xspec->memcache_mmap_size = 128 * 1024 * 1024;
645 
646  return xspec;
647 }
648 
649 void xen_vm_free_spec(struct xen_vm_spec *xspec) {
650  if (xspec->domain)
651  free(xspec->domain);
652  if (xspec->config_file)
653  free(xspec->config_file);
654  if(xspec->replay_dir)
655  free(xspec->replay_dir);
656 
657  free(xspec);
658 }
659 
660 /*
661  * Attaches to domid. We basically check the xenstore to figure out
662  * what kernel the domain is running, and try to find vmlinux based on
663  * that. We also read how much mem the domain has; if it is
664  * PAE-enabled;
665  */
666 struct target *xen_vm_attach(struct target_spec *spec,
667  struct evloop *evloop) {
668  struct xen_vm_spec *xspec = (struct xen_vm_spec *)spec->backend_spec;
669  struct target *target = NULL;
670  struct xen_vm_state *xstate = NULL;
671  struct xs_handle *xsh = NULL;
672  xs_transaction_t xth = XBT_NULL;
673  char *buf = NULL;
674  char *tmp = NULL;
675  char **domains = NULL;
676  unsigned int size;
677  unsigned int i;
678  int have_id = 0;
679  char *domain;
680 
681  domain = xspec->domain;
682 
683  if (geteuid() != 0) {
684  verror("must be root!\n");
685  errno = EPERM;
686  return NULL;
687  }
688 
689  vdebug(5,LA_TARGET,LF_XV,"attaching to domain %s\n",domain);
690 
691  if (!(target = target_create("xen_vm",spec)))
692  return NULL;
693 
694  if (!(xstate = (struct xen_vm_state *)malloc(sizeof(*xstate)))) {
695  free(target);
696  return NULL;
697  }
698  memset(xstate,0,sizeof(*xstate));
699 
700  target->state = xstate;
701 
702  if (!(buf = malloc(PATH_MAX))) {
703  verror("could not allocate tmp path buffer: %s\n",strerror(errno));
704  goto errout;
705  }
706 
707  if (!(xsh = xs_domain_open())) {
708  verror("could not open xenstore!\n");
709  goto errout;
710  }
711 
712  xstate->evloop_fd = -1;
713 
714  /* First figure out whether we need to resolve the ID, or the name. */
715  errno = 0;
716  xstate->id = (domid_t)strtol(domain,&tmp,10);
717  if (errno == ERANGE) {
718  verror("bad domain id: %s\n",strerror(errno));
719  goto errout;
720  }
721  else if (errno == EINVAL || tmp == domain)
722  have_id = 0;
723  else {
724  vdebug(4,LA_TARGET,LF_XV,"found id %d (from %s)\n",xstate->id,domain);
725  have_id = 1;
726  }
727  tmp = NULL;
728 
729  /* We have to try to find the ID first. */
730  if (!have_id) {
731  domains = xs_directory(xsh,xth,"/local/domain",&size);
732  for (i = 0; i < size; ++i) {
733  /* read in name */
734  snprintf(buf,PATH_MAX,"/local/domain/%s/name",domains[i]);
735  tmp = xs_read(xsh,xth,buf,NULL);
736 
737  if (tmp && strcmp(domain,tmp) == 0) {
738  vdebug(9,LA_TARGET,LF_XV,"dom %s (from %s) matches\n",tmp,domain);
739  errno = 0;
740  xstate->id = (domid_t)strtol(domains[i],NULL,10);
741  if (errno) {
742  if (have_id) {
743  free(tmp);
744  tmp = NULL;
745  break;
746  }
747  else {
748  verror("matching domain name for %s; but bad"
749  " domain id %s: %s\n",
750  tmp,domains[i],strerror(errno));
751  free(tmp);
752  tmp = NULL;
753  goto errout;
754  }
755  }
756  else {
757  if (have_id)
758  free(xstate->name);
759  xstate->name = strdup(tmp);
760  have_id = 1;
761  vdebug(4,LA_TARGET,LF_XV,"dom %d (from %s) matches id\n",
762  xstate->id,domain);
763  }
764  }
765  else if (tmp) {
766  free(tmp);
767  tmp = NULL;
768  }
769  }
770 
771  free(domains);
772  domains = NULL;
773 
774  if (!have_id) {
775  verror("could not find domain id for %s!\n",domain);
776  errno = EINVAL;
777  goto errout;
778  }
779  }
780 
781  /* Once we have an ID, try that to find the name if we need. */
782  if (!xstate->name) {
783  snprintf(buf,PATH_MAX,"/local/domain/%d/name",xstate->id);
784  xstate->name = xs_read(xsh,xth,buf,NULL);
785  if (!xstate->name)
786  vwarn("could not read name for dom %d; may cause problems!\n",
787  xstate->id);
788  }
789 
790  /* Now try to find vmpath. */
791  snprintf(buf,PATH_MAX,"/local/domain/%d/vm",xstate->id);
792  xstate->vmpath = xs_read(xsh,xth,buf,NULL);
793  if (!xstate->vmpath)
794  vwarn("could not read vmpath for dom %d; may cause problems!\n",
795  xstate->id);
796  else {
797  snprintf(buf,PATH_MAX,"%s/image/ostype",xstate->vmpath);
798  xstate->ostype = xs_read(xsh,xth,buf,NULL);
799  if (!xstate->ostype) {
800  vwarn("could not read ostype for dom %d; may cause problems!\n",
801  xstate->id);
802  g_hash_table_insert(target->config,strdup("VM_TYPE"),
803  strdup("paravirt"));
804  }
805  else if (strcmp(xstate->ostype,"hvm") == 0) {
806  xstate->hvm = 1;
807  g_hash_table_insert(target->config,strdup("VM_TYPE"),
808  strdup("hvm"));
809  }
810  else {
811  g_hash_table_insert(target->config,strdup("OS_TYPE"),
812  strdup(xstate->ostype));
813  g_hash_table_insert(target->config,strdup("VM_TYPE"),
814  strdup("paravirt"));
815  }
816 
817  snprintf(buf,PATH_MAX,"%s/image/kernel",xstate->vmpath);
818  xstate->kernel_filename = xs_read(xsh,xth,buf,NULL);
819  if (!xstate->kernel_filename)
820  vwarn("could not read kernel for dom %d; may cause problems!\n",
821  xstate->id);
822  else {
823  g_hash_table_insert(target->config,strdup("OS_KERNEL_FILENAME"),
824  strdup(xstate->kernel_filename));
825  }
826  }
827 
828  if (xspec->kernel_filename) {
830  "using kernel filename %s (overrides %s from xenstore)\n",
831  xspec->kernel_filename,xstate->kernel_filename ? xstate->kernel_filename : "''");
832 
833  if (xstate->kernel_filename)
834  free(xstate->kernel_filename);
835 
836  xstate->kernel_filename = strdup(xspec->kernel_filename);
837 
838  g_hash_table_remove(target->config,"OS_KERNEL_FILENAME");
839  g_hash_table_insert(target->config,strdup("OS_KERNEL_FILENAME"),
840  strdup(xstate->kernel_filename));
841  }
842 
843  if (xsh) {
844  xs_daemon_close(xsh);
845  xsh = NULL;
846  }
847 
848  free(buf);
849  buf = NULL;
850 
851  /*
852  * Try to infer the personality.
853  */
854  if (!target->personality_ops
855  && xstate->kernel_filename
856  && (strstr(xstate->kernel_filename,"inux")
857  || strstr(xstate->kernel_filename,"inuz"))) {
858  if (target_personality_attach(target,"os_linux_generic",NULL) == 0) {
860  "autoinitialized the os_linux_generic personality!\n");
861  }
862  else {
863  verror("failed to autoinitialize the os_linux_generic personality!\n");
864  goto errout;
865  }
866  }
867  else {
868  vwarn("cannot initialize a personality!\n");
869  }
870 
871  target->live = 1;
872  target->writeable = 1;
873  target->mmapable = 0; /* XXX: change this once we get mmap API
874  worked out. */
875 
876  /*
877  * Now load up our {xa|vmi}_instance as much as we can now; we'll
878  * try to do more when we load the debuginfo file for the kernel.
879  */
880  xstate->memops = NULL;
881 #ifdef ENABLE_LIBVMI
882  if (!xstate->memops && xspec->use_libvmi)
883  xstate->memops = &xen_vm_mem_ops_libvmi;
884 #endif
885 #ifdef ENABLE_XENACCESS
886  if (!xstate->memops && xspec->use_xenaccess)
887  xstate->memops = &xen_vm_mem_ops_xenaccess;
888 #endif
889  if (!xstate->memops)
890  xstate->memops = &xen_vm_mem_ops_builtin;
891 
892  if (xstate->memops->init) {
893  if (xstate->memops->init(target)) {
894  verror("failed to init memops!\n");
895  goto errout;
896  }
897  }
898 
899  /* Our threads can have two contexts -- kernel and user spaces. */
901 
902  if (evloop && xstate->evloop_fd < 0) {
903  /*
904  * Just save it off; we can't use it until in xen_vm_attach_internal.
905  */
906  target->evloop = evloop;
907  }
908 
909  vdebug(5,LA_TARGET,LF_XV,"opened dom %d\n",xstate->id);
910 
911  return target;
912 
913  errout:
914  if (domains) {
915  for (i = 0; i < size; ++i) {
916  free(domains[i]);
917  }
918  free(domains);
919  }
920  if (xstate->vmpath) {
921  free(xstate->vmpath);
922  xstate->vmpath = NULL;
923  }
924  if (xstate->ostype) {
925  free(xstate->ostype);
926  xstate->ostype = NULL;
927  }
928  if (xstate->name) {
929  free(xstate->name);
930  xstate->name = NULL;
931  }
932  if (xsh)
933  xs_daemon_close(xsh);
934  if (xstate) {
935  free(xstate);
936  if (target)
937  target->state = NULL;
938  }
939  if (target)
941 
942  return NULL;
943 }
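/*
 * Attach sketch (an illustration, not part of this file): minimal
 * caller-side setup for xen_vm_attach() above.  Building a target_spec
 * by hand like this is an assumption for brevity; real frontends go
 * through the generic target API.
 */
static struct target *example_attach_domain(const char *dom) {
    struct target_spec *spec;
    struct xen_vm_spec *xspec;

    if (!(spec = calloc(1,sizeof(*spec))))
        return NULL;
    spec->target_type = TARGET_TYPE_XEN;
    xspec = xen_vm_build_spec();
    xspec->domain = strdup(dom);
    spec->backend_spec = xspec;

    /* A NULL evloop is fine; a non-NULL one is just stashed for later. */
    return xen_vm_attach(spec,NULL);
}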
944 
949 static int xen_vm_load_dominfo(struct target *target) {
950  struct xen_vm_state *xstate = (struct xen_vm_state *)(target->state);
951  struct xen_vm_spec *xspec = (struct xen_vm_spec *)target->spec->backend_spec;
952  long total,waited;
953  /*
954  * Wait for 10us repeatedly if dominfo doesn't return what we think
955  * it should. 10us is arbitrary, but a mid-granularity compromise.
956  */
957  long interval = 10;
958  struct timeval itv = { 0,0 };
959  int rc;
960 
961  if (!xstate->dominfo_valid) {
963  "load dominfo; current dominfo is invalid\n");
964  memset(&xstate->dominfo,0,sizeof(xstate->dominfo));
965  if (xc_domain_getinfo(xc_handle,xstate->id,1,
966  &xstate->dominfo) <= 0) {
967  verror("could not get dominfo for %d\n",xstate->id);
968  errno = EINVAL;
969  return -1;
970  }
971 
972  waited = 0;
973  total = (xspec->dominfo_timeout > 0) ? xspec->dominfo_timeout : 0;
974 
975  while (!xstate->dominfo.dying && !xstate->dominfo.crashed
976  && !xstate->dominfo.shutdown && !xstate->dominfo.paused
977  && !xstate->dominfo.blocked && !xstate->dominfo.running
978  && (total - waited) > 0) {
979  vwarnopt(5,LA_TARGET,LF_XV,"domain %d has no status!\n",xstate->id);
980 
981  itv.tv_sec = 0;
982  itv.tv_usec = (interval > (total - waited)) \
983  ? (total - waited) : interval;
984 
985  rc = select(0,NULL,NULL,NULL,&itv);
986  if (rc < 0) {
987  if (errno != EINTR) {
988  verror("select(dominfo retry): %s\n",strerror(errno));
989  return -1;
990  }
991  else {
992  /* Assume itv timer has expired -- even though it
993  * may not have, of course, since select() errored
994  * and we can't trust the timer value.
995  */
996  itv.tv_usec = 0;
997  }
998  }
999 
1000  waited += (interval - itv.tv_usec);
1001 
1003  "waited %d of %d total microseconds to retry dominfo...\n",
1004  waited,total);
1005 
1006  if (xc_domain_getinfo(xc_handle,xstate->id,1,
1007  &xstate->dominfo) <= 0) {
1008  verror("could not get dominfo for %d\n",xstate->id);
1009  errno = EINVAL;
1010  return -1;
1011  }
1012  }
1013 
1014  /*
1015  * Only do this once, and use libxc directly.
1016  */
1017  if (unlikely(!xstate->live_shinfo)) {
1018  xstate->live_shinfo =
1019  xc_map_foreign_range(xc_handle,xstate->id,__PAGE_SIZE,PROT_READ,
1020  xstate->dominfo.shared_info_frame);
1021  if (!xstate->live_shinfo) {
1022  verror("could not mmap shared_info frame 0x%lx!\n",
1023  xstate->dominfo.shared_info_frame);
1024  errno = EFAULT;
1025  return -1;
1026  }
1027  }
1028 
1029  /*
1030  * Have to grab vcpuinfo out of shared frame, argh! This can't
1031  * be the only way to access the tsc, but I can't find a better
1032  * libxc way to do it!
1033  *
1034  * XXX: Do we really have to do this every time the domain is
1035  * interrupted?
1036  */
1037  memcpy(&xstate->vcpuinfo,&xstate->live_shinfo->vcpu_info[0],
1038  sizeof(xstate->vcpuinfo));
1039 
1040  xstate->dominfo_valid = 1;
1041  }
1042  else {
1044  "did not need to load dominfo; current dominfo is valid\n");
1045  }
1046 
1047  return 0;
1048 }
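/*
 * Worked example of the retry arithmetic above (an illustration): with
 * dominfo_timeout = 25 and interval = 10, the loop sleeps 10us, 10us,
 * then 5us (itv.tv_usec is clamped to total - waited), after which the
 * (total - waited) guard fails and the last xc_domain_getinfo() result
 * stands, whatever the domain's status bits say.
 */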
1049 
1050 static struct target_thread *__xen_vm_load_cached_thread(struct target *target,
1051  tid_t tid) {
1052  struct target_thread *tthread;
1053 
1054  tthread = target_lookup_thread(target,tid);
1055  if (!tthread)
1056  return NULL;
1057 
1058  if (!OBJVALID(tthread))
1059  return xen_vm_load_thread(target,tid,0);
1060 
1061  return tthread;
1062 }
1063 
1064 static int __xen_vm_in_userspace(struct target *target,int cpl,REGVAL ipval) {
1065  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
1066 
1067  /*
1068  * This is a real pain. We have to use kernel_start_addr because on
1069  * at least some Xen hypervisors, %cs is zeroed out, so we cannot
1070  * extract the CPL. From my reading of the x86 and amd64 manuals,
1071  * it should not be zeroed out -- the segment selector registers are
1072  * only used for privilege levels in long mode.
1073  */
1074  if (xstate->kernel_start_addr && xstate->kernel_start_addr < ADDRMAX) {
1075  if (ipval < xstate->kernel_start_addr)
1076  return 1;
1077  else
1078  return 0;
1079  }
1080  else {
1081  if (cpl == 3)
1082  return 1;
1083  else
1084  return 0;
1085  }
1086 }
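/*
 * Example (an illustration): for a typical x86-64 Linux guest with
 * kernel_start_addr = 0xffffffff81000000, an ip of 0x00007f0000001000
 * classifies as userspace and 0xffffffff8104a7d0 as kernel; the CPL
 * fallback is only consulted when kernel_start_addr is not known.
 */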
1087 
1088 static int __xen_get_cpl_thread(struct target *target,
1089  struct target_thread *tthread) {
1090  REG csr = -1;
1091  REGVAL cs;
1092 
1093  if (target->arch->type == ARCH_X86)
1094  csr = REG_X86_CS;
1095  else if (target->arch->type == ARCH_X86_64)
1096  csr = REG_X86_64_CS;
1097 
1098  /* Load the CPL. */
1099  errno = 0;
1100  cs = 0x3 & target_read_reg(target,tthread->tid,csr);
1101  if (errno) {
1102  verror("could not read CS register to find CPL!\n");
1103  return -1;
1104  }
1105 
1106  return (int)cs;
1107 }
1108 
1109 static int __xen_get_cpl(struct target *target,tid_t tid) {
1110  struct target_thread *tthread;
1111 
1112  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
1113  if (!errno)
1114  errno = EINVAL;
1115  verror("could not load cached thread %"PRIiTID"\n",tid);
1116  return 0;
1117  }
1118 
1119  return __xen_get_cpl_thread(target,tthread);
1120 }
1121 
1122 static struct target_thread *xen_vm_load_thread(struct target *target,
1123  tid_t tid,int force) {
1124  struct target_thread *tthread = NULL;
1125 
1126  /*
1127  * If we are asking for the global thread (TID_GLOBAL), do that
1128  * right away.
1129  */
1130  if (tid == TID_GLOBAL) {
1131  /*
1132  * We have to *not* call _load_current_thread if the global
1133  * thread is valid. This is part of a hack (chicken and egg)
1134  * problem where to "fully" load the global thread, we must have
1135  * its registers. Our register read functions try to load the
1136  * current thread if it's not loaded. So... see
1137  * _load_current_thread for more...
1138  */
1139  if (OBJVALID(target->global_thread))
1140  return target->global_thread;
1141  else {
1142  xen_vm_load_current_thread(target,force);
1143  return target->global_thread;
1144  }
1145  }
1146 
1147  /*
1148  * If we haven't loaded current_thread yet, we really should load it
1149  * because otherwise we don't know if current_thread->tid == @tid.
1150  * If it does, we don't want to do the below stuff, which only
1151  * applies to non-running threads.
1152  */
1153  if (!xen_vm_load_current_thread(target,force)) {
1154  vwarn("could not load current thread to compare with"
1155  " tid %"PRIiTID"!\n",tid);
1156  }
1157 
1158  /*
1159  * If the thread tid we are asking for is the current thread and is
1160  * valid, or if the thread is in our cache and is valid.
1161  */
1162  else if (target->current_thread
1163  && OBJVALID(target->current_thread)
1164  && target->current_thread->tid == tid) {
1165  return xen_vm_load_current_thread(target,force);
1166  }
1167  /*
1168  * Otherwise, try to lookup thread @tid.
1169  */
1170  else if ((tthread = target_lookup_thread(target,tid))) {
1171  if (OBJVALID(tthread) && !force) {
1172  vdebug(4,LA_TARGET,LF_XV,"did not need to load thread; copy is valid\n");
1173  return tthread;
1174  }
1175  }
1176 
1177  /*
1178  * Note:
1179  *
1180  * At this point, we can be sure that we are loading a thread that
1181  * is not running; thus, its CPU state is on the kernel stack.
1182  *
1183  * This means we must ask the personality to do it, because only the
1184  * personality can interpret the kernel stack.
1185  */
1186  SAFE_PERSONALITY_OP_WARN_NORET(load_thread,tthread,NULL,target,tid,force);
1187 
1188  return tthread;
1189 }
1190 
1191 #ifdef __x86_64__
1192 /*
1193  * NB: these functions do *NOT* zero out the destination's contents;
1194  * they just copy what they can into the destination.
1195  */
1196 static int __xen_vm_hvm_cpu_to_vcpu_context(HVM_SAVE_TYPE(CPU) *hvm,
1197  vcpu_guest_context_t *svm) {
1198  assert(sizeof(svm->fpu_ctxt.x) == sizeof(hvm->fpu_regs));
1199 
1200  memcpy(svm->fpu_ctxt.x,hvm->fpu_regs,sizeof(svm->fpu_ctxt.x));
1201 
1202  svm->user_regs.rax = hvm->rax;
1203  svm->user_regs.rbx = hvm->rbx;
1204  svm->user_regs.rcx = hvm->rcx;
1205  svm->user_regs.rdx = hvm->rdx;
1206  svm->user_regs.rbp = hvm->rbp;
1207  svm->user_regs.rsi = hvm->rsi;
1208  svm->user_regs.rdi = hvm->rdi;
1209  svm->user_regs.rsp = hvm->rsp;
1210  svm->user_regs.r8 = hvm->r8;
1211  svm->user_regs.r9 = hvm->r9;
1212  svm->user_regs.r10 = hvm->r10;
1213  svm->user_regs.r11 = hvm->r11;
1214  svm->user_regs.r12 = hvm->r12;
1215  svm->user_regs.r13 = hvm->r13;
1216  svm->user_regs.r14 = hvm->r14;
1217  svm->user_regs.r15 = hvm->r15;
1218 
1219  svm->user_regs.rip = hvm->rip;
1220  svm->user_regs.rflags = hvm->rflags;
1221 
1222  svm->user_regs.error_code = hvm->error_code;
1223 
1224  /* XXX: cs, ds, es, fs, gs */
1225 
1226  if (hvm->gs_base)
1227  svm->gs_base_kernel = hvm->gs_base;
1228  else
1229  svm->gs_base_kernel = hvm->shadow_gs;
1230 
1231  /* XXX: ldt/gdt stuff */
1232 
1233  /* XXX: kernel_ss, kernel_sp */
1234 
1235  svm->ctrlreg[0] = hvm->cr0;
1236  svm->ctrlreg[2] = hvm->cr2;
1237  svm->ctrlreg[3] = hvm->cr3;
1238  svm->ctrlreg[4] = hvm->cr4;
1239 
1240  svm->debugreg[0] = hvm->dr0;
1241  svm->debugreg[1] = hvm->dr1;
1242  svm->debugreg[2] = hvm->dr2;
1243  svm->debugreg[3] = hvm->dr3;
1244  svm->debugreg[6] = hvm->dr6;
1245  svm->debugreg[7] = hvm->dr7;
1246 
1247  /* XXX: fs_base, gs_base_kernel, gs_base_user */
1248 
1249  return 0;
1250 }
1251 
1252 static int __xen_vm_vcpu_to_hvm_cpu_context(vcpu_guest_context_t *svm,
1253  HVM_SAVE_TYPE(CPU) *hvm) {
1254  assert(sizeof(svm->fpu_ctxt.x) == sizeof(hvm->fpu_regs));
1255 
1256  memcpy(hvm->fpu_regs,svm->fpu_ctxt.x,sizeof(hvm->fpu_regs));
1257 
1258  if (hvm->rax != svm->user_regs.rax) {
1259  vdebug(9,LA_TARGET,LF_XV,"setting rax = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1260  svm->user_regs.rax,hvm->rax);
1261  hvm->rax = svm->user_regs.rax;
1262  }
1263  if (hvm->rbx != svm->user_regs.rbx) {
1264  vdebug(9,LA_TARGET,LF_XV,"setting rbx = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1265  svm->user_regs.rbx,hvm->rbx);
1266  hvm->rbx = svm->user_regs.rbx;
1267  }
1268  if (hvm->rcx != svm->user_regs.rcx) {
1269  vdebug(9,LA_TARGET,LF_XV,"setting rcx = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1270  svm->user_regs.rcx,hvm->rcx);
1271  hvm->rcx = svm->user_regs.rcx;
1272  }
1273  if (hvm->rdx != svm->user_regs.rdx) {
1274  vdebug(9,LA_TARGET,LF_XV,"setting rdx = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1275  svm->user_regs.rdx,hvm->rdx);
1276  hvm->rdx = svm->user_regs.rdx;
1277  }
1278  if (hvm->rbp != svm->user_regs.rbp) {
1279  vdebug(9,LA_TARGET,LF_XV,"setting rbp = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1280  svm->user_regs.rbp,hvm->rbp);
1281  hvm->rbp = svm->user_regs.rbp;
1282  }
1283  if (hvm->rsi != svm->user_regs.rsi) {
1284  vdebug(9,LA_TARGET,LF_XV,"setting rsi = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1285  svm->user_regs.rsi,hvm->rsi);
1286  hvm->rsi = svm->user_regs.rsi;
1287  }
1288  if (hvm->rdi != svm->user_regs.rdi) {
1289  vdebug(9,LA_TARGET,LF_XV,"setting rdi = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1290  svm->user_regs.rdi,hvm->rdi);
1291  hvm->rdi = svm->user_regs.rdi;
1292  }
1293  if (hvm->rsp != svm->user_regs.rsp) {
1294  vdebug(9,LA_TARGET,LF_XV,"setting rsp = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1295  svm->user_regs.rsp,hvm->rsp);
1296  hvm->rsp = svm->user_regs.rsp;
1297  }
1298  if (hvm->r8 != svm->user_regs.r8) {
1299  vdebug(9,LA_TARGET,LF_XV,"setting r8 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1300  svm->user_regs.r8,hvm->r8);
1301  hvm->r8 = svm->user_regs.r8;
1302  }
1303  if (hvm->r9 != svm->user_regs.r9) {
1304  vdebug(9,LA_TARGET,LF_XV,"setting r9 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1305  svm->user_regs.r9,hvm->r9);
1306  hvm->r9 = svm->user_regs.r9;
1307  }
1308  if (hvm->r10 != svm->user_regs.r10) {
1309  vdebug(9,LA_TARGET,LF_XV,"setting r10 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1310  svm->user_regs.r10,hvm->r10);
1311  hvm->r10 = svm->user_regs.r10;
1312  }
1313  if (hvm->r11 != svm->user_regs.r11) {
1314  vdebug(9,LA_TARGET,LF_XV,"setting r11 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1315  svm->user_regs.r11,hvm->r11);
1316  hvm->r11 = svm->user_regs.r11;
1317  }
1318  if (hvm->r12 != svm->user_regs.r12) {
1319  vdebug(9,LA_TARGET,LF_XV,"setting r12 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1320  svm->user_regs.r12,hvm->r12);
1321  hvm->r12 = svm->user_regs.r12;
1322  }
1323  if (hvm->r13 != svm->user_regs.r13) {
1324  vdebug(9,LA_TARGET,LF_XV,"setting r13 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1325  svm->user_regs.r13,hvm->r13);
1326  hvm->r13 = svm->user_regs.r13;
1327  }
1328  if (hvm->r14 != svm->user_regs.r14) {
1329  vdebug(9,LA_TARGET,LF_XV,"setting r14 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1330  svm->user_regs.r14,hvm->r14);
1331  hvm->r14 = svm->user_regs.r14;
1332  }
1333  if (hvm->r15 != svm->user_regs.r15) {
1334  vdebug(9,LA_TARGET,LF_XV,"setting r15 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1335  svm->user_regs.r15,hvm->r15);
1336  hvm->r15 = svm->user_regs.r15;
1337  }
1338 
1339  if (hvm->rip != svm->user_regs.rip) {
1340  vdebug(9,LA_TARGET,LF_XV,"setting rip = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1341  svm->user_regs.rip,hvm->rip);
1342  hvm->rip = svm->user_regs.rip;
1343  }
1344  if (hvm->rflags != svm->user_regs.rflags) {
1345  vdebug(9,LA_TARGET,LF_XV,"setting rflags = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1346  svm->user_regs.rflags,hvm->rflags);
1347  hvm->rflags = svm->user_regs.rflags;
1348  }
1349 
1350  if (hvm->error_code != svm->user_regs.error_code) {
1351  vdebug(9,LA_TARGET,LF_XV,"setting cr0 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1352  svm->user_regs.error_code,hvm->error_code);
1353  hvm->error_code = svm->user_regs.error_code;
1354  }
1355 
1356  /* XXX: cs, ds, es, fs, gs */
1357 
1358  /* XXX: ldt/gdt stuff */
1359 
1360  /* XXX: kernel_ss, kernel_sp */
1361 
1362  if (hvm->cr0 != svm->ctrlreg[0]) {
1363  vdebug(9,LA_TARGET,LF_XV,"setting cr0 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1364  svm->ctrlreg[0],hvm->cr0);
1365  hvm->cr0 = svm->ctrlreg[0];
1366  }
1367  if (hvm->cr2 != svm->ctrlreg[2]) {
1368  vdebug(9,LA_TARGET,LF_XV,"setting cr2 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1369  svm->ctrlreg[2],hvm->cr2);
1370  hvm->cr2 = svm->ctrlreg[2];
1371  }
1372  if (hvm->cr3 != svm->ctrlreg[3]) {
1373  vdebug(9,LA_TARGET,LF_XV,"setting cr3 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1374  svm->ctrlreg[3],hvm->cr3);
1375  hvm->cr3 = svm->ctrlreg[3];
1376  }
1377  if (hvm->cr4 != svm->ctrlreg[4]) {
1378  vdebug(9,LA_TARGET,LF_XV,"setting cr4 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1379  svm->ctrlreg[4],hvm->cr4);
1380  hvm->cr4 = svm->ctrlreg[4];
1381  }
1382 
1383  if (hvm->dr0 != svm->debugreg[0]) {
1384  vdebug(9,LA_TARGET,LF_XV,"setting dr0 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1385  svm->debugreg[0],hvm->dr0);
1386  hvm->dr0 = svm->debugreg[0];
1387  }
1388  if (hvm->dr1 != svm->debugreg[1]) {
1389  vdebug(9,LA_TARGET,LF_XV,"setting dr1 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1390  svm->debugreg[1],hvm->dr1);
1391  hvm->dr1 = svm->debugreg[1];
1392  }
1393  if (hvm->dr2 != svm->debugreg[2]) {
1394  vdebug(9,LA_TARGET,LF_XV,"setting dr2 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1395  svm->debugreg[2],hvm->dr2);
1396  hvm->dr2 = svm->debugreg[2];
1397  }
1398  if (hvm->dr3 != svm->debugreg[3]) {
1399  vdebug(9,LA_TARGET,LF_XV,"setting dr3 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1400  svm->debugreg[3],hvm->dr3);
1401  hvm->dr3 = svm->debugreg[3];
1402  }
1403  if (hvm->dr6 != svm->debugreg[6]) {
1404  vdebug(9,LA_TARGET,LF_XV,"setting dr6 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1405  svm->debugreg[6],hvm->dr6);
1406  hvm->dr6 = svm->debugreg[6];
1407  }
1408  if (hvm->dr7 != svm->debugreg[7]) {
1409  vdebug(9,LA_TARGET,LF_XV,"setting dr7 = 0x%"PRIx64" (old 0x%"PRIx64")\n",
1410  svm->debugreg[7],hvm->dr7);
1411  hvm->dr7 = svm->debugreg[7];
1412  }
1413 
1414  /* XXX: fs_base, gs_base_kernel, gs_base_user */
1415 
1416  return 0;
1417 }
1418 #endif
1419 
1420 /*
1421  * Simple wrapper around xc_vcpu_getcontext and the HVM stuff.
1422  *
1423  * NB: it appears that the only reason to use the HVM-specific stuff
1424  * (for CPU info) is to get correct segment register info, the VMCS/VMCB
1425  * stuff, LDT stuff; pretty much everything else is already in
1426  * vcpu_guest_context for the VCPU in question (see
1427  * xen/xen/arch/x86/hvm/hvm.c:hvm_save_cpu_ctxt()).
1428  *
1429  * If the domain is HVM, it populates a vcpu_guest_context as best as
1430  * possible from HVM info. It keeps the HVM data around for a later
1431  * setcontext operation.
1432  *
1433  * XXX: notice that we only load the highest-number VCPU. Initially we
1434  * focused on single-core VMs; that assumption is built into the code.
1435  * We can relax it sometime; but that's the reason for the code being
1436  * like it is.
1437  */
1438 static int __xen_vm_cpu_getcontext(struct target *target,
1439  vcpu_guest_context_t *context) {
1440  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
1441  struct xen_vm_spec *xspec = (struct xen_vm_spec *)target->spec->backend_spec;
1442 #ifdef __x86_64__
1443  uint32_t size = 0;
1444  uint32_t offset = 0;
1445  struct hvm_save_descriptor *sdesc = NULL;
1446 #endif
1447 #ifdef XC_HAVE_CONTEXT_ANY
1448  vcpu_guest_context_any_t context_any;
1449 #endif
1450  int ret;
1451 
1452  if (!xstate->hvm || xspec->no_hvm_setcontext) {
1453 #ifdef XC_HAVE_CONTEXT_ANY
1454  ret = xc_vcpu_getcontext(xc_handle,xstate->id,
1455  xstate->dominfo.max_vcpu_id,&context_any);
1456 #else
1457  ret = xc_vcpu_getcontext(xc_handle,xstate->id,
1458  xstate->dominfo.max_vcpu_id,context);
1459 #endif
1460  if (ret < 0) {
1461  verror("could not get vcpu context for %d\n",xstate->id);
1462  return -1;
1463  }
1464 #ifdef XC_HAVE_CONTEXT_ANY
1465  else
1466  memcpy(context,&context_any.c,sizeof(*context));
1467 #endif
1468  }
1469  else {
1470 #ifdef __x86_64__
1471  if ((size = xc_domain_hvm_getcontext(xc_handle,xstate->id,0,0)) <= 0) {
1472  verror("Could not get HVM context buf size!\n");
1473  return -1;
1474  }
1475 
1476  /* Handle increasing size; this should not happen. */
1477  if (unlikely(!xstate->hvm_context_buf)) {
1478  xstate->hvm_context_bufsiz = size;
1479  xstate->hvm_context_buf = malloc(size);
1480  }
1481  else if (size >= xstate->hvm_context_bufsiz) {
1482  free(xstate->hvm_context_buf);
1483  xstate->hvm_context_bufsiz = size;
1484  xstate->hvm_context_buf = malloc(size);
1485  }
1486 
1487  xstate->hvm_cpu = NULL;
1488 
1489  if (xc_domain_hvm_getcontext(xc_handle,xstate->id,xstate->hvm_context_buf,
1490  xstate->hvm_context_bufsiz) < 0) {
1491  verror("Could not load HVM context buf!\n");
1492  return -1;
1493  }
1494 
1495  offset = 0;
1496  while (offset < size) {
1497  sdesc = (struct hvm_save_descriptor *) \
1498  (xstate->hvm_context_buf + offset);
1499 
1500  offset += sizeof(*sdesc);
1501 
1502  if (sdesc->typecode == HVM_SAVE_CODE(CPU)
1503  && sdesc->instance == xstate->dominfo.max_vcpu_id) {
1504  xstate->hvm_cpu = (HVM_SAVE_TYPE(CPU) *) \
1505  (xstate->hvm_context_buf + offset);
1506  break;
1507  }
1508 
1509  offset += sdesc->length;
1510  }
1511 
1512  if (!xstate->hvm_cpu) {
1513  verror("Could not find HVM context for VCPU %d!\n",
1514  xstate->dominfo.max_vcpu_id);
1515  return -1;
1516  }
1517 
1518  if (__xen_vm_hvm_cpu_to_vcpu_context(xstate->hvm_cpu,context)) {
1519  verror("Could not translate HVM vcpu info to software vcpu info!\n");
1520  return -1;
1521  }
1522 #else
1523  /* Impossible. */
1524  verror("HVM unsupported on 32-bit platform!\n");
1525  errno = EINVAL;
1526  return -1;
1527 #endif
1528  }
1529 
1530  return 0;
1531 }
1532 
1533 static int __xen_vm_cpu_setcontext(struct target *target,
1534  vcpu_guest_context_t *context) {
1535  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
1536  struct xen_vm_spec *xspec = (struct xen_vm_spec *)target->spec->backend_spec;
1537 #ifdef XC_HAVE_CONTEXT_ANY
1538  vcpu_guest_context_any_t context_any;
1539 #endif
1540  int ret;
1541 
1542  if (!xstate->hvm || xspec->no_hvm_setcontext) {
1543 #ifdef XC_HAVE_CONTEXT_ANY
1544  memcpy(&context_any.c,context,sizeof(*context));
1545  ret = xc_vcpu_setcontext(xc_handle,xstate->id,
1546  xstate->dominfo.max_vcpu_id,&context_any);
1547 #else
1548  ret = xc_vcpu_setcontext(xc_handle,xstate->id,
1549  xstate->dominfo.max_vcpu_id,context);
1550 #endif
1551  if (ret < 0) {
1552  verror("could not set vcpu context for dom %d\n",xstate->id);
1553  errno = EINVAL;
1554  return -1;
1555  }
1556  }
1557  else {
1558 #ifdef __x86_64__
1559  if (__xen_vm_vcpu_to_hvm_cpu_context(context,xstate->hvm_cpu)) {
1560  verror("Could not translate software vcpu info to HVM vcpu info!\n");
1561  return -1;
1562  }
1563 
1564  if (xc_domain_hvm_setcontext(xc_handle,xstate->id,
1565  xstate->hvm_context_buf,
1566  xstate->hvm_context_bufsiz)) {
1567  verror("Could not store HVM context buf!\n");
1568  return -1;
1569  }
1570 #else
1571  /* Impossible. */
1572  verror("HVM unsupported on 32-bit platform!\n");
1573  errno = EINVAL;
1574  return -1;
1575 #endif
1576  }
1577 
1578  return 0;
1579 }
1580 
1581 static struct target_thread *__xen_vm_load_current_thread(struct target *target,
1582  int force,
1583  int globalonly) {
1584  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
1585  struct target_thread *tthread = NULL;
1586  struct xen_vm_thread_state *tstate = NULL;
1587  struct xen_vm_thread_state *gtstate;
1588  REGVAL ipval;
1589  uint64_t pgd = 0;
1590  int cpl;
1591 
1592  /*
1593  * If the global thread has been loaded, and that's all the caller
1594  * wants, and they don't want to force a reload, give them that.
1595  */
1596  if (globalonly && !force
1597  && target->global_thread && OBJVALID(target->global_thread))
1598  return target->global_thread;
1599  /*
1600  * Otherwise, if the current thread is valid, and we're not forcing
1601  * a reload, give them the current thread.
1602  */
1603  else if (!globalonly && !force
1604  && target->current_thread && OBJVALID(target->current_thread))
1605  return target->current_thread;
1606 
1607  if (target_status(target) != TSTATUS_PAUSED) {
1608  verror("target not paused; cannot load current task!\n");
1609  errno = EBUSY;
1610  return NULL;
1611  }
1612 
1613  /*
1614  * The first thing to do is load the machine state into the global
1615  * thread, and set it as valid -- EVEN THOUGH we have not loaded
1616  * thread_info for it! We must do this so that a whole bunch of
1617  * register reads can work via the API.
1618  */
1619  if (xen_vm_load_dominfo(target)) {
1620  verror("could not load dominfo!\n");
1621  errno = EFAULT;
1622  return NULL;
1623  }
1624 
1625  gtstate = (struct xen_vm_thread_state *)target->global_thread->state;
1626 
1627  /*
1628  * Only need to call xc if we haven't loaded this thread.
1629  */
1630  if (!OBJVALID(target->global_thread)) {
1631  if (__xen_vm_cpu_getcontext(target,&gtstate->context) < 0) {
1632  verror("could not get vcpu context for %d\n",xstate->id);
1633  goto errout;
1634  }
1635  }
1636 
1637  /*
1638  * Load EIP for info, and CPL for user-mode check.
1639  *
1640  * NB: note that these two calls do *not* go through the target
1641  * API. They cannot, because the global thread has not been loaded
1642  * yet. And we can't finish loading the global thread yet, even
1643  * though we have the machine state, because we don't know which
1644  * thread context's regcache to put the machine state into (kernel
1645  * or userspace).
1646  */
1647  errno = 0;
1648 #ifdef __x86_64__
1649  ipval = gtstate->context.user_regs.rip;
1650 #else
1651  ipval = gtstate->context.user_regs.eip;
1652 #endif
1653 
1654  cpl = 0x3 & gtstate->context.user_regs.cs;
1655 
1656  /* Keep loading the global thread... */
1657  if(!OBJVALID(target->global_thread)) {
1658  if (__xen_vm_in_userspace(target,cpl,ipval))
1659  target->global_thread->tidctxt = THREAD_CTXT_USER;
1660  else
1661  target->global_thread->tidctxt = THREAD_CTXT_KERNEL;
1662 
1663  /*
1664  * Push the registers into the regcache!
1665  */
1666  __xen_vm_vcpu_to_thread_regcache(target,&gtstate->context,
1667  target->global_thread,
1668  target->global_thread->tidctxt);
1669 
1670  /*
1671  * Very important. If thread is in userspace, we need to get
1672  * Xen's special kernel_sp register and set it as SP for the
1673  * kernel context so that personalities can load kernel threads
1674  * on i386 because they need kernel_sp to find the stack. On
1675  * x86_64 this is not necessary.
1676  */
1677  if (target->global_thread->tidctxt == THREAD_CTXT_USER) {
1678  target_regcache_init_reg_tidctxt(target,target->global_thread,
1679  THREAD_CTXT_KERNEL,target->spregno,
1680  gtstate->context.kernel_sp);
1681  }
1682 
1683  /*
1684  * NB: we must set the thread as valid now, because the next few
1685  * function calls are going to try to use the target API to read
1686  * registers from the global thread. So even through we're
1687  * technically still loading it, mark it as valid now... it'll
1688  * be fully valid shortly!
1689  */
1690  OBJSVALID(target->global_thread);
1692  }
1693 
1694  /*
1695  * Load CR3 for debug purposes.
1696  */
1697  __xen_vm_pgd(target,TID_GLOBAL,&pgd);
1698 
1700  "loading current thread (ip = 0x%"PRIxADDR",pgd = 0x%"PRIxADDR","
1701  "cpl = %d,tidctxt = %d)\n",ipval,pgd,cpl,
1702  target->global_thread->tidctxt);
1703 
1704  /*
1705  * If only loading the global thread, stop here.
1706  */
1707  if (globalonly)
1708  return target->global_thread;
1709 
1710  /*
1711  * Ask the personality to detect our current thread.
1712  */
1713  SAFE_PERSONALITY_OP(load_current_thread,tthread,NULL,target,force);
1714 
1715  /*
1716  * Set the current thread (might be a real thread, or the global
1717  * thread). If the personality detects a current thread, use it;
1718  * otherwise we have to just use the global thread!
1719  */
1720  if (tthread) {
1721  target->current_thread = tthread;
1722 
1723  /*
1724  * We want to set the current thread's context to whatever the
1725  * global thread was detected to be in. Enforce our will, no
1726  * matter what the personality does!
1727  */
1728  if (tthread->tidctxt != target->global_thread->tidctxt) {
1729  vwarn("personality set current thread context to %d; global thread"
1730  " context is %d; forcing current to global!\n",
1731  tthread->tidctxt,target->global_thread->tidctxt);
1732  tthread->tidctxt = target->global_thread->tidctxt;
1733  }
1734 
1735  /*
1736  * Now, copy in the machine state. Be careful -- if we have not
1737  * allocated tthread->state yet, allocate it now!
1738  */
1739  tstate = (struct xen_vm_thread_state *)tthread->state;
1740  if (!tstate)
1741  tthread->state = tstate = \
1742  (struct xen_vm_thread_state *)calloc(1,sizeof(*tstate));
1743 
1744  memcpy(&tstate->context,&gtstate->context,sizeof(gtstate->context));
1745 
1746  /* Also update the regcache for the current thread. */
1747  target_regcache_copy_all(target->global_thread,
1748  target->global_thread->tidctxt,
1749  tthread,tthread->tidctxt);
1750  }
1751  else
1752  target->current_thread = target->global_thread;
1753 
1755 
1757  "debug registers (vcpu context): 0x%"PRIxADDR",0x%"PRIxADDR
1758  ",0x%"PRIxADDR",0x%"PRIxADDR",0,0,0x%"PRIxADDR",0x%"PRIxADDR"\n",
1759  gtstate->context.debugreg[0],gtstate->context.debugreg[1],
1760  gtstate->context.debugreg[2],gtstate->context.debugreg[3],
1761  gtstate->context.debugreg[6],gtstate->context.debugreg[7]);
1762 
1763  /* Mark its state as valid in our cache. */
1764  OBJSVALID(tthread);
1765 
1766  return tthread;
1767 
1768  errout:
1769  /* XXX: should we really set this here? */
1770  target->current_thread = target->global_thread;
1771 
1772  vwarn("error loading current thread; trying to use default thread\n");
1773  errno = 0;
1774 
1775  return target->global_thread;
1776 }
1777 
1778 static struct target_thread *xen_vm_load_current_thread(struct target *target,
1779  int force) {
1780  return __xen_vm_load_current_thread(target,force,0);
1781 }
1782 
1787 /*
1788  * If the target is not paused, the result of this function is
1789  * undefined.
1790  *
1791  * Otherwise, first we get the CPL out of the lower two bits of the CS
1792  * register. Then we grab the current task and its pid.
1793  */
1794 tid_t xen_vm_gettid(struct target *target) {
1795  struct target_thread *tthread;
1796 
1797  if (target->current_thread && OBJVALID(target->current_thread))
1798  return target->current_thread->tid;
1799 
1800  tthread = xen_vm_load_current_thread(target,0);
1801  if (!tthread) {
1802  verror("could not load current thread to get TID!\n");
1803  return 0;
1804  }
1805 
1806  return tthread->tid;
1807 }
1808 
1809 void xen_vm_free_thread_state(struct target *target,void *state) {
1810  free(state);
1811 }
1812 
1813 static int xen_vm_snprintf(struct target *target,char *buf,int bufsiz) {
1814  struct xen_vm_spec *xspec = \
1815  (struct xen_vm_spec *)target->spec->backend_spec;
1816 
1817  return snprintf(buf,bufsiz,"domain(%s)",xspec->domain);
1818 }
1819 
1820 static int xen_vm_init(struct target *target) {
1821  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
1822  struct xen_vm_thread_state *tstate;
1823  struct xen_vm_spec *xspec = (struct xen_vm_spec *)target->spec->backend_spec;
1824 
1825  vdebug(5,LA_TARGET,LF_XV,"dom %d\n",xstate->id);
1826 
1827  if (target->spec->bpmode == THREAD_BPMODE_STRICT) {
1828  vwarn("auto-enabling SEMI_STRICT bpmode on Xen target.\n");
1829  target->spec->bpmode = THREAD_BPMODE_SEMI_STRICT;
1830  }
1831 
1832  if (xspec && xspec->hypervisor_ignores_userspace_exceptions)
1833  g_hash_table_insert(target->config,
1834  strdup("OS_EMULATE_USERSPACE_EXCEPTIONS"),
1835  strdup("1"));
1836 
1837  /*
1838  * We can use the RF flag to temporarily disable the hw breakpoint
1839  * if we don't need to single step the instruction under the
1840  * breakpoint (i.e., because there are no post handlers or actions).
1841  * This saves us from disabling the hw breakpoint in this situation.
1842  */
1843  target->nodisablehwbponss = 1;
1844  target->threadctl = 0;
1845 
1846  xstate->dominfo_valid = 0;
1847  xstate->dominfo.domid = 0;
1848  xstate->dominfo.dying = 0;
1849  xstate->dominfo.crashed = 0;
1850  xstate->dominfo.shutdown = 0;
1851  xstate->dominfo.paused = 0;
1852  xstate->dominfo.blocked = 0;
1853  xstate->dominfo.running = 0;
1854  xstate->dominfo.hvm = 0;
1855  xstate->dominfo.debugged = 0;
1856  xstate->dominfo.shutdown_reason = 0;
1857  xstate->dominfo.max_vcpu_id = 0;
1858  xstate->dominfo.shared_info_frame = 0;
1859 
1860  xstate->xen_vm_vmp_client_fd = -1;
1861  xstate->xen_vm_vmp_client_path = NULL;
1862 
1863  /* Create the default thread. */
1864  tstate = (struct xen_vm_thread_state *)calloc(1,sizeof(*tstate));
1865 
1866  target->global_thread = target_create_thread(target,TID_GLOBAL,tstate,NULL);
1867  /* Default thread is always running. */
1868  target_thread_set_status(target->global_thread,THREAD_STATUS_RUNNING);
1869 
1870  /* Create our default context now; update its region later. */
1871  target->global_tlctxt =
1872  target_location_ctxt_create(target,TID_GLOBAL,NULL);
1873 
1874  return 0;
1875 }
1876 
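/*
 * To illustrate the RF trick described in xen_vm_init() above: on x86,
 * setting EFLAGS.RF (bit 16) before resuming suppresses instruction
 * breakpoints for exactly one instruction, so the hw breakpoint never
 * has to be disarmed. A minimal sketch (the `regs' variable here is
 * hypothetical, not this file's API):
 */
#if 0
#define X86_EFLAGS_RF (1 << 16)
	/* Resume past a hw instruction bp without touching DR7: */
	regs->eflags |= X86_EFLAGS_RF;
#endif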
1877 #ifdef XENCTRL_HAS_XC_INTERFACE
1878 int xen_vm_xc_attach(xc_interface **xc_handle,xc_interface **xce_handle) {
1879 #else
1880 int xen_vm_xc_attach(int *xc_handle,int *xce_handle) {
1881 #endif
1882 
1883  if (xc_handle && *xc_handle == XC_IF_INVALID) {
1884 #ifdef XENCTRL_HAS_XC_INTERFACE
1885  *xc_handle = xc_interface_open(NULL,NULL,0);
1886 #else
1887  *xc_handle = xc_interface_open();
1888 #endif
1889  if (*xc_handle == XC_IF_INVALID) {
1890  verror("failed to open xc interface: %s\n",strerror(errno));
1891  return -1;
1892  }
1893  }
1894 
1895  if (xce_handle && *xce_handle == XC_IF_INVALID) {
1896 #ifdef XENCTRL_HAS_XC_INTERFACE
1897  *xce_handle = xc_evtchn_open(NULL,0);
1898 #else
1899  *xce_handle = xc_evtchn_open();
1900 #endif
1901  if (*xce_handle == XC_IF_INVALID) {
1902  verror("failed to open event channel: %s\n",strerror(errno));
1903  return -1;
1904  }
1905  }
1906 
1907  return 0;
1908 }
1909 
1910 #ifdef XENCTRL_HAS_XC_INTERFACE
1911 int xen_vm_xc_detach(xc_interface **xc_handle,xc_interface **xce_handle)
1912 #else
1913 int xen_vm_xc_detach(int *xc_handle,int *xce_handle)
1914 #endif
1915 {
1916  if (xc_handle && *xc_handle != XC_IF_INVALID) {
1917  xc_interface_close(*xc_handle);
1918  *xc_handle = XC_IF_INVALID;
1919  }
1920 
1921  if (xce_handle && *xce_handle != XC_IF_INVALID) {
1922  xc_evtchn_close(*xce_handle);
1923  *xce_handle = XC_IF_INVALID;
1924  }
1925 
1926  return 0;
1927 }
1928 
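/*
 * A minimal caller sketch for the pair above (hypothetical caller, not
 * part of this file): both helpers are no-ops unless the handles are
 * XC_IF_INVALID, so handles must be initialized that way before use.
 */
#if 0
#ifdef XENCTRL_HAS_XC_INTERFACE
	xc_interface *xch = XC_IF_INVALID,*xceh = XC_IF_INVALID;
#else
	int xch = XC_IF_INVALID,xceh = XC_IF_INVALID;
#endif
	if (xen_vm_xc_attach(&xch,&xceh))
	    return -1;
	/* ... use xch for domctls, xceh for event channels ... */
	xen_vm_xc_detach(&xch,&xceh);
#endif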
1929 #ifdef XENCTRL_HAS_XC_INTERFACE
1930 int xen_vm_virq_attach(xc_interface *xce_handle,XC_EVTCHN_PORT_T *dbg_port)
1931 #else
1932 int xen_vm_virq_attach(int xce_handle,XC_EVTCHN_PORT_T *dbg_port)
1933 #endif
1934 {
1935  if (dbg_port && *dbg_port == -1) {
1936  *dbg_port = xc_evtchn_bind_virq(xce_handle,VIRQ_DEBUGGER);
1937  /* Try to cast dbg_port to something signed. Old xc versions
1938  * have a bug in that evtchn_port_t is declared as uint32_t, but
1939  * the function prototypes that return them can theoretically
1940  * return -1. So, try to test for that...
1941  */
1942  if ((int32_t)*dbg_port < 0) {
1943  verror("failed to bind debug virq port: %s",strerror(errno));
1944  return -1;
1945  }
1946  }
1947 
1948  return 0;
1949 }
1950 
1951 #ifdef XENCTRL_HAS_XC_INTERFACE
1952 int xen_vm_virq_detach(xc_interface *xce_handle,XC_EVTCHN_PORT_T *dbg_port)
1953 #else
1954 int xen_vm_virq_detach(int xce_handle,XC_EVTCHN_PORT_T *dbg_port)
1955 #endif
1956 {
1957  if (dbg_port && *dbg_port != -1) {
1958  if (xc_evtchn_unbind(xce_handle,(evtchn_port_t)*dbg_port)) {
1959  verror("failed to unbind debug virq port\n");
1960  return -1;
1961  }
1962 
1963  *dbg_port = -1;
1964  }
1965 
1966  return 0;
1967 }
1968 
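/*
 * A sketch of the usual lifetime of the debug VIRQ binding (assumes a
 * valid xce_handle; error handling elided): bind once, then wait on the
 * event-channel fd and consume/unmask pending notifications, as
 * xen_vm_virq_or_vmp_read() does below.
 */
#if 0
	XC_EVTCHN_PORT_T dbg_port = -1;
	if (xen_vm_virq_attach(xce_handle,&dbg_port))
	    return -1;
	int fd = xc_evtchn_fd(xce_handle);
	/* select()/poll() on fd; once readable: */
	evtchn_port_t port = xc_evtchn_pending(xce_handle);
	xc_evtchn_unmask(xce_handle,port);
	/* ... check which paused domain(s) need service ... */
	xen_vm_virq_detach(xce_handle,&dbg_port);
#endif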
1969 int xen_vm_vmp_attach(char *path,int *cfd,char **cpath) {
1970  struct stat sbuf;
1971  struct sockaddr_un sun,sun_client;
1972  char *tmpdir;
1973  char *spath;
1974  int spath_len,cpath_len;
1975  int len,fd;
1976 
1977  assert(cfd);
1978  assert(cpath);
1979 
1980  if (cfd && *cfd != -1)
1981  return 0;
1982 
1983  if (!path) {
1984  /*
1985  * Just try /var/run or TMPDIR or /tmp or .
1986  */
1987  if (stat("/var/run",&sbuf) == 0
1988  && S_ISDIR(sbuf.st_mode) && access("/var/run",W_OK) == 0) {
1989  spath_len = strlen("/var/run") + 1 + strlen(TARGET_XV_VMP_SOCKET_FILENAME) + 1;
1990  spath = malloc(spath_len);
1991  snprintf(spath,spath_len,"%s/%s","/var/run",TARGET_XV_VMP_SOCKET_FILENAME);
1992  }
1993  else if ((tmpdir = getenv("TMPDIR"))
1994  && stat(tmpdir,&sbuf) == 0 && access(tmpdir,W_OK) == 0) {
1995  spath_len = strlen(tmpdir) + 1 + strlen(TARGET_XV_VMP_SOCKET_FILENAME) + 1;
1996  spath = malloc(spath_len);
1997  snprintf(spath,spath_len,"%s/%s",tmpdir,TARGET_XV_VMP_SOCKET_FILENAME);
1998  }
1999  else if (stat("/tmp",&sbuf) == 0
2000  && S_ISDIR(sbuf.st_mode) && access("/tmp",W_OK) == 0) {
2001  spath_len = strlen("/tmp") + 1 + strlen(TARGET_XV_VMP_SOCKET_FILENAME) + 1;
2002  spath = malloc(spath_len);
2003  snprintf(spath,spath_len,"%s/%s","/tmp",TARGET_XV_VMP_SOCKET_FILENAME);
2004  }
2005  else {
2006  spath_len = strlen(".") + 1 + strlen(TARGET_XV_VMP_SOCKET_FILENAME) + 1;
2007  spath = malloc(spath_len);
2008  snprintf(spath,spath_len,"%s/%s",".",TARGET_XV_VMP_SOCKET_FILENAME);
2009  }
2010  }
2011  else
2012  spath = strdup(path);
2013 
2014  memset(&sun,0,sizeof(sun));
2015  sun.sun_family = AF_UNIX;
2016  snprintf(sun.sun_path,UNIX_PATH_MAX,"%s",spath);
2017 
2018  /*
2019  * The server only accepts path-bound unix domain socket
2020  * connections, so bind one and do it. Try to use the same basedir
2021  * as in @spath; else use TMPDIR or /tmp or .
2022  */
2023  if (1) {
2024  dirname(spath);
2025 
2026  cpath_len = strlen(spath) + 1
2027      + strlen(TARGET_XV_VMP_SOCKET_CLIENT_FILE_FORMAT)
2028      + 16 + 1;
2029  *cpath = malloc(cpath_len);
2030 
2031  snprintf(*cpath,cpath_len,"%s/" TARGET_XV_VMP_SOCKET_CLIENT_FILE_FORMAT,
2032  spath,getpid());
2033  if ((fd = open(*cpath,O_CREAT | O_RDWR,S_IRUSR | S_IWUSR)) < 0) {
2034  vwarn(
2035  "could not open client VMP socket file %s: %s\n",
2036  *cpath,strerror(errno));
2037  free(*cpath);
2038  *cpath = NULL;
2039  }
2040  else { close(fd); unlink(*cpath); }
2041  }
2042 
2043  if (!*cpath && (tmpdir = getenv("TMPDIR"))) {
2044  cpath_len = strlen(tmpdir) + 1
2045      + strlen(TARGET_XV_VMP_SOCKET_CLIENT_FILE_FORMAT)
2046      + 16 + 1;
2047  *cpath = malloc(cpath_len);
2048 
2049  snprintf(*cpath,cpath_len,"%s/" TARGET_XV_VMP_SOCKET_CLIENT_FILE_FORMAT,
2050  tmpdir,getpid());
2051  if ((fd = open(*cpath,O_CREAT | O_RDWR,S_IRUSR | S_IWUSR)) < 0) {
2052  vwarn(
2053  "could not open client VMP socket file %s: %s\n",
2054  *cpath,strerror(errno));
2055  free(*cpath);
2056  *cpath = NULL;
2057  }
2058  else { close(fd); unlink(*cpath); }
2059  }
2060 
2061  if (!*cpath) {
2062  cpath_len = strlen("/tmp") + 1
2063      + strlen(TARGET_XV_VMP_SOCKET_CLIENT_FILE_FORMAT)
2064      + 16 + 1;
2065  *cpath = malloc(cpath_len);
2066 
2067  snprintf(*cpath,cpath_len,"%s/" TARGET_XV_VMP_SOCKET_CLIENT_FILE_FORMAT,
2068  "/tmp",getpid());
2069  if ((fd = open(*cpath,O_CREAT | O_RDWR,S_IRUSR | S_IWUSR)) < 0) {
2070  vwarn(
2071  "could not open client VMP socket file %s: %s\n",
2072  *cpath,strerror(errno));
2073  free(*cpath);
2074  *cpath = NULL;
2075  }
2076  else { close(fd); unlink(*cpath); }
2077  }
2078 
2079  if (!*cpath) {
2080  cpath_len = strlen(".") + 1
2081      + strlen(TARGET_XV_VMP_SOCKET_CLIENT_FILE_FORMAT)
2082      + 16 + 1;
2083  *cpath = malloc(cpath_len);
2084 
2085  snprintf(*cpath,cpath_len,"%s/" TARGET_XV_VMP_SOCKET_CLIENT_FILE_FORMAT,
2086  ".",getpid());
2087  if ((fd = open(*cpath,O_CREAT | O_RDWR,S_IRUSR | S_IWUSR)) < 0) {
2088  vwarn(
2089  "could not open client VMP socket file %s: %s\n",
2090  *cpath,strerror(errno));
2091  free(*cpath);
2092  *cpath = NULL;
2093  }
2094  else { close(fd); unlink(*cpath); }
2095  }
2096 
2097  if (!*cpath) {
2098  verror("could not open a client VMP socket file; aborting!\n");
2099  goto err;
2100  }
2101 
2102  memset(&sun_client,0,sizeof(sun_client));
2103  sun_client.sun_family = AF_UNIX;
2104  snprintf(sun_client.sun_path,UNIX_PATH_MAX,"%s",*cpath);
2105 
2106  *cfd = socket(AF_UNIX,SOCK_STREAM,0);
2107  if (*cfd < 0) {
2108  verror("socket(): %s\n",strerror(errno));
2109  goto err;
2110  }
2111  len = offsetof(struct sockaddr_un,sun_path) + strlen(sun_client.sun_path);
2112  if (bind(*cfd,(struct sockaddr *)&sun_client,len) < 0) {
2113  verror("bind(%s): %s\n",sun_client.sun_path,strerror(errno));
2114  goto err;
2115  }
2116  if (fchmod(*cfd,S_IRUSR | S_IWUSR) < 0) {
2117  verror("fchmod(%s): %s\n",sun_client.sun_path,strerror(errno));
2118  goto err;
2119  }
2120 
2121  len = offsetof(struct sockaddr_un,sun_path) + strlen(sun.sun_path);
2122  if (connect(*cfd,(struct sockaddr *)&sun,len) < 0) {
2123  verror("connect(%s): %s\n",sun.sun_path,strerror(errno));
2124  goto err;
2125  }
2126 
2127  free(spath);
2128 
2129  return 0;
2130 
2131  err:
2132  *cfd = -1;
2133  if (*cpath)
2134  free(*cpath);
2135  *cpath = NULL;
2136  free(spath);
2137 
2138  return -1;
2139 }
2140 
2141 int xen_vm_vmp_detach(int *cfd,char **cpath) {
2142  if (cfd && *cfd != -1) {
2143  close(*cfd);
2144  *cfd = -1;
2145  if (cpath && *cpath) {
2146  unlink(*cpath);
2147  free(*cpath);
2148  *cpath = NULL;
2149  }
2150  }
2151 
2152  return 0;
2153 }
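/*
 * A caller sketch for the multiplexer client helpers above (assumes the
 * multiplexer daemon is running; a NULL @path makes xen_vm_vmp_attach()
 * probe /var/run, $TMPDIR, /tmp, and . in that order):
 */
#if 0
	int cfd = -1;
	char *cpath = NULL;
	if (xen_vm_vmp_attach(NULL,&cfd,&cpath))
	    return -1;
	/* ... read struct target_xen_vm_vmp_client_response msgs from cfd ... */
	xen_vm_vmp_detach(&cfd,&cpath);
#endif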
2154 
2155 int xen_vm_vmp_launch() {
2156  int rc;
2157 
2158  rc = system(TARGET_XV_VMP_BIN_PATH);
2159  if (rc) {
2160  verror("system(%s): %s\n",TARGET_XV_VMP_BIN_PATH,strerror(errno));
2161  return -1;
2162  }
2163 
2164  return 0;
2165 }
2166 
2167 int xen_vm_virq_or_vmp_attach_or_launch(struct target *target) {
2168  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
2169  struct xen_vm_spec *xspec = (struct xen_vm_spec *)target->spec->backend_spec;
2170  int i;
2171  int rc = -1;
2172 
2173  if (xspec->no_use_multiplexer) {
2174  if (dbg_port > -1) {
2175  verror("cannot connect for multiple domains without multiplexer!\n");
2176  errno = EINVAL;
2177  return -1;
2178  }
2179  else
2180  return xen_vm_virq_attach(xce_handle,&dbg_port);
2181  }
2182 
2183  /* Try to connect. If we can't, then launch, wait, and try again. */
2184  if (xen_vm_vmp_attach(NULL,&xstate->xen_vm_vmp_client_fd,&xstate->xen_vm_vmp_client_path)) {
2185  if (xen_vm_vmp_launch()) {
2186  verror("could not launch Xen VIRQ_DEBUGGER multiplexer!\n");
2187  return -1;
2188  }
2189  else {
2190  vdebug(6,LA_TARGET,LF_XV,"launched Xen VIRQ_DEBUGGER multiplexer!\n");
2191  }
2192 
2193  for (i = 0; i < 5; ++i) {
2194  rc = xen_vm_vmp_attach(NULL,&xstate->xen_vm_vmp_client_fd,
2195  &xstate->xen_vm_vmp_client_path);
2196  if (rc == 0)
2197  break;
2198  else
2199  sleep(1);
2200  }
2201 
2202  if (rc) {
2203  verror("could not connect to launched Xen VIRQ_DEBUGGER multiplexer!\n");
2204  return -1;
2205  }
2206  }
2207 
2208  vdebug(6,LA_TARGET,LF_XV,"connected to Xen VIRQ_DEBUGGER multiplexer!\n");
2209 
2210  return 0;
2211 }
2212 
2213 int xen_vm_virq_or_vmp_detach(struct target *target) {
2214  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
2215 
2216  if (dbg_port != -1) {
2217  xce_handle_fd = -1;
2218  return xen_vm_virq_detach(xce_handle,&dbg_port);
2219  }
2220  else
2221  return xen_vm_vmp_detach(&xstate->xen_vm_vmp_client_fd,
2222  &xstate->xen_vm_vmp_client_path);
2223 }
2224 
2225 int xen_vm_virq_or_vmp_get_fd(struct target *target) {
2226  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
2227 
2228  if (dbg_port != -1) {
2229  if (xce_handle_fd == -1)
2230  xce_handle_fd = xc_evtchn_fd(xce_handle);
2231  return xce_handle_fd;
2232  }
2233  else
2234  return xstate->xen_vm_vmp_client_fd;
2235 }
2236 
2237 int xen_vm_virq_or_vmp_read(struct target *target,int *vmid) {
2238  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
2239  XC_EVTCHN_PORT_T port = -1;
2240  struct target_xen_vm_vmp_client_response resp = { 0 };
2241  int retval;
2242  int rc;
2243 
2244  if (dbg_port != -1) {
2245  /* we've got something from eventchn. let's see what it is! */
2246  port = xc_evtchn_pending(xce_handle);
2247 
2248  /* unmask the event channel BEFORE doing anything else,
2249  * like unpausing the target!
2250  */
2251  retval = xc_evtchn_unmask(xce_handle, port);
2252  if (retval == -1) {
2253  verror("failed to unmask event channel\n");
2254  return -1;
2255  }
2256 
2257  if (port != dbg_port) {
2258  *vmid = -1;
2259  return 0;
2260  }
2261  else {
2262  /* XXX: don't try to figure out which VM; must check them
2263  * all; no infallible way to find out which one(s).
2264  */
2265  *vmid = 0;
2266  return 0;
2267  }
2268  }
2269  else {
2270  rc = read(xstate->xen_vm_vmp_client_fd,&resp,sizeof(resp));
2271  if (rc < 0) {
2272  if (errno == EINTR) {
2273  *vmid = -1;
2274  return 0;
2275  }
2276  return -1;
2277  }
2278  else if (rc == 0) {
2279  return -1;
2280  }
2281  else if (rc != sizeof(resp)) {
2282  return -1;
2283  }
2284  else {
2285  *vmid = resp.vmid;
2286  return 0;
2287  }
2288  }
2289 
2290  /* Not reached, despite what gcc thinks! */
2291  return -1;
2292 }
2293 
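/*
 * How the two helpers above compose into a wait loop, regardless of
 * whether notifications arrive on the raw VIRQ event channel or the
 * multiplexer socket (sketch only; error handling elided). *vmid == -1
 * means the wakeup was not for us; 0 means "check all domains" (raw
 * VIRQ); > 0 names the domain (multiplexer).
 */
#if 0
	int fd = xen_vm_virq_or_vmp_get_fd(target);
	fd_set rfds;
	FD_ZERO(&rfds);
	FD_SET(fd,&rfds);
	if (select(fd + 1,&rfds,NULL,NULL,NULL) > 0) {
	    int vmid;
	    if (xen_vm_virq_or_vmp_read(target,&vmid) == 0 && vmid != -1)
		/* ... handle the debug event for that domain ... */ ;
	}
#endif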
2294 static int xen_vm_attach_internal(struct target *target) {
2295  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
2296  struct xen_domctl domctl;
2297  struct target_thread *tthread;
2298  struct xen_vm_thread_state *xtstate;
2299  struct xen_vm_spec *xspec;
2300 
2301  xspec = (struct xen_vm_spec *)target->spec->backend_spec;
2302 
2303  domctl.cmd = XEN_DOMCTL_setdebugging;
2304  domctl.domain = xstate->id;
2305  domctl.u.setdebugging.enable = true;
2306 
2307  vdebug(5,LA_TARGET,LF_XV,"dom %d\n",xstate->id);
2308 
2309  /*
2310  * Always attach to XC.
2311  */
2312  if (xen_vm_xc_attach(&xc_handle,&xce_handle))
2313  return -1;
2314 
2315  /*
2316  * Connect to VIRQ_DEBUGGER, either through demultiplexer daemon, or
2317  * directly. If daemon, launch or connect...
2318  */
2319  if (xen_vm_virq_or_vmp_attach_or_launch(target))
2320  return -1;
2321 
2322  /* NOT thread-safe! */
2323  ++xc_refcnt;
2324 
2325  if (xc_domctl(xc_handle,&domctl)) {
2326  verror("could not enable debugging of dom %d!\n",xstate->id);
2327  return -1;
2328  }
2329 
2330  /* Null out current state so we reload and see that it's paused! */
2331  xstate->dominfo_valid = 0;
2332  if (xen_vm_load_dominfo(target)) {
2333  verror("could not load dominfo for dom %d\n",xstate->id);
2334  return -1;
2335  }
2336 
2337  if (xen_vm_pause(target,0)) {
2338  verror("could not pause target before attaching; letting user handle!\n");
2339  }
2340 
2341  /*
2342  * Make sure memops is setup to read from memory.
2343  */
2344  if (xstate->memops && xstate->memops->attach) {
2345  if (xstate->memops->attach(target)) {
2346  verror("could not attach memops!\n");
2347  return -1;
2348  }
2349  }
2350 
2351  if (target->evloop && xstate->evloop_fd < 0) {
2352  xen_vm_attach_evloop(target,target->evloop);
2353  }
2354 
2355  if (!xspec->no_hw_debug_reg_clear) {
2356  /*
2357  * Null out hardware breakpoints, so that we don't try to infer that
2358  * one was set, only to error because it's a software BP, not a
2359  * hardware BP (even if the ip matches). This can happen if you do
2360  * one run with hw bps, then breakpoint the same ip with a sw bp.
2361  * Good practice anyway!
2362  */
2363 
2364  if (!(tthread = __xen_vm_load_cached_thread(target,TID_GLOBAL))) {
2365  if (!errno)
2366  errno = EINVAL;
2367  verror("could not load cached thread %"PRIiTID"\n",TID_GLOBAL);
2368  return -1;
2369  }
2370  xtstate = (struct xen_vm_thread_state *)tthread->state;
2371 
2372  xtstate->dr[0] = 0;
2373  xtstate->dr[1] = 0;
2374  xtstate->dr[2] = 0;
2375  xtstate->dr[3] = 0;
2376  /* Clear the status bits */
2377  xtstate->dr[6] = 0;
2378  /* Clear the control bit. */
2379  xtstate->dr[7] = 0;
2380 
2381  /* Now save these values for later write in flush_context! */
2382  xtstate->context.debugreg[0] = 0;
2383  xtstate->context.debugreg[1] = 0;
2384  xtstate->context.debugreg[2] = 0;
2385  xtstate->context.debugreg[3] = 0;
2386  xtstate->context.debugreg[6] = 0;
2387  xtstate->context.debugreg[7] = 0;
2388 
2389  OBJSDIRTY(tthread);
2390 
2391  if (target->current_thread) {
2392  tthread = target->current_thread;
2393  xtstate = (struct xen_vm_thread_state *)tthread->state;
2394 
2395  xtstate->dr[0] = 0;
2396  xtstate->dr[1] = 0;
2397  xtstate->dr[2] = 0;
2398  xtstate->dr[3] = 0;
2399  /* Clear the status bits */
2400  xtstate->dr[6] = 0;
2401  /* Clear the control bit. */
2402  xtstate->dr[7] = 0;
2403 
2404  /* Now save these values for later write in flush_context! */
2405  xtstate->context.debugreg[0] = 0;
2406  xtstate->context.debugreg[1] = 0;
2407  xtstate->context.debugreg[2] = 0;
2408  xtstate->context.debugreg[3] = 0;
2409  xtstate->context.debugreg[6] = 0;
2410  xtstate->context.debugreg[7] = 0;
2411 
2412  OBJSDIRTY(target->current_thread);
2413  }
2414  }
2415 
2416  return 0;
2417 }
2418 
2419 static int xen_vm_detach(struct target *target,int stay_paused) {
2420  struct xen_vm_state *xstate = (struct xen_vm_state *)(target->state);
2421  struct xen_domctl domctl;
2422 
2423  domctl.cmd = XEN_DOMCTL_setdebugging;
2424  domctl.domain = xstate->id;
2425  domctl.u.setdebugging.enable = false;
2426 
2427  vdebug(5,LA_TARGET,LF_XV,"dom %d\n",xstate->id);
2428 
2429  if (!target->opened)
2430  return 0;
2431 
2432  if (xen_vm_status(target) == TSTATUS_PAUSED
2433  && (g_hash_table_size(target->threads) || target->global_thread)) {
2434  /* Flush back registers if they're dirty, but if we don't have
2435  * any threads (i.e. because we're closing/detaching), don't
2436  * flush all, which would load the global thread!
2437  */
2438  target_flush_all_threads(target);
2439  }
2440 
2441  if (target->evloop && xstate->evloop_fd > -1)
2442  xen_vm_detach_evloop(target);
2443 
2444  if (xstate->memops->fini) {
2445  if (xstate->memops->fini(target)) {
2446  verror("failed to fini memops; continuing anyway!\n");
2447  return 0;
2448  }
2449  }
2450 
2451  if (xstate->live_shinfo)
2452  munmap(xstate->live_shinfo,__PAGE_SIZE);
2453 
2454  if (xc_domctl(xc_handle,&domctl)) {
2455  verror("could not disable debugging of dom %d!\n",xstate->id);
2456  return -1;
2457  }
2458 
2459  if (!stay_paused && xen_vm_status(target) == TSTATUS_PAUSED) {
2460  __xen_vm_resume(target,1);
2461  }
2462 
2463  --xc_refcnt;
2464 
2465  if (!xc_refcnt) {
2466  /* Close all the xc stuff; we're the last one. */
2467  vdebug(4,LA_TARGET,LF_XV,"last domain; closing xc/xce interfaces.\n");
2468 
2469  if (xen_vm_virq_or_vmp_detach(target))
2470  verror("failed to unbind debug virq port\n");
2471 
2472  if (xen_vm_xc_detach(&xc_handle,&xce_handle))
2473  verror("failed to close xc interfaces\n");
2474  }
2475 
2476  vdebug(3,LA_TARGET,LF_XV,"detach dom %d succeeded.\n",xstate->id);
2477 
2478  return 0;
2479 }
2480 
2481 static int xen_vm_fini(struct target *target) {
2482  struct xen_vm_state *xstate = (struct xen_vm_state *)(target->state);
2483 
2484  vdebug(5,LA_TARGET,LF_XV,"dom %d\n",xstate->id);
2485 
2486  if (xstate->vmpath)
2487  free(xstate->vmpath);
2488  if (xstate->kernel_filename)
2489  free(xstate->kernel_filename);
2490  if (xstate->name)
2491  free(xstate->name);
2492  if (xstate)
2493  free(xstate);
2494 
2495  return 0;
2496 }
2497 
2498 static int xen_vm_kill(struct target *target,int sig) {
2499  struct xen_vm_state *xstate = (struct xen_vm_state *)(target->state);
2500 
2501  vdebug(5,LA_TARGET,LF_XV,"dom %d\n",xstate->id);
2502 
2503  /* XXX: fill in! */
2504  return 0;
2505 }
2506 
2507 /*
2508  * For now, just one big address space.
2509  */
2510 static int xen_vm_loadspaces(struct target *target) {
2511  struct xen_vm_state *xstate = (struct xen_vm_state *)(target->state);
2512 
2513  addrspace_create(target,"kernel",xstate->id);
2514 
2515  return 0;
2516 }
2517 
2518 /*
2519  * For now, just find our kernel binary path from xen store, as well as
2520  * the max amount of mem, and create a single region (with a single
2521  * range that is R/W/X) covering either all 32 or 64 bits.
2522  *
2523  * The immediate reason to do this is that figuring out which memory is
2524  * currently mapped to kernel or user address space is going to be slow
2525  * because it involves lots of list traversals. Plus, even if we had an
2526  * efficient data structure for searching address ranges, we would have
2527  * to reload the ranges/regions *every* time the domain runs. We do not
2528  * want to do this!
2529  *
2530  * So, XXX: come back to it later.
2531  */
2532 static int xen_vm_loadregions(struct target *target,struct addrspace *space) {
2533  struct memregion *region;
2534  struct memrange *range;
2535  char *kernel_filename;
2536 
2537  kernel_filename =
2538  (char *)g_hash_table_lookup(target->config,"OS_KERNEL_FILENAME");
2539 
2540  region = memregion_create(space,REGION_TYPE_MAIN,kernel_filename);
2541  if (!region)
2542  return -1;
2543  range = memrange_create(region,0,ADDRMAX,0,
2544  PROT_READ | PROT_WRITE | PROT_EXEC);
2545  if (!range)
2546  return -1;
2547 
2548  target->global_tlctxt->region = region;
2549 
2550  return 0;
2551 }
2552 
2553 /*
2554  * For now, just try to find the debuginfo for our kernel, unless the
2555  * user told us about it in xstate.
2556  *
2557  * We need to look for gnu_debuglink first, and then look in
2558  * /usr*lib/debug for a match. Actually, we prefer the buildid because
2559  * for fedora kernel modules, we don't necessarily know the path to the
2560  * module in /lib/modules/VERSION/.../module.ko in the fs, so we can't
2561  * duplicate ... in the /usr/lib/debug search... so build id is the way
2562  * to go.
2563  *
2564  * But for just the kernel itself, this is easier. If we have buildid
2565  * or debuglink, we use /usr*lib/debug. Else, we look in /boot for a
2566  * file that replaces the vmlinuz part with vmlinux.
2567  */
2568 static int xen_vm_loaddebugfiles(struct target *target,
2569  struct addrspace *space,
2570  struct memregion *region) {
2571  int retval = -1;
2572  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
2573  struct debugfile *debugfile;
2574  int bfn = 0;
2575  int bfpn = 0;
2576 
2577  vdebug(5,LA_TARGET,LF_XV,"dom %d\n",xstate->id);
2578 
2579  /*
2580  * Open up the actual ELF binary and look for three sections to inform
2581  * our search. First, if there is a nonzero .debug_info section,
2582  * load that. Second, if there is a .note.gnu.build-id section,
2583  * read the build id and decompose it into a two-byte dir/file.debug
2584  * string that we look for in our search path (i.e., we look for
2585  * $PATH/.build-id/b1/b2..bX.debug). Otherwise, if there is a
2586  * .gnu_debuglink section, we read that section and try to find a
2587  * matching debug file.
2588  */
2589  if (!region->name || strlen(region->name) == 0)
2590  return -1;
2591 
2592  debugfile = debugfile_from_file(region->name,
2593  target->spec->debugfile_root_prefix,
2594  target->spec->debugfile_load_opts_list);
2595  if (!debugfile)
2596  goto out;
2597 
2598  if (target_associate_debugfile(target,region,debugfile)) {
2599  goto out;
2600  }
2601 
2602  /*
2603  * Try to figure out which binfile has the info we need. On
2604  * different distros, they're stripped in different ways.
2605  */
2606  if (debugfile->binfile_pointing) {
2607  binfile_get_root_scope_sizes(debugfile->binfile,&bfn,NULL,NULL,NULL);
2608  binfile_get_root_scope_sizes(debugfile->binfile_pointing,&bfpn,
2609  NULL,NULL,NULL);
2610  if (bfpn > bfn) {
2611  RHOLD(debugfile->binfile_pointing,region);
2612  region->binfile = debugfile->binfile_pointing;
2613  }
2614  }
2615 
2616  if (!region->binfile) {
2617  RHOLD(debugfile->binfile,region);
2618  region->binfile = debugfile->binfile;
2619  }
2620 
2621  /*
2622  * With Xen VMs, we can't always know what the vCPU is running as
2623  * from the xenstore. For instance, with an HVM, we can't seem to
2624  * figure out whether it's running x86_64, x32, or i386 at all; we
2625  * have to load the kernel debuginfo binary to know.
2626  */
2627  if (!target->arch) {
2628  target->arch = debugfile->binfile->arch;
2629  }
2630 
2631  /*
2632  * Propagate some binfile info...
2633  */
2634  region->base_phys_addr = region->binfile->base_phys_addr;
2635  region->base_virt_addr = region->binfile->base_virt_addr;
2636 
2637  retval = 0;
2638 
2639  out:
2640  return retval;
2641 }
2642 
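/*
 * For example, the build-id search described above decomposes a build
 * id like ab4f00...e1 into
 * $PREFIX/usr/lib/debug/.build-id/ab/4f00...e1.debug: the first byte
 * names the directory, the remaining bytes plus ".debug" name the file.
 * (Sketch with hypothetical variables; the real lookup lives in the
 * dwdebug/binfile code.)
 */
#if 0
	char dfile[PATH_MAX];
	snprintf(dfile,sizeof(dfile),
		 "%s/usr/lib/debug/.build-id/%02hhx/%s.debug",
		 root_prefix,buildid[0],buildid_hex_tail);
#endif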
2643 static int xen_vm_postloadinit(struct target *target) {
2644  int rc;
2645  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
2646 
2647  /*
2648  * We might not know this until now! Which register is the fbreg is
2649  * dependent on host cpu type, not target cpu type.
2650  */
2651  if (target->arch->type == ARCH_X86_64) {
2652  target->fbregno = REG_X86_64_RBP;
2653  target->spregno = REG_X86_64_RSP;
2654  target->ipregno = REG_X86_64_RIP;
2655  }
2656  else {
2657  target->fbregno = REG_X86_EBP;
2658  target->spregno = REG_X86_ESP;
2659  target->ipregno = REG_X86_EIP;
2660  }
2661 
2662  SAFE_PERSONALITY_OP_WARN(init,rc,0,target);
2663  SAFE_PERSONALITY_OP_WARN(postloadinit,rc,0,target);
2664 
2665  char *start = (char *)g_hash_table_lookup(target->config,
2666  "OS_KERNEL_START_ADDR");
2667  if (start)
2668  xstate->kernel_start_addr = strtoul(start,NULL,0);
2669 
2670  char *hpage = (char *)g_hash_table_lookup(target->config,
2671  "OS_KERNEL_HYPERCALL_PAGE");
2672  if (hpage)
2673  xstate->hypercall_page = strtoul(hpage,NULL,0);
2674 
2675  return 0;
2676 }
2677 
2678 static int xen_vm_postopened(struct target *target) {
2679  int rc;
2680  SAFE_PERSONALITY_OP_WARN(postopened,rc,0,target);
2681  return rc;
2682 }
2683 
2684 static int xen_vm_set_active_probing(struct target *target,
2685  active_probe_flags_t flags) {
2686  int rc;
2687  SAFE_PERSONALITY_OP_WARN(set_active_probing,rc,0,target,flags);
2688  return rc;
2689 }
2690 
2691 static struct target_spec *
2692 xen_vm_build_default_overlay_spec(struct target *target,tid_t tid) {
2693  return target_build_spec(TARGET_TYPE_OS_PROCESS,TARGET_MODE_NONE);
2694 }
2695 
2696 static struct target *
2697 xen_vm_instantiate_overlay(struct target *target,
2698  struct target_thread *tthread,
2699  struct target_spec *spec,
2700  struct target_thread **ntthread) {
2701  struct target *overlay;
2702  REGVAL thip;
2703  tid_t ltid;
2704  struct target_thread *leader;
2705 
2706  if (!spec)
2707  spec = xen_vm_build_default_overlay_spec(target,tthread->tid);
2708 
2709  if (spec->target_type != TARGET_TYPE_OS_PROCESS) {
2710  errno = EINVAL;
2711  return NULL;
2712  }
2713 
2714  errno = 0;
2715  thip = target_read_reg_ctxt(target,tthread->tid,THREAD_CTXT_USER,
2716  target->ipregno);
2717  if (errno) {
2718  verror("could not read IP for tid %"PRIiTID"!!\n",tthread->tid);
2719  return NULL;
2720  }
2721  if (target_os_thread_is_user(target,tthread->tid) != 1) {
2722  errno = EINVAL;
2723  verror("tid %"PRIiTID" IP 0x%"PRIxADDR" is not a user thread!\n",
2724  tthread->tid,thip);
2725  return NULL;
2726  }
2727 
2728  /*
2729  * Flip to the group leader if it is not this thread itself.
2730  */
2731  ltid = target_os_thread_get_leader(target,tthread->tid);
2732  leader = target_lookup_thread(target,ltid);
2733  if (!leader) {
2734  verror("could not load group_leader for thread %d; BUG?!\n",tthread->tid);
2735  return NULL;
2736  }
2737  else if (leader != tthread) {
2738  vdebug(5,LA_TARGET,LF_XV,
2739  "using group_leader %d instead of user-supplied overlay thread %d\n",
2740  leader->tid,tthread->tid);
2741  *ntthread = leader;
2742  }
2743 
2744  /*
2745  * All we want to do here is create the overlay target.
2746  */
2747  overlay = target_create("os_process",spec);
2748 
2749  return overlay;
2750 }
2751 
2752 static struct target_thread *
2753 xen_vm_lookup_overlay_thread_by_id(struct target *target,int id) {
2754  struct target_thread *retval;
2755 
2756  retval = xen_vm_load_thread(target,id,0);
2757  if (!retval) {
2758  if (!errno)
2759  errno = ESRCH;
2760  return NULL;
2761  }
2762 
2763  if (target_os_thread_is_user(target,retval->tid) == 1) {
2764  vdebug(5,LA_TARGET,LF_XV,
2765  "found overlay thread %d\n",id);
2766  return retval;
2767  }
2768  else {
2769  verror("tid %d matched %d, but is a kernel thread!\n",retval->tid,id);
2770  errno = EINVAL;
2771  return NULL;
2772  }
2773 }
2774 
2775 static struct target_thread *
2776 xen_vm_lookup_overlay_thread_by_name(struct target *target,char *name) {
2777  struct target_thread *retval = NULL;
2778  struct target_thread *tthread;
2779  int slen;
2780  int rc;
2781  GHashTableIter iter;
2782 
2783  if ((rc = xen_vm_load_available_threads(target,0)))
2784  vwarn("could not load %d threads; continuing anyway!\n",-rc);
2785 
2786  g_hash_table_iter_init(&iter,target->threads);
2787  while (g_hash_table_iter_next(&iter,NULL,(gpointer)&tthread)) {
2788  if (tthread == target->global_thread)
2789  continue;
2790 
2791  if (!tthread->name) {
2792  vwarn("tid %d does not have a name; continuing!\n",
2793  tthread->tid);
2794  continue;
2795  }
2796 
2797  slen = strlen(tthread->name);
2798  vdebug(9,LA_TARGET,LF_XV,
2799  "checking task with name '%*s' against '%s'\n",
2800  slen,tthread->name,name);
2801  if (strncmp(name,tthread->name,slen) == 0) {
2802  retval = tthread;
2803  break;
2804  }
2805  }
2806 
2807  if (retval) {
2808  if (target_os_thread_is_user(target,retval->tid) != 1) {
2809  verror("tid %d matched '%s', but is a kernel thread!\n",
2810  retval->tid,name);
2811  errno = EINVAL;
2812  return NULL;
2813  }
2814  else {
2815  vdebug(5,LA_TARGET,LF_XV,
2816  "found overlay thread %"PRIiTID"\n",retval->tid);
2817  return tthread;
2818  }
2819  }
2820  else {
2821  errno = ESRCH;
2822  return NULL;
2823  }
2824 }
2825 
2826 int xen_vm_attach_overlay_thread(struct target *base,struct target *overlay,
2827  tid_t newtid) {
2828  tid_t cltid,nltid;
2829 
2830  nltid = target_os_thread_get_leader(base,newtid);
2831  cltid = target_os_thread_get_leader(base,overlay->base_thread->tid);
2832 
2833  if (nltid == -1 || cltid == -1)
2834  return -1;
2835 
2836  if (nltid == cltid)
2837  return 0;
2838 
2839  errno = EINVAL;
2840  return 1;
2841 }
2842 
2843 int xen_vm_detach_overlay_thread(struct target *base,struct target *overlay,
2844  tid_t tid) {
2845  return 0;
2846 }
2847 
2848 static target_status_t xen_vm_status(struct target *target) {
2849  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
2850  target_status_t retval = TSTATUS_UNKNOWN;
2851 
2852  if (xen_vm_load_dominfo(target)) {
2853  verror("could not load dominfo for dom %d\n",xstate->id);
2854  return retval;
2855  }
2856 
2857  if (xstate->dominfo.paused)
2858  retval = TSTATUS_PAUSED;
2859  else if (xstate->dominfo.running || xstate->dominfo.blocked)
2860  /* XXX: is this right? i.e., is "blocked" from the hypervisor
2861  perspective? */
2862  retval = TSTATUS_RUNNING;
2863  else if (xstate->dominfo.dying || xstate->dominfo.crashed)
2864  retval = TSTATUS_DEAD;
2865  else if (xstate->dominfo.shutdown)
2866  retval = TSTATUS_STOPPED;
2867  else
2868  retval = TSTATUS_ERROR;
2869 
2870  vdebug(9,LA_TARGET,LF_XV,"dom %d status %d\n",xstate->id,retval);
2871 
2872  return retval;
2873 }
2874 
2875 static int xen_vm_pause(struct target *target,int nowait) {
2876  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
2877  struct timeval check_tv = { 0,0};
2878  target_poll_outcome_t outcome;
2879  int pstatus;
2880 
2881  vdebug(5,LA_TARGET,LF_XV,"dom %d\n",xstate->id);
2882 
2883  if (xen_vm_load_dominfo(target))
2884  vwarn("could not load dominfo for dom %d, trying to pause anyway!\n",xstate->id);
2885 
2886  if (xstate->dominfo.paused) {
2887  if (target_get_status(target) != TSTATUS_PAUSED)
2888  target_set_status(target,TSTATUS_PAUSED);
2889  else
2890  return 0;
2891  }
2892  else if (xc_domain_pause(xc_handle,xstate->id)) {
2893  verror("could not pause dom %d!\n",xstate->id);
2894  return -1;
2895  }
2896 
2897  /*
2898  * Give the memops a chance to handle pause.
2899  */
2900  if (xstate->memops && xstate->memops->handle_pause) {
2901  xstate->memops->handle_pause(target);
2902  }
2903 
2904  target_set_status(target,TSTATUS_PAUSED);
2905 
2906  xstate->dominfo_valid = 0;
2907  if (xen_vm_load_dominfo(target))
2908  vwarn("could not reload dominfo for dom %d after pause!\n",xstate->id);
2909 
2910  /*
2911  * NB: very important.
2912  *
2913  * Since we allow pauses to be commanded asynchronously
2914  * w.r.t. target vm execution state, we have to check if there is
2915  * something to handle once we successfully pause it, and handle it
2916  * if so. Otherwise if a target_pause() and debug exception happen
2917  * at the "same" time relative to the user, we might leave a debug
2918  * event unhandled, and this could whack the target.
2919  *
2920  * We pass in a 0,0 timeval so that the select() in xen_vm_poll
2921  * truly polls.
2922  *
2923  * Also note that we don't care what the outcome is.
2924  */
2925  xen_vm_poll(target,&check_tv,&outcome,&pstatus);
2926 
2927  return 0;
2928 }
2929 
2930 static int xen_vm_flush_current_thread(struct target *target) {
2931  struct xen_vm_state *xstate = (struct xen_vm_state *)(target->state);
2932  struct target_thread *tthread;
2933  struct xen_vm_thread_state *tstate;
2934  tid_t tid;
2935  int rc;
2936 
2937  if (!target->current_thread) {
2938  verror("current thread not loaded!\n");
2939  errno = EINVAL;
2940  return -1;
2941  }
2942 
2943  tthread = target->current_thread;
2944  tid = tthread->tid;
2945  tstate = (struct xen_vm_thread_state *)tthread->state;
2946 
2947  vdebug(5,LA_TARGET,LF_XV,"dom %d tid %"PRIiTID"\n",xstate->id,tid);
2948 
2949  if (!OBJVALID(tthread) || !OBJDIRTY(tthread)) {
2950  vdebug(9,LA_TARGET,LF_XV,
2951  "dom %d tid %"PRIiTID" not valid (%d) or not dirty (%d)\n",
2952  xstate->id,tid,OBJVALID(tthread),OBJDIRTY(tthread));
2953  return 0;
2954  }
2955 
2956  vdebug(9,LA_TARGET,LF_XV,
2957  "EIP is 0x%"PRIxREGVAL" before flush (dom %d tid %"PRIiTID")\n",
2958  target_read_reg(target,TID_GLOBAL,target->ipregno),
2959  xstate->id,tid);
2960 
2961  if (__xen_vm_thread_regcache_to_vcpu(target,tthread,tthread->tidctxt,
2962  &tstate->context)) {
2963  verror("could not convert regcache to vcpu context(dom %d tid %"PRIiTID")\n",
2964  xstate->id,tid);
2965  errno = EINVAL;
2966  return -1;
2967  }
2968 
2969  /*
2970  * Flush Xen machine context.
2971  */
2972  if (__xen_vm_cpu_setcontext(target,&tstate->context) < 0) {
2973  verror("could not set vcpu context (dom %d tid %"PRIiTID")\n",
2974  xstate->id,tid);
2975  errno = EINVAL;
2976  return -1;
2977  }
2978 
2979 #if __WORDSIZE == 32
2980  vdebug(9,LA_TARGET,LF_XV,
2981  "eflags (vcpu context): 0x%"PRIxADDR"\n",
2982  tstate->context.user_regs.eflags);
2983 #else
2984  vdebug(9,LA_TARGET,LF_XV,
2985  "rflags (vcpu context): 0x%"PRIxADDR"\n",
2986  tstate->context.user_regs.rflags);
2987 #endif
2988  vdebug(9,LA_TARGET,LF_XV,
2989  "debug registers (vcpu context): 0x%"PRIxADDR",0x%"PRIxADDR
2990  ",0x%"PRIxADDR",0x%"PRIxADDR",0,0,0x%"PRIxADDR",0x%"PRIxADDR"\n",
2991  tstate->context.debugreg[0],tstate->context.debugreg[1],
2992  tstate->context.debugreg[2],tstate->context.debugreg[3],
2993  tstate->context.debugreg[6],tstate->context.debugreg[7]);
2994 
2995  vdebug(9,LA_TARGET,LF_XV,
2996  "debug registers (our copy): 0x%"PRIxADDR",0x%"PRIxADDR
2997  ",0x%"PRIxADDR",0x%"PRIxADDR",0,0,0x%"PRIxADDR",0x%"PRIxADDR"\n",
2998  tstate->dr[0],tstate->dr[1],tstate->dr[2],tstate->dr[3],
2999  tstate->dr[6],tstate->dr[7]);
3000 
3001  SAFE_PERSONALITY_OP(flush_current_thread,rc,0,target);
3002  return rc;
3003 }
3004 
3005 /*
3006  * Very similar to flush_current_thread -- BUT it doesn't flush anything
3007  * but CPU context.
3008  *
3009  * Also, if @current_thread is not NULL, we do a funny thing -- we use
3010  * the cpu context from @current_thread as our base, and overlay ONLY the
3011  * debug registers from the global thread -- and set the context to
3012  * that. If @current_thread is NULL, we upload the full CPU context we
3013  * have. @current_thread must not be the global thread itself.
3014  *
3015  * We do things this way because the only time we use the global thread
3016  * to pass to bp/ss handlers in the probe library is when Xen is in
3017  * interrupt context. In that case, there is no current_thread -- the
3018  * current_thread is the global thread. So in reality, the only thing
3019  * that gets stored in the global thread is hardware probepoints that
3020  * were set for TID_GLOBAL. However, when the bp/ss handlers handle
3021  * those probepoints, they do so in the context of the thread -- which
3022  * is either current_thread (if in task context) or global_thread (if in
3023  * interrupt context, because there is no task thread, just an interrupt
3024  * stack). So, even when a TID_GLOBAL hardware probepoint is being
3025  * handled, all the non-debug-register modifications to it happen in the
3026  * current_thread CPU state.
3027  */
3028 static int xen_vm_flush_global_thread(struct target *target,
3029  struct target_thread *current_thread) {
3030  struct xen_vm_state *xstate = (struct xen_vm_state *)(target->state);
3031  struct target_thread *gthread;
3032  struct xen_vm_thread_state *tstate;
3033  struct xen_vm_thread_state *gtstate;
3034  vcpu_guest_context_t *ctxp;
3035  vcpu_guest_context_t context;
3036  int i;
3037 
3038  if (!target->global_thread) {
3039  verror("BUG: no global thread loaded!!!\n");
3040  errno = EINVAL;
3041  return -1;
3042  }
3043  if (current_thread == target->global_thread)
3044  current_thread = NULL;
3045 
3046  gthread = target->global_thread;
3047  gtstate = (struct xen_vm_thread_state *)gthread->state;
3048  if (current_thread)
3049  tstate = (struct xen_vm_thread_state *)current_thread->state;
3050  else
3051  tstate = NULL;
3052 
3053  if (!OBJVALID(gthread) || !OBJDIRTY(gthread)) {
3054  vdebug(9,LA_TARGET,LF_XV,
3055  "dom %d tid %"PRIiTID" not valid (%d) or not dirty (%d)\n",
3056  xstate->id,gthread->tid,OBJVALID(gthread),OBJDIRTY(gthread));
3057  return 0;
3058  }
3059  else {
3060  /*
3061  * Always have to convert the global thread.
3062  */
3063  if (__xen_vm_thread_regcache_to_vcpu(target,target->global_thread,
3064  target->global_thread->tidctxt,
3065  &gtstate->context)) {
3066  verror("could not convert regcache to vcpu context"
3067  " (dom %d tid %"PRIiTID") ctxt %d\n",
3068  xstate->id,target->global_thread->tid,
3069  target->global_thread->tidctxt);
3070  errno = EINVAL;
3071  return -1;
3072  }
3073  }
3074 
3075  if (!current_thread) {
3076  /* Flush the global thread's CPU context directly. */
3077 
3078  vdebug(5,LA_TARGET,LF_XV,"dom %d tid %"PRIiTID" (full global vCPU flush)\n",
3079  xstate->id,gthread->tid);
3080 
3081  ctxp = &gtstate->context;
3082 
3083  }
3084  else {
3085  if (__xen_vm_thread_regcache_to_vcpu(target,target->current_thread,
3086  target->current_thread->tidctxt,
3087  &tstate->context)) {
3088  verror("could not convert regcache to vcpu context"
3089  " (dom %d tid %"PRIiTID") ctxt %d\n",
3090  xstate->id,target->current_thread->tid,
3091  target->current_thread->tidctxt);
3092  errno = EINVAL;
3093  return -1;
3094  }
3095 
3096  /* We have to merge the hardware debug register state from the
3097  * current thread with the state for the global thread.
3098  */
3099  ctxp = &context;
3100 
3101  /* Copy the current_thread's whole context in; then overlay the
3102  * global thread's debugreg values *that are in use*.
3103  */
3104  memcpy(ctxp,&tstate->context,sizeof(tstate->context));
3105 
3106  /* Unilaterally zero the status register; we're about to flush. */
3107  ctxp->debugreg[6] = 0;
3108 
3109  /* For any TID_GLOBAL debugreg that is in use, copy the register
3110  * and its control bits into the merged ctxp.
3111  */
3112  for (i = 0; i < 4; ++i) {
3113  if (gtstate->context.debugreg[i] == 0)
3114  continue;
3115 
3116  vdebug(5,LA_TARGET,LF_XV,"merging global debug reg %d in!\n",i);
3117  /* Copy in the break address */
3118  ctxp->debugreg[i] = gtstate->context.debugreg[i];
3119  /* Overwrite the control bits; unset them first, then set. */
3120  ctxp->debugreg[7] &= ~(0x3 << (i * 2));
3121  ctxp->debugreg[7] |= ((0x3 << (i * 2)) & gtstate->context.debugreg[7]);
3122  /* Overwrite the break-on bits; unset them first, then set. */
3123  ctxp->debugreg[7] &= ~(0x3 << (16 + (i * 4)));
3124  ctxp->debugreg[7] |= ((0x3 << (16 + (i * 4))) & gtstate->context.debugreg[7]);
3125  /* Overwrite the break-on size bits (watchpoint size) */
3126  ctxp->debugreg[7] &= ~(0x3 << (18 + (i * 4)));
3127  ctxp->debugreg[7] |= ((0x3 << (18 + (i * 4))) & gtstate->context.debugreg[7]);
3128  }
3129 
3130  /* Unilaterally set the break-exact bits. */
3131  //ctxp->debugreg[7] |= 0x3 << 8;
3132 
3133  }
3134 
3135  if (!current_thread) {
3136  vdebug(9,LA_TARGET,LF_XV,
3137  "EIP is 0x%"PRIxREGVAL" before flush (dom %d tid %"PRIiTID")\n",
3138  target_read_reg(target,TID_GLOBAL,target->ipregno),
3139  xstate->id,gthread->tid);
3140  }
3141  else {
3142  vdebug(9,LA_TARGET,LF_XV,
3143  "EIP is 0x%"PRIxREGVAL" (in thread %"PRIiTID") before flush (dom %d tid %"PRIiTID")\n",
3144 #ifdef __x86_64__
3145  gtstate->context.user_regs.rip,
3146 #else
3147  gtstate->context.user_regs.eip,
3148 #endif
3149  current_thread->tid,
3150  xstate->id,gthread->tid);
3151  }
3152 
3153  /*
3154  * Flush Xen machine context.
3155  */
3156  if (__xen_vm_cpu_setcontext(target,ctxp) < 0) {
3157  verror("could not set vcpu context (dom %d tid %"PRIiTID")\n",
3158  xstate->id,gthread->tid);
3159  errno = EINVAL;
3160  return -1;
3161  }
3162 
3163  /* Mark cached copy as clean. */
3164  OBJSCLEAN(gthread);
3165 
3166  if (!current_thread)
3167  vdebug(9,LA_TARGET,LF_XV,
3168  "debug registers (setting full vcpu context): 0x%"PRIxADDR",0x%"PRIxADDR
3169  ",0x%"PRIxADDR",0x%"PRIxADDR",0,0,0x%"PRIxADDR",0x%"PRIxADDR"\n",
3170  gtstate->context.debugreg[0],gtstate->context.debugreg[1],
3171  gtstate->context.debugreg[2],gtstate->context.debugreg[3],
3172  gtstate->context.debugreg[6],gtstate->context.debugreg[7]);
3173  else
3174  vdebug(9,LA_TARGET,LF_XV,
3175  "debug registers (setting MERGED!!! vcpu context): 0x%"PRIxADDR",0x%"PRIxADDR
3176  ",0x%"PRIxADDR",0x%"PRIxADDR",0,0,0x%"PRIxADDR",0x%"PRIxADDR"\n",
3177  ctxp->debugreg[0],ctxp->debugreg[1],
3178  ctxp->debugreg[2],ctxp->debugreg[3],
3179  ctxp->debugreg[6],ctxp->debugreg[7]);
3180 
3181  if (!current_thread)
3182  vdebug(9,LA_TARGET,LF_XV,
3183  "debug registers (our copy): 0x%"PRIxADDR",0x%"PRIxADDR
3184  ",0x%"PRIxADDR",0x%"PRIxADDR",0,0,0x%"PRIxADDR",0x%"PRIxADDR"\n",
3185  gtstate->dr[0],gtstate->dr[1],gtstate->dr[2],gtstate->dr[3],
3186  gtstate->dr[6],gtstate->dr[7]);
3187 
3188  return 0;
3189 }
3190 
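/*
 * A worked example of the DR7 layout the merge loop above relies on
 * (standard x86): for slot i, the local/global enable pair lives in
 * bits [2i+1:2i], the break-on (R/W) condition in bits [17+4i:16+4i],
 * and the length in bits [19+4i:18+4i]. So for slot 1:
 */
#if 0
	dr7 &= ~(0x3 << (1 * 2));        /* enables: bits 3:2 */
	dr7 &= ~(0x3 << (16 + 1 * 4));   /* R/W1:    bits 21:20 */
	dr7 &= ~(0x3 << (18 + 1 * 4));   /* LEN1:    bits 23:22 */
#endif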
3191 static int xen_vm_pause_thread(struct target *target,tid_t tid,int nowait) {
3192  verror("cannot pause individual threads in guests!\n");
3193  errno = EINVAL;
3194  return -1;
3195 }
3196 
3197 static int xen_vm_flush_thread(struct target *target,tid_t tid) {
3198  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
3199  struct target_thread *tthread;
3200  int rc;
3201 
3202  vdebug(16,LA_TARGET,LF_XV,"dom %d tid %"PRIiTID"\n",xstate->id,tid);
3203 
3204  /*
3205  * If we are flushing the global thread (TID_GLOBAL), do it right
3206  * away.
3207  */
3208  if (tid == TID_GLOBAL)
3209  return xen_vm_flush_current_thread(target);
3210 
3211  /*
3212  * If we haven't loaded current_thread yet, we really should load it
3213  * because otherwise we don't know if current_thread->tid == @tid.
3214  * If it does, we don't want to do the below stuff, which only
3215  * applies to non-running threads -- in this case, we want to flush
3216  * to the hardware directly.
3217  *
3218  * BUT -- we can't load a thread in the flush code; we might be
3219  * iterating over the threads hashtable, so a load might result in
3220  * a thread create which would result in the hashtable being
3221  * modified.
3222  */
3223  if (!target->current_thread) {
3224  vdebug(9,LA_TARGET,LF_XV,
3225  "current thread not loaded to compare with"
3226  " tid %"PRIiTID"; exiting, user-mode EIP, or BUG?\n",
3227  tid);
3228  }
3229  else if (!OBJVALID(target->current_thread)) {
3230  vdebug(9,LA_TARGET,LF_XV,
3231  "current thread not valid to compare with"
3232  " tid %"PRIiTID"; exiting, user-mode EIP, or BUG?\n",
3233  tid);
3234  }
3235 
3236  /*
3237  * If the thread tid we are asking for is the current thread and is
3238  * valid, or if the thread is in our cache and is valid.
3239  */
3240  if (target->current_thread && target->current_thread->tid == tid) {
3241  return xen_vm_flush_current_thread(target);
3242  }
3243  /*
3244  * Otherwise, try to lookup thread @tid.
3245  */
3246  tthread = target_lookup_thread(target,tid);
3247 
3248  if (!tthread) {
3249  verror("cannot flush unknown thread %"PRIiTID"; you forgot to load?\n",
3250  tid);
3251  errno = EINVAL;
3252  return -1;
3253  }
3254 
3255  if (tthread == target->current_thread)
3256  return xen_vm_flush_current_thread(target);
3257 
3258  if (!OBJVALID(tthread) || !OBJDIRTY(tthread)) {
3259  vdebug(9,LA_TARGET,LF_XV,
3260  "dom %d tid %"PRIiTID" not valid (%d) or not dirty (%d)\n",
3261  xstate->id,tthread->tid,OBJVALID(tthread),OBJDIRTY(tthread));
3262  return 0;
3263  }
3264 
3265  SAFE_PERSONALITY_OP(flush_thread,rc,0,target,tid);
3266  if (rc)
3267  goto errout;
3268 
3269  OBJSCLEAN(tthread);
3270 
3271  return 0;
3272 
3273  errout:
3274  return -1;
3275 }
3276 
3277 static int xen_vm_flush_all_threads(struct target *target) {
3278  int rc, retval = 0;
3279  GHashTableIter iter;
3280  struct target_thread *tthread;
3281  struct target_thread *current_thread = NULL;
3282 
3283  g_hash_table_iter_init(&iter,target->threads);
3284  while (g_hash_table_iter_next(&iter,NULL,(gpointer)&tthread)) {
3285  if (tthread == target->current_thread
3286  || tthread == target->global_thread)
3287  continue;
3288  else
3289  rc = xen_vm_flush_thread(target,tthread->tid);
3290  if (rc) {
3291  verror("could not flush thread %"PRIiTID"\n",tthread->tid);
3292  ++retval;
3293  }
3294  }
3295 
3296  /*
3297  * If the current thread is not the global thread, we have to try to
3298  * flush it.
3299  */
3300  if (target->current_thread
3301  && target->current_thread != target->global_thread) {
3302  /* Save this off to tell flush_global_thread below that
3303  * it must merge its state with this thread's state.
3304  *
3305  * So if the current thread is not the global thread itself, and
3306  * its state is valid (whether it is dirty or not!!), we must
3307  * merge.
3308  */
3309  if (OBJVALID(target->current_thread))
3310  current_thread = target->current_thread;
3311 
3312  rc = xen_vm_flush_current_thread(target);
3313  if (rc) {
3314  verror("could not flush current thread %"PRIiTID"\n",
3315  target->current_thread->tid);
3316  ++retval;
3317  }
3318  }
3319 
3320  /*
3321  * Also, we always have to try to flush the "global" thread.
3322  * Remember, the global thread is a fake thread; it never maps to
3323  * anything real; it is just the current CPU registers. If the user
3324  * sets any probes or modifies registers with TID_GLOBAL, they only
3325  * get flushed if we flush the global thread.
3326  *
3327  * OF COURSE, this means that if you mix per-thread probing/register
3328  * modification and global thread modification, your changes to the
3329  * current hardware state will almost certainly stomp on each
3330  * other. OK, this is no longer permitted; get_unused_debug_reg now
3331  * makes sure this cannot happen.
3332  *
3333  * If we were handling a software breakpoint, we would have modified
3334  * cpu context in the current thread; if we were handling a hardware
3335  * probe or modifying a hardware probe, we would have written
3336  * the global thread's cpu state (AND the current thread's CPU state
3337  * too, like EIP, etc). So what we need to do is arbitrate between the
3338  * two contexts depending on what we're doing. For instance, if we
3339  * handled a hardware probepoint, we'll always need to flush the
3340  * global thread -- see monitor() and flush_global_thread().
3341  */
3342  rc = xen_vm_flush_global_thread(target,current_thread);
3343  if (rc) {
3344  verror("could not flush global thread %"PRIiTID"\n",TID_GLOBAL);
3345  ++retval;
3346  }
3347 
3348  return retval;
3349 }
3350 
3351 static int __value_get_append_tid(struct target *target,struct value *value,
3352  void *data) {
3353  struct array_list *list = (struct array_list *)data;
3354  struct value *v;
3355 
3356  v = target_load_value_member(target,NULL,
3357  value,"pid",NULL,LOAD_FLAG_NONE);
3358  if (!v) {
3359  verror("could not load pid in task; BUG?\n");
3360  /* errno should be set for us. */
3361  return -1;
3362  }
3363  array_list_append(list,(void *)(uintptr_t)v_i32(v));
3364  value_free(v);
3365 
3366  return 0;
3367 }
3368 
3369 static struct array_list *xen_vm_list_available_tids(struct target *target) {
3370  struct array_list *retval;
3371  SAFE_PERSONALITY_OP(list_available_tids,retval,NULL,target);
3372  return retval;
3373 }
3374 
3375 static int xen_vm_load_all_threads(struct target *target,int force) {
3376  struct array_list *cthreads;
3377  int rc = 0;
3378  int i;
3379  struct target_thread *tthread;
3380 
3381  cthreads = target_list_threads(target);
3382 
3383  for (i = 0; i < array_list_len(cthreads); ++i) {
3384  tthread = (struct target_thread *)array_list_item(cthreads,i);
3385 
3386  vdebug(9,LA_TARGET,LF_XV,
3387  "tid %"PRIiTID" (%p)\n",tthread->tid,tthread);
3388 
3389  if (!xen_vm_load_thread(target,tthread->tid,force)) {
3390  if (target_lookup_thread(target,tthread->tid)) {
3391  verror("could not load thread %"PRIiTID"\n",tthread->tid);
3392  --rc;
3393  continue;
3394  }
3395  /*
3396  * If it's no longer in the cache, we evicted it because it
3397  * no longer exists... so this is not an error.
3398  */
3399  }
3400  }
3401 
3402  return rc;
3403 }
3404 
3405 static int xen_vm_load_available_threads(struct target *target,int force) {
3406  int rc;
3407 
3408  /*
3409  * Load the current thread first to load the global thread. The
3410  * current thread will get loaded again in the loop below if @force
3411  * is set...
3412  */
3413  if (!__xen_vm_load_current_thread(target,force,1)) {
3414  verror("could not load current thread!\n");
3415  return -1;
3416  }
3417 
3418  SAFE_PERSONALITY_OP(load_available_threads,rc,0,target,force);
3419  return rc;
3420 }
3421 
3422 static int xen_vm_thread_snprintf(struct target *target,
3423  struct target_thread *tthread,
3424  char *buf,int bufsiz,
3425  int detail,char *sep,char *kvsep) {
3426  int rc = 0;
3427  int nrc;
3428 
3429  if (tthread == target->current_thread || tthread == target->global_thread) {
3430  rc = target_regcache_snprintf(target,tthread,tthread->tidctxt,
3431  buf,bufsiz,detail,sep,kvsep,0);
3432  if (rc < 0)
3433  return rc;
3434  }
3435 
3436  SAFE_PERSONALITY_OP(thread_snprintf,nrc,0,target,tthread,
3437  (rc >= bufsiz) ? NULL : buf + rc,
3438  (rc >= bufsiz) ? 0 : bufsiz - rc,
3439  detail,sep,kvsep);
3440  if (nrc < 0) {
3441  verror("could not snprintf personality info for thread %d!\n",
3442  tthread->tid);
3443  return nrc;
3444  }
3445 
3446  return rc + nrc;
3447 }
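/*
 * The (rc >= bufsiz) dance above is the standard C99 snprintf-chaining
 * idiom: snprintf returns the would-be length even when it truncates,
 * so once rc passes bufsiz we keep "printing" into (NULL,0) and rc
 * still accumulates the total size needed. Minimal sketch (buf, bufsiz,
 * sep, kvsep, tid, and name are hypothetical here):
 */
#if 0
	int rc = 0;
	rc += snprintf(buf,bufsiz,"tid%s%d",kvsep,tid);
	rc += snprintf((rc >= bufsiz) ? NULL : buf + rc,
		       (rc >= bufsiz) ? 0 : bufsiz - rc,
		       "%sname%s%s",sep,kvsep,name);
	/* rc now holds the full required length, truncated or not. */
#endif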
3448 
3453 #if 0
3454 static int xen_vm_thread_snprintf(struct target_thread *tthread,
3455  char *buf,int bufsiz,
3456  int detail,char *sep,char *kvsep) {
3457  struct xen_vm_thread_state *tstate;
3458  struct cpu_user_regs *r;
3459  int rc = 0;
3460  int nrc;
3461 
3462  if (detail < 0)
3463  goto personality_out;
3464 
3465  tstate = (struct xen_vm_thread_state *)tthread->state;
3466  if (!tstate)
3467  goto personality_out;
3468 
3469  r = &tstate->context.user_regs;
3470 
3471  if (detail >= 0) {
3472  ;
3473  }
3474 
3475  if (detail >= 1)
3476  rc += snprintf((rc >= bufsiz) ? NULL : buf + rc,
3477  (rc >= bufsiz) ? 0 :bufsiz - rc,
3478  "%s" "ip%s%"RF "%s" "bp%s%"RF "%s" "sp%s%"RF "%s"
3479  "flags%s%"RF "%s" "ax%s%"RF "%s" "bx%s%"RF "%s"
3480  "cx%s%"RF "%s" "dx%s%"RF "%s" "di%s%"RF "%s"
3481  "si%s%"RF "%s" "cs%s%d" "%s" "ss%s%d" "%s"
3482  "ds%s%d" "%s" "es%s%d" "%s"
3483  "fs%s%d" "%s" "gs%s%d",
3484 #if __WORDSIZE == 64
3485  sep,kvsep,r->rip,sep,kvsep,r->rbp,sep,kvsep,r->rsp,sep,
3486  kvsep,r->eflags,sep,kvsep,r->rax,sep,kvsep,r->rbx,sep,
3487  kvsep,r->rcx,sep,kvsep,r->rdx,sep,kvsep,r->rdi,sep,
3488  kvsep,r->rsi,sep,kvsep,r->cs,sep,kvsep,r->ss,sep,
3489  kvsep,r->ds,sep,kvsep,r->es,sep,
3490  kvsep,r->fs,sep,kvsep,r->gs
3491 #else
3492  sep,kvsep,r->eip,sep,kvsep,r->ebp,sep,kvsep,r->esp,sep,
3493  kvsep,r->eflags,sep,kvsep,r->eax,sep,kvsep,r->ebx,sep,
3494  kvsep,r->ecx,sep,kvsep,r->edx,sep,kvsep,r->edi,sep,
3495  kvsep,r->esi,sep,kvsep,r->cs,sep,kvsep,r->ss,sep,
3496  kvsep,r->ds,sep,kvsep,r->es,sep,
3497  kvsep,r->fs,sep,kvsep,r->gs
3498 #endif
3499  );
3500  if (detail >= 2)
3501  rc += snprintf((rc >= bufsiz) ? NULL : buf + rc,
3502  (rc >= bufsiz) ? 0 :bufsiz - rc,
3503  "%s" "dr0%s%"DRF "%s" "dr1%s%"DRF
3504  "%s" "dr2%s%"DRF "%s" "dr3%s%"DRF
3505  "%s" "dr6%s%"DRF "%s" "dr7%s%"DRF,
3506  sep,kvsep,tstate->dr[0],sep,kvsep,tstate->dr[1],
3507  sep,kvsep,tstate->dr[1],sep,kvsep,tstate->dr[2],
3508  sep,kvsep,tstate->dr[6],sep,kvsep,tstate->dr[7]);
3509 
3510  personality_out:
3511  nrc = target_personality_thread_snprintf(tthread,
3512  (rc >= bufsiz) ? NULL : buf + rc,
3513  (rc >= bufsiz) ? 0 : bufsiz - rc,
3514  detail,sep,kvsep);
3515  if (nrc < 0) {
3516  verror("could not snprintf personality info for thread %d!\n",
3517  tthread->tid);
3518  return rc;
3519  }
3520 
3521  return rc + nrc;
3522 }
3523 #endif /* 0 */
3524 
3525 static int xen_vm_invalidate_thread(struct target *target,
3526  struct target_thread *tthread) {
3527  struct xen_vm_thread_state *xtstate;
3528  int rc;
3529 
3530  xtstate = (struct xen_vm_thread_state *)tthread->state;
3531  if (xtstate)
3532  xtstate->pgd_phys = 0;
3533 
3534  SAFE_PERSONALITY_OP(invalidate_thread,rc,0,target,tthread);
3535 
3536  return rc;
3537 }
3538 
3539 static int __xen_vm_resume(struct target *target,int detaching) {
3540  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
3541  int rc;
3542 
3543  vdebug(5,LA_TARGET,LF_XV,"dom %d\n",xstate->id);
3544 
3545  if (xen_vm_load_dominfo(target))
3546  vwarn("could not load dominfo for dom %d, trying to resume anyway!\n",xstate->id);
3547 
3548  if (!xstate->dominfo.paused) {
3549  vwarn("dom %d not paused; not invalidating and resuming; bug?\n",
3550  xstate->id);
3551  return -1;
3552  }
3553 
3554  /*
3555  * Only call this if we have threads still, or we are not detaching;
3556  * if we're detaching and the target_api has already deleted our
3557  * threads, flush_all_threads will end up loading at least the
3558  * global thread... which is counterproductive.
3559  */
3560  if (!detaching
3561  || g_hash_table_size(target->threads) || target->global_thread) {
3562  /* Flush back registers if they're dirty! */
3563  target_flush_all_threads(target);
3564 
3565  /* Invalidate our cached copies of threads. */
3566  target_invalidate_all_threads(target);
3567  }
3568 
3569  /* flush_context will not have done this necessarily! */
3570  xstate->dominfo_valid = 0;
3571 
3572  rc = xc_domain_unpause(xc_handle,xstate->id);
3573 
3575 
3576  return rc;
3577 }
3578 
3579 static int xen_vm_resume(struct target *target) {
3580  return __xen_vm_resume(target,0);
3581 }
3582 
3583 /*
3584  * If again is not NULL, we set again
3585  * to -1 if there was an error, but we should try again;
3586  * to 0 if not again;
3587  * to 1 if just handled a bp and should try again;
3588  * to 2 if just handled an ss and should try again.
3589  */
3590 static target_status_t xen_vm_handle_exception(struct target *target,
3591  target_exception_flags_t flags,
3592  int *again,void *priv) {
3593  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
3594  int cpl;
3595  REGVAL ipval;
3596  int dreg = -1;
3597  struct probepoint *dpp;
3598  struct target_thread *tthread;
3599  tid_t overlay_leader_tid;
3600  struct xen_vm_thread_state *gtstate;
3601  struct xen_vm_thread_state *xtstate;
3602  tid_t tid;
3603  struct probepoint *spp;
3604  struct target_thread *sstep_thread;
3605  struct target_thread *bogus_sstep_thread;
3606  ADDR bogus_sstep_probepoint_addr;
3607  struct target *overlay;
3608  struct target_memmod *pmmod = NULL; /* may be tested before assignment */
3609  ADDR paddr;
3610  REGVAL tmp_ipval;
3611  int rc;
3612  target_status_t tstatus;
3613 
3614  /* Reload our dominfo */
3615  xstate->dominfo_valid = 0;
3616  if (xen_vm_load_dominfo(target)) {
3617  verror("could not load dominfo; returning to user!\n");
3618  goto out_err;
3619  }
3620 
3621  tstatus = target_status(target);
3622 
3623  if (tstatus == TSTATUS_RUNNING) {
3625  "ignoring \"exception\" in our running VM %d; not for us\n",
3626  xstate->id);
3627  if (again)
3628  *again = 0;
3629  return tstatus;
3630  }
3631  else if (tstatus == TSTATUS_PAUSED) {
3633  "new debug event (brctr = %"PRIu64", tsc = %"PRIx64")\n",
3634  xen_vm_get_counter(target),xen_vm_get_tsc(target));
3635 
3636  target->monitorhandling = 1;
3637 
3638  /* Force the current thread to be reloaded. */
3639  target->current_thread = NULL;
3640 
3641  /*
3642  * Load the global thread (machine state) very first... we have
3643  * to be able to read some register state!
3644  */
3645  if (!__xen_vm_load_current_thread(target,0,1)) {
3646  verror("could not load global thread!\n");
3647  goto out_err;
3648  }
3649 
3650  /*
3651  * Grab EIP and CPL first so we can see if we're in user or
3652  * kernel space and print better messages.
3653  */
3654  errno = 0;
3655  cpl = __xen_get_cpl(target,TID_GLOBAL);
3656  if (errno) {
3657  verror("could not read CPL while checking debug event: %s\n",
3658  strerror(errno));
3659  goto out_err;
3660  }
3661  ipval = target_read_reg(target,TID_GLOBAL,target->ipregno);
3662  if (errno) {
3663  verror("could not read EIP while checking debug event: %s\n",
3664  strerror(errno));
3665  goto out_err;
3666  }
3667 
3668  /*
3669  * Give the personality a chance to update its state.
3670  */
3671  SAFE_PERSONALITY_OP_WARN_NORET(handle_exception,rc,0,target,flags);
3672 
3673  /*
3674  * Give the memops a chance to update.
3675  */
3676  if (xstate->memops && xstate->memops->handle_exception_ours) {
3677  xstate->memops->handle_exception_ours(target);
3678  }
3679 
3680  /*
3681  * Reload the current thread. We don't force it because we
3682  * flush all threads before continuing the loop via again:,
3683  * or in target_resume/target_singlestep.
3684  */
3685  xen_vm_load_current_thread(target,0);
3686 
3687  if (__xen_vm_in_userspace(target,cpl,ipval)) {
3688  tthread = target->current_thread;
3689 
3690  if (!tthread) {
3691  verror("could not load current userspace thread at 0x%"PRIxADDR"!\n",
3692  ipval);
3693  goto out_err;
3694  }
3695 
3696  gtstate = (struct xen_vm_thread_state *) \
3697  target->global_thread->state;
3698  xtstate = (struct xen_vm_thread_state *) \
3699  target->current_thread->state;
3700  tid = target->current_thread->tid;
3701 
3703  "user-mode debug event at EIP 0x%"PRIxADDR" in tid %"PRIiTID";"
3704  " will try to handle it if it is single step!\n",
3705  ipval,tid);
3706  }
3707  else {
3708  /*
3709  * First, we check the current thread's state/registers to
3710  * try to handle the exception in the current thread. If
3711  * there is no information (and the current thread was not
3712  * the global thread), we try the global thread.
3713  */
3714  if (!(tthread = target->current_thread)) {
3715  verror("could not read current thread!\n");
3716  goto out_err_again;
3717  }
3718 
3719  /*
3720  * Next, if auto garbage collection is enabled, do it.
3721  *
3722  * We only need to do this every N interrupts, or something,
3723  * but what we really want is something that is related to
3724  * how many cycles have elapsed in the target -- i.e., if
3725  * more than one second's worth of wallclock time has
3726  * elapsed in the target, we should garbage collect.
3727  *
3728  * But I don't know how to grab the current cycle counter
3729  * off the top of my head, so just do it when we accumulate
3730  * at least 32 threads.
3731  */
3732  /*
3733  if (g_hash_table_size(target->threads) > 32) {
3734  target_gc_threads(target);
3735  }
3736  */
3737 
3738  gtstate = (struct xen_vm_thread_state *)target->global_thread->state;
3739  xtstate = (struct xen_vm_thread_state *)tthread->state;
3740  tid = tthread->tid;
3741  }
3742 
3744  "thread %d at EIP 0x%"PRIxADDR": "
3745  "dbgreg[6]=0x%"DRF", eflags=0x%"RF"\n",
3746  tid, ipval, xtstate->context.debugreg[6],
3747  xtstate->context.user_regs.eflags);
3748 
3749  /* handle the triggered probe based on its event type */
3750  if (xtstate->context.debugreg[6] & 0x4000
3751  || (xstate->hvm && xstate->hvm_monitor_trap_flag_set)
3752  || (tthread->emulating_debug_mmod)) {
3753  vdebug(3,LA_TARGET,LF_XV,"new single step debug event (MTF %d)\n",
3754  xstate->hvm_monitor_trap_flag_set);
3755 
3756  /*
3757  * Three cases:
3758  * 1) We had to emulate a breakpoint/singlestep for a shared
3759  * page breakpoint; or
3760  * 2) we single-stepped an instruction that could have taken
3761  * us to a userspace EIP; or
3762  * 3) somehow the kernel jumped to one!
3763  */
3764  if (tthread->emulating_debug_mmod) {
3765  //&& __xen_vm_in_userspace(target,cpl,ipval)) {
3766  /* This is a shared-page singlestep. */
3767  tmp_ipval = ipval - target->arch->breakpoint_instrs_len;
3768 
3770  "emulating debug memmod at ss for tid %"PRIiTID
3771  " at paddr 0x%"PRIxADDR" (vaddr 0x%"PRIxADDR")\n",
3772  tid,tthread->emulating_debug_mmod->addr,tmp_ipval);
3773 
3774  target_os_emulate_ss_handler(target,tid,tthread->tidctxt,
3775  tthread->emulating_debug_mmod);
3776 
3777  /* Clear the status bits right now. */
3778  /*
3779  xtstate->context.debugreg[6] = 0;
3780  OBJSDIRTY(tthread);
3781 
3782  gtstate->context.debugreg[6] = 0;
3783  OBJSDIRTY(target->global_thread);
3784  vdebug(5,LA_TARGET,LF_XV,"cleared status debug reg 6\n");
3785  */
3786 
3787  goto out_ss_again;
3788  }
3789  else if (target->sstep_thread
3790  && ((target->sstep_thread->tpc
3791  && target->sstep_thread->tpc->probepoint->can_switch_context)
3792  || (__xen_vm_in_userspace(target,cpl,ipval)
3793  && !target->sstep_thread_overlay))) {
3794  sstep_thread = target->sstep_thread;
3795  }
3796  else if (target->sstep_thread
3797  && target->sstep_thread_overlay) {
3798  if (__xen_vm_in_userspace(target,cpl,ipval)) {
3800  "single step event in overlay tid %"PRIiTID
3801  " (tgid %"PRIiTID"); notifying overlay\n",
3802  tid,target->sstep_thread_overlay->base_tid);
3803 
3804  /* Clear the status bits right now. */
3805  xtstate->context.debugreg[6] = 0;
3806  OBJSDIRTY(tthread);
3807 
3808  return target_notify_overlay(target->sstep_thread_overlay,
3809  EXCEPTION_SINGLESTEP,
3810  tid,ipval,again);
3811  }
3812  else {
3813  /*
3814  * This is a thread that was stepping in userspace,
3815  * and found itself in the kernel. This can happen
3816  * if we have to use HVM global monitor trap flag
3817  * instead of EFLAGS TF. Even if we had setup the
3818  * MTF to single step the guest in userspace, that
3819  * may not be what happens. For instance, suppose
3820  * the instruction causes a page fault, or that a
3821  * clock interrupt happened. We'll find ourselves
3822  * stepping in the kernel, in its handlers, I
3823  * believe.
3824  *
3825  * We assume the thread did *not* do its singlestep
3826  * of the breakpoint's original instruction. If it
3827  * had, the EIP for the MTF event would still be in
3828  * userspace -- because single step debug exceptions
3829  * are traps following an instruction's execution.
3830  * Thus, we need to put the breakpoint back into
3831  * place and remove all state setup to handle it,
3832  * EXCEPT to note down that this thread's overlay SS
3833  * was interrupted at probepoint X, but that the
3834  * prehandler was already run. This way, we won't
3835  * run the prehandler again.
3836  *
3837  * This of course is somewhat bogus, because it
3838  * might affect vCPU state (we hit the BP twice
3839  * instead of just once)... but whatever.
3840  *
3841  * See target_thread::interrupted_ss_probepoint .
3842  */
3844  "single step event in overlay tid %"PRIiTID
3845  " (tgid %"PRIiTID") INTO KERNEL (at 0x%"PRIxADDR")"
3846  " notifying overlay\n",
3847  tid,target->sstep_thread_overlay->base_tid,ipval);
3848 
3849  /* Clear the status bits right now. */
3850  xtstate->context.debugreg[6] = 0;
3851  OBJSDIRTY(tthread);
3852 
3853  /*
3854  * Notify the overlay that a "bogus" singlestep
3855  * happened.
3856  */
3857  return target_notify_overlay(
3858  target->sstep_thread_overlay,
3859  EXCEPTION_SINGLESTEP_BOGUS,
3860  tid,ipval,again);
3861  }
3862  }
3863  else
3864  sstep_thread = NULL;
3865 
3866  target->sstep_thread = NULL;
3867 
3868  if (xtstate->context.user_regs.eflags & X86_EF_TF
3869  || (xstate->hvm && xstate->hvm_monitor_trap_flag_set)) {
3870  handle_inferred_sstep:
3871  if (!tthread->tpc) {
3872  if (sstep_thread && __xen_vm_in_userspace(target,cpl,ipval)) {
3873  vwarn("single step event (status reg and eflags) into"
3874  " userspace; trying to handle in sstep thread"
3875  " %"PRIiTID"!\n",sstep_thread->tid);
3876  goto handle_sstep_thread;
3877  }
3878  else {
3879  target->ops->handle_step(target,tthread,NULL);
3880 
3881  /* Clear the status bits right now. */
3882  xtstate->context.debugreg[6] = 0;
3883  OBJSDIRTY(tthread);
3884  /*
3885  * MUST DO THIS. If we are going to modify both the
3886  * current thread's CPU state possibly, and possibly
3887  * operate on the global thread's CPU state, we need
3888  * to clear the global thread's debug reg status
3889  * here; this also has the important side effect of
3890  * forcing a merge of the global thread's debug reg
3891  * state; see flush_global_thread !
3892  */
3893  gtstate->context.debugreg[6] = 0;
3894  OBJSDIRTY(target->global_thread);
3895  vdebug(5,LA_TARGET,LF_XV,"cleared status debug reg 6\n");
3896 
3897  goto out_ss_again;
3898  /*
3899  verror("single step event (status reg and eflags), but"
3900  " no handling context in thread %"PRIiTID"!"
3901  " letting user handle.\n",tthread->tid);
3902  goto out_paused;
3903  */
3904  }
3905  }
3906 
3907  /* Save the currently handling probepoint;
3908  * handle_step may clear tpc.
3909  */
3910  spp = tthread->tpc->probepoint;
3911 
3912  target->ops->handle_step(target,tthread,tthread->tpc->probepoint);
3913 
3914  /* Clear the status bits right now. */
3915  xtstate->context.debugreg[6] = 0;
3916  OBJSDIRTY(tthread);
3917  /*
3918  * MUST DO THIS. If we are going to modify both the
3919  * current thread's CPU state possibly, and possibly
3920  * operate on the global thread's CPU state, we need
3921  * to clear the global thread's debug reg status
3922  * here; this also has the important side effect of
3923  * forcing a merge of the global thread's debug reg
3924  * state; see flush_global_thread !
3925  */
3926  if (spp->style == PROBEPOINT_HW) {
3927  gtstate->context.debugreg[6] = 0;
3928  OBJSDIRTY(target->global_thread);
3929  }
3930  vdebug(5,LA_TARGET,LF_XV,"cleared status debug reg 6\n");
3931 
3932  goto out_ss_again;
3933  }
3934  else if (sstep_thread) {
3936  "thread %"PRIiTID" single stepped can_context_switch"
3937  " instr; trying to handle exception in old thread!\n",
3938  sstep_thread->tid);
3939 
3940  handle_sstep_thread:
3941  target->ops->handle_step(target,sstep_thread,
3942  sstep_thread->tpc->probepoint);
3943 
3944  /* Clear the status bits right now. */
3945  xtstate->context.debugreg[6] = 0;
3946  OBJSDIRTY(tthread);
3947  vdebug(5,LA_TARGET,LF_XV,"cleared status debug reg 6\n");
3948 
3949  goto out_ss_again;
3950  }
3951  else if (__xen_vm_in_userspace(target,cpl,ipval)) {
3952  verror("user-mode debug event (single step) at 0x%"PRIxADDR
3953  "; debug status reg 0x%"DRF"; eflags 0x%"RF
3954  "; skipping handling!\n",
3955  ipval,xtstate->context.debugreg[6],
3956  xtstate->context.user_regs.eflags);
3957  goto out_err_again;
3958  }
3959  else {
3960  target->ops->handle_step(target,tthread,NULL);
3961 
3962  /* Clear the status bits right now. */
3963  xtstate->context.debugreg[6] = 0;
3964  OBJSDIRTY(tthread);
3965  vdebug(5,LA_TARGET,LF_XV,"cleared status debug reg 6\n");
3966 
3967  goto out_ss_again;
3968  }
3969  }
3970  else {
3971  vdebug(3,LA_TARGET,LF_XV,"new (breakpoint?) debug event\n");
3972  /*
3973  * Some Xen kernels send us a debug event after a successful
3974  * singlestep, but they do not set the right flag to notify
3975  * us. So, if the TF flag is set, and we were expecting a
3976  * singlestep to happen, and there is not a breakpoint
3977  * exception instead -- assume that it is a singlestep
3978  * event.
3979  *
3980  * So, save it off in a special variable and handle below.
3981  */
3982  bogus_sstep_thread = target->sstep_thread;
3983  target->sstep_thread = NULL;
3984 
3985  dreg = -1;
3986 
3987  /* Check the hw debug status reg first */
3988 
3989  /* Only check the 4 low-order bits */
3990  if (xtstate->context.debugreg[6] & 15) {
3991  if (xtstate->context.debugreg[6] & 0x1)
3992  dreg = 0;
3993  else if (xtstate->context.debugreg[6] & 0x2)
3994  dreg = 1;
3995  else if (xtstate->context.debugreg[6] & 0x4)
3996  dreg = 2;
3997  else if (xtstate->context.debugreg[6] & 0x8)
3998  dreg = 3;
3999  }
4000 
4001  /*
4002  * More hypervisor bugs: some Xens don't appropriately
4003  * signal us for hw debug exceptions, and leave a stale
4004  * value in DR6. So, even if the debugreg[6] status
4005  * indicated that an HW debug reg was hit, check (if the HW
4006  * debug reg is for a breakpoint) that EIP is the same as
4007  * that debug reg! If it is not, don't believe DR6, and
4008  * look for soft breakpoints.
4009  */
4010  if (dreg > -1) {
4011  dpp = (struct probepoint *) \
4012  g_hash_table_lookup(tthread->hard_probepoints,
4013  (gpointer)ipval);
4014  if (!dpp) {
4015  dpp = (struct probepoint *) \
4016  g_hash_table_lookup(target->global_thread->hard_probepoints,
4017  (gpointer)ipval);
4018  if (!dpp) {
4019  verror("DR6 said hw dbg reg %d at 0x%"DRF" was hit;"
4020  " but EIP 0x%"PRIxADDR" in tid %"PRIiTID" does not"
4021  " match! ignoring hw dbg status; continuing"
4022  " other checks!\n",
4023  dreg,xtstate->context.debugreg[dreg],ipval,
4024  tthread->tid);
4025  dreg = -1;
4026 
4027  /*
4028  * Clear DR6 in global thread; it is clearly wrong!
4029  *
4030  * MUST DO THIS. If we are going to modify both the
4031  * current thread's CPU state possibly, and possibly
4032  * operate on the global thread's CPU state, we need to
4033  * clear the global thread's debug reg status here; this
4034  * also has the important side effect of forcing a merge
4035  * of the global thread's debug reg state; see
4036  * flush_global_thread !
4037  */
4038  gtstate->context.debugreg[6] = 0;
4039  OBJSDIRTY(target->global_thread);
4040  }
4041  }
4042  }
4043 
4044  if (dreg > -1) {
4045  if (__xen_vm_in_userspace(target,cpl,ipval)) {
4046  vwarn("user-mode debug event (hw dbg reg)"
4047  " at 0x%"PRIxADDR"; debug status reg 0x%"DRF"; eflags"
4048  " 0x%"RF"; trying to handle in global thread!\n",
4049  ipval,xtstate->context.debugreg[6],
4050  xtstate->context.user_regs.eflags);
4051  }
4052 
4053  /* If we are relying on the status reg to tell us,
4054  * then also read the actual hw debug reg to get the
4055  * address we broke on.
4056  */
4057  errno = 0;
4058  ipval = xtstate->context.debugreg[dreg];
4059 
4061  "found hw break (status) in dreg %d on 0x%"PRIxADDR"\n",
4062  dreg,ipval);
4063  }
4064  else if (__xen_vm_in_userspace(target,cpl,ipval)) {
4065  overlay = target_lookup_overlay(target,tid);
4066 
4067  /* If we didn't find one, try to find its leader as an overlay. */
4068  if (!overlay) {
4069  overlay_leader_tid =
4070  target_os_thread_get_leader(target,tthread->tid);
4071  overlay = target_lookup_overlay(target,overlay_leader_tid);
4072  if (overlay) {
4074  "found yet-unknown thread %d with"
4075  " overlay leader %d; will notify!\n",
4076  tthread->tid,overlay_leader_tid);
4077  }
4078  }
4079 
4080  if (overlay) {
4081  /*
4082  * Try to notify the overlay!
4083  */
4085  "user-mode debug event in overlay tid %"PRIiTID
4086  " (tgid %"PRIiTID") (not single step, not hw dbg reg)"
4087  " at 0x%"PRIxADDR"; debug status reg 0x%"DRF"; eflags"
4088  " 0x%"RF"; passing to overlay!\n",
4089  tid,overlay->base_tid,ipval,
4090  xtstate->context.debugreg[6],
4091  xtstate->context.user_regs.eflags);
4092 
4093  /*
4094  * We don't really know what kind of exception it
4095  * is. We can only assume it might be a breakpoint,
4096  * and set additional singlestep indicator flags if
4097  * possible. Some Xens don't get dr6 set
4098  * appropriately, it seems, so we have to catch the
4099  * inferred single step. But just cause we
4100  * commanded a single step doesn't mean it happened
4101  * appropriately, etc... the overlay has to handle
4102  * it.
4103  */
4104  target_exception_flags_t bp_ef = EXCEPTION_BREAKPOINT;
4105  if (xtstate->context.debugreg[6] & 0x4000) {
4106  bp_ef |= EXCEPTION_SINGLESTEP;
4108  "single step debug event in overlay\n");
4109  }
4110  else if (bogus_sstep_thread
4111  && target->sstep_thread_overlay == overlay
4112  && xtstate->context.user_regs.eflags & X86_EF_TF) {
4114  "inferred single step debug event in overlay\n");
4115  bp_ef |= EXCEPTION_SINGLESTEP_CMD;
4116  }
4117 
4118  /* Clear the status bits right now. */
4119  xtstate->context.debugreg[6] = 0;
4120  OBJSDIRTY(tthread);
4121 
4122  return target_notify_overlay(overlay,bp_ef,
4123  tid,ipval,again);
4124  }
4125  else {
4126  /*
4127  * Try to lookup paddr for ipval; if it matches and
4128  * hits as a memmod... then emulate a breakpoint.
4129  *
4130  * To do this, we must mark this kthread as
4131  * emulating a breakpoint at a memmod; flip the
4132  * memmod; then catch its singlestep above; and
4133  * flip the memmod back.
4134  */
4135 
4136  /* XXX: this is bad. We use the base target's
4137  * breakpoint_instr_len to try to detect an overlay!
4138  * It's ok for Xen and the Xen-process overlay, but
4139  * it's a definite abstraction breakdown.
4140  */
4141  tmp_ipval = ipval - target->arch->breakpoint_instrs_len;
4142  rc = xen_vm_addr_v2p(target,TID_GLOBAL,tmp_ipval,&paddr);
4143  if (!rc)
4144  pmmod = target_memmod_lookup(target,TID_GLOBAL,paddr,1);
4145  if (!rc && pmmod) {
4146  /*
4147  * Emulate it!
4148  */
4150  "emulating debug memmod at bp for tid %"PRIiTID
4151  " at paddr 0x%"PRIxADDR" (vaddr 0x%"PRIxADDR")\n",
4152  tid,pmmod->addr,tmp_ipval);
4153 
4154  if (target_os_emulate_bp_handler(target,tid,
4155  tthread->tidctxt,
4156  pmmod)) {
4157  verror("could not emulate debug memmod for"
4158  " tid %"PRIiTID" at paddr 0x%"PRIxADDR"\n",
4159  tid,pmmod->addr);
4160  goto out_err_again;
4161  }
4162  else {
4163  /* Clear the status bits right now. */
4164  xtstate->context.debugreg[6] = 0;
4165  OBJSDIRTY(tthread);
4166 
4167  gtstate->context.debugreg[6] = 0;
4168  OBJSDIRTY(target->global_thread);
4170  "cleared status debug reg 6\n");
4171 
4172  goto out_bp_again;
4173  }
4174  }
4175  else {
4176  verror("user-mode debug event (not single step, not"
4177  " hw dbg reg) at 0x%"PRIxADDR"; debug status reg"
4178  " 0x%"DRF"; eflags 0x%"RF"; skipping handling!\n",
4179  tmp_ipval,xtstate->context.debugreg[6],
4180  xtstate->context.user_regs.eflags);
4181  goto out_err_again;
4182  }
4183  }
4184  }
4185  else {
4187  "dreg status was 0x%"PRIxREGVAL"; trying eip method\n",
4188  (ADDR)xtstate->context.debugreg[6]);
4189 
4190  if (xtstate->dr[0] == ipval)
4191  dreg = 0;
4192  else if (xtstate->dr[1] == ipval)
4193  dreg = 1;
4194  else if (xtstate->dr[2] == ipval)
4195  dreg = 2;
4196  else if (xtstate->dr[3] == ipval)
4197  dreg = 3;
4198 
4199  if (dreg > -1)
4201  "found hw break (eip) in dreg %d on 0x%"PRIxADDR"\n",
4202  dreg,ipval);
4203  else {
4204  if (xtstate != gtstate) {
4205  /*
4206  * Check the global thread too; might be a
4207  * global breakpoint/watchpoint.
4208  */
4209  if (gtstate->dr[0] == ipval)
4210  dreg = 0;
4211  else if (gtstate->dr[1] == ipval)
4212  dreg = 1;
4213  else if (gtstate->dr[2] == ipval)
4214  dreg = 2;
4215  else if (gtstate->dr[3] == ipval)
4216  dreg = 3;
4217 
4218  if (dreg > -1)
4220  "found hw break (eip) in GLOBAL dreg %d on 0x%"PRIxADDR"\n",
4221  dreg,ipval);
4222  else
4224  "did NOT find hw break (eip) on 0x%"PRIxADDR
4225  " (neither global nor per-thread!)\n",
4226  ipval);
4227  }
4228  else {
4230  "did NOT find hw break (eip) on 0x%"PRIxADDR"\n",
4231  ipval);
4232  }
4233  }
4234  }
4235 
4236  if (dreg > -1) {
4237  /* Found HW breakpoint! */
4238  dpp = (struct probepoint *) \
4239  g_hash_table_lookup(tthread->hard_probepoints,
4240  (gpointer)ipval);
4241 
4242  if (dpp) {
4244  "found hw break in thread %"PRIiTID"\n",
4245  tthread->tid);
4246  }
4247  else {
4248  /* Check the global thread if not already checking it! */
4249  dpp = (struct probepoint *) \
4250  g_hash_table_lookup(target->global_thread->hard_probepoints,
4251  (gpointer)ipval);
4252  if (!dpp) {
4253  verror("could not find probepoint for hw dbg reg %d"
4254  " in current or global threads!\n",dreg);
4255  goto out_err;
4256  }
4257  else {
4259  "found hw break in global thread!\n");
4260 
4261  /*
4262  * MUST DO THIS. If we are going to modify both
4263  * the current thread's CPU state possibly, and
4264  * possibly operate on the global thread's CPU
4265  * state, we need to clear the global thread's
4266  * debug reg status here; this also has the
4267  * important side effect of forcing a merge of
4268  * the global thread's debug reg state; see
4269  * flush_global_thread !
4270  */
4271  gtstate->context.debugreg[6] = 0;
4272  OBJSDIRTY(target->global_thread);
4273  }
4274  }
4275 
4276  /* BEFORE we run the bp handler:
4277  *
4278  * If the domain happens to be in singlestep mode, and
4279  * we are hitting a breakpoint anyway... we have to
4280  * handle the breakpoint, singlestep ourselves, AND
4281  * THEN leave the processor in single step mode.
4282  */
4283  if (0 && xtstate->context.user_regs.eflags & X86_EF_TF) {
4284  //target->sstep_leave_enabled = 1;
4285  }
4286 
4287  /* Run the breakpoint handler. */
4288  target->ops->handle_break(target,tthread,dpp,
4289  xtstate->context.debugreg[6] & 0x4000);
4290 
4291  /* Clear the status bits right now. */
4292  xtstate->context.debugreg[6] = 0;
4293  OBJSDIRTY(tthread);
4294  vdebug(5,LA_TARGET,LF_XV,"cleared status debug reg 6\n");
4295 
4296  goto out_bp_again;
4297  }
4298  else if ((dpp = (struct probepoint *) \
4299  g_hash_table_lookup(target->soft_probepoints,
4300  (gpointer)(ipval - target->arch->breakpoint_instrs_len)))) {
4301  /* Run the breakpoint handler. */
4302  target->ops->handle_break(target,tthread,dpp,
4303  xtstate->context.debugreg[6] & 0x4000);
4304 
4305  /* Clear the status bits right now. */
4306  xtstate->context.debugreg[6] = 0;
4307  OBJSDIRTY(tthread);
4308  vdebug(5,LA_TARGET,LF_XV,"cleared status debug reg 6\n");
4309 
4310  goto out_bp_again;
4311  }
4312  else if (xtstate->context.user_regs.eflags & X86_EF_TF
4313  && tthread
4314  && tthread->tpc
4315  && tthread->tpc->probepoint) {
4317  "thread-inferred single step for dom %d (TF set, but not"
4318  " dreg status!) at 0x%"PRIxADDR" (stepped %lu bytes"
4319  " from probepoint)!\n",
4320  xstate->id,ipval,ipval - tthread->tpc->probepoint->addr);
4321  sstep_thread = tthread;
4322  goto handle_inferred_sstep;
4323  }
4324  else if (xtstate->context.user_regs.eflags & X86_EF_TF
4325  && bogus_sstep_thread
4326  && bogus_sstep_thread->tpc
4327  && bogus_sstep_thread->tpc->probepoint) {
4328  bogus_sstep_probepoint_addr =
4329  bogus_sstep_thread->tpc->probepoint->addr;
4330 
4331  /*
4332  * We have to assume it's valid. We can't do expensive
4333  * stuff and see if it could have gotten here validly;
4334  * we could have stepped a RET, IRET, anything.
4335  */
4337  "inferred single step for dom %d (TF set, but not"
4338  " dreg status!) at 0x%"PRIxADDR" (stepped %d bytes"
4339  " from probepoint)!\n",
4340  xstate->id,ipval,ipval - bogus_sstep_probepoint_addr);
4341  sstep_thread = bogus_sstep_thread;
4342  goto handle_inferred_sstep;
4343  }
4344  else if (xtstate->context.user_regs.eflags & X86_EF_TF) {
4345  //phantom:
4346  vwarn("phantom single step for dom %d (no breakpoint"
4347  " set either!); letting user handle fault at"
4348  " 0x%"PRIxADDR"!\n",xstate->id,ipval);
4349  goto out_paused;
4350  }
4351  else {
4352  vwarn("could not find hardware bp and not sstep'ing;"
4353  " letting user handle fault at 0x%"PRIxADDR"!\n",
4354  ipval);
4355  goto out_paused;
4356  }
4357  }
4358  }
4359 
4360  out_err:
4361  target->monitorhandling = 0;
4362  if (again)
4363  *again = 0;
4364  return TSTATUS_ERROR;
4365 
4366  out_err_again:
4367  target->monitorhandling = 0;
4368  if (again)
4369  *again = -1;
4370  return TSTATUS_ERROR;
4371 
4372  out_paused:
4373  target->monitorhandling = 0;
4374  if (again)
4375  *again = 0;
4376  return TSTATUS_PAUSED;
4377 
4378  out_bp_again:
4379  target->monitorhandling = 0;
4380  if (again)
4381  *again = 1;
4382  return TSTATUS_PAUSED;
4383 
4384  out_ss_again:
4385  target->monitorhandling = 0;
4386  if (again)
4387  *again = 2;
4388  return TSTATUS_PAUSED;
4389 }
4390 
4391 int xen_vm_evloop_handler(int readfd,int fdtype,void *state) {
4392  struct target *target = (struct target *)state;
4393  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
4394  int again;
4395  target_status_t retval;
4396  int vmid = -1;
4397 
4398  if (xen_vm_virq_or_vmp_read(target,&vmid)) {
4399  return EVLOOP_HRET_BADERROR;
4400  }
4401 
4402  if (vmid == -1)
4403  return EVLOOP_HRET_SUCCESS;
4404 
4405  if (vmid != 0 && vmid != xstate->id)
4406  return EVLOOP_HRET_SUCCESS;
4407 
4408  again = 0;
4409  retval = xen_vm_handle_exception(target,0,&again,NULL);
4410  if (retval == TSTATUS_ERROR && again == 0)
4411  return EVLOOP_HRET_ERROR;
4412  /*
4413  * XXX: this is the "abort to user handler" case -- but in this
4414  * case, we have no user, basically. Fix this.
4415  */
4416  //else if (retval == TSTATUS_PAUSED && again == 0)
4417  // return EVLOOP_HRET_SUCCESS;
4418 
4419  if (retval != TSTATUS_RUNNING)
4420  __xen_vm_resume(target,0);
4421 
4422  return EVLOOP_HRET_SUCCESS;
4423 }
4424 
4425 int xen_vm_attach_evloop(struct target *target,struct evloop *evloop) {
4426  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
4427 
4428  if (!target->evloop) {
4429  verror("no evloop attached!\n");
4430  return -1;
4431  }
4432 
4433  /* get a select()able file descriptor of the event channel */
4434  xstate->evloop_fd = xen_vm_virq_or_vmp_get_fd(target);
4435  if (xstate->evloop_fd == -1) {
4436  verror("event channel not initialized\n");
4437  return -1;
4438  }
4439 
4440  evloop_set_fd(target->evloop,xstate->evloop_fd,EVLOOP_FDTYPE_R,
4441  xen_vm_evloop_handler,target);
4442 
4444  "added evloop readfd %d event channel\n",xstate->evloop_fd);
4445 
4446  return 0;
4447 }
4448 
4449 int xen_vm_detach_evloop(struct target *target) {
4450  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
4451 
4452  if (xstate->evloop_fd < 0)
4453  return 0;
4454 
4455  evloop_unset_fd(target->evloop,xstate->evloop_fd,EVLOOP_FDTYPE_A);
4456 
4457  xstate->evloop_fd = -1;
4458 
4459  return 0;
4460 }
4461 
4462 static target_status_t xen_vm_monitor(struct target *target) {
4463  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
4464  int ret, fd;
4465  struct timeval tv;
4466  fd_set inset;
4467  int again;
4468  target_status_t retval;
4469  int vmid = -1;
4470 
4471  /* get a select()able file descriptor of the event channel */
4472  fd = xen_vm_virq_or_vmp_get_fd(target);
4473  if (fd == -1) {
4474  verror("event channel not initialized\n");
4475  return TSTATUS_ERROR;
4476  }
4477 
4478  while (1) {
4479  tv.tv_sec = 0;
4480  tv.tv_usec = 50;
4481  FD_ZERO(&inset);
4482  FD_SET(fd,&inset);
4483 
4484  /* wait for a domain to trigger the VIRQ */
4485  ret = select(fd+1,&inset,NULL,NULL,&tv);
4486  if (ret == -1) // error (e.g., EINTR); retry
4487  continue;
4488 
4489  if (!FD_ISSET(fd, &inset))
4490  continue; // nothing in eventchn
4491 
4492  if (xen_vm_virq_or_vmp_read(target,&vmid)) {
4493  verror("failed to unmask event channel\n");
4494  break;
4495  }
4496 
4497  /* we've got something from eventchn. let's see what it is! */
4498  if (vmid != 0 && vmid != xstate->id)
4499  continue; // not the event that we are looking for
4500 
4501  again = 0;
4502  retval = xen_vm_handle_exception(target,0,&again,NULL);
4503  if (retval == TSTATUS_ERROR && again == 0) {
4504  target->needmonitorinterrupt = 0;
4505  return retval;
4506  }
4507  else if (target->needmonitorinterrupt) {
4508  target->needmonitorinterrupt = 0;
4509  return TSTATUS_INTERRUPTED;
4510  }
4511 
4512  //else if (retval == TSTATUS_PAUSED && again == 0)
4513  // return retval;
4514 
4515  if (xen_vm_load_dominfo(target)) {
4516  vwarn("could not load dominfo for dom %d, trying to unpause anyway!\n",
4517  xstate->id);
4518  __xen_vm_resume(target,0);
4519  }
4520  else if (xstate->dominfo.paused) {
4521  __xen_vm_resume(target,0);
4522  }
4523  }
4524 
4525  return TSTATUS_ERROR; /* Never hit, just compiler foo */
4526 }
4527 
4528 static target_status_t xen_vm_poll(struct target *target,struct timeval *tv,
4529  target_poll_outcome_t *outcome,int *pstatus) {
4530  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
4531  int ret, fd;
4532  struct timeval itv;
4533  fd_set inset;
4534  int again;
4535  target_status_t retval;
4536  int vmid = -1;
4537 
4538  /* get a select()able file descriptor of the event channel */
4539  fd = xen_vm_virq_or_vmp_get_fd(target);
4540  if (fd == -1) {
4541  verror("event channel not initialized\n");
4542  return TSTATUS_ERROR;
4543  }
4544 
4545  if (!tv) {
4546  itv.tv_sec = 0;
4547  itv.tv_usec = 0;
4548  tv = &itv;
4549  }
4550  FD_ZERO(&inset);
4551  FD_SET(fd,&inset);
4552 
4553  /* see if the VIRQ is lit for this domain */
4554  ret = select(fd+1,&inset,NULL,NULL,tv);
4555  if (ret == 0) {
4556  if (outcome)
4557  *outcome = POLL_NOTHING;
4558  return TSTATUS_RUNNING;
4559  }
4560 
4561  if (!FD_ISSET(fd, &inset)) {
4562  if (outcome)
4563  *outcome = POLL_NOTHING;
4564  return TSTATUS_RUNNING;
4565  }
4566 
4567  if (xen_vm_virq_or_vmp_read(target,&vmid)) {
4568  verror("failed to unmask event channel\n");
4569  if (outcome)
4570  *outcome = POLL_ERROR;
4571  return TSTATUS_ERROR;
4572  }
4573 
4574  /* we've got something from eventchn. let's see what it is! */
4575  if (vmid != 0 && vmid != xstate->id) {
4576  if (outcome)
4577  *outcome = POLL_NOTHING;
4578  return TSTATUS_RUNNING; // not the event that we are looking for
4579  }
4580 
4581  again = 0;
4582  retval = xen_vm_handle_exception(target,0,&again,NULL);
4583  if (pstatus)
4584  *pstatus = again;
4585 
4586  return retval;
4587 }
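/*
 * Illustrative sketch (not from this file; assumes the generic
 * target_poll wrapper dispatches to the method above): a non-blocking
 * check for a debug event. A zero timeval means "check and return";
 * TSTATUS_RUNNING with POLL_NOTHING means no event was pending.
 */
#if 0
void example_poll_once(struct target *target) {
    struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
    target_poll_outcome_t outcome;
    int pstatus;

    if (target_poll(target,&tv,&outcome,&pstatus) == TSTATUS_PAUSED) {
	/* An event was handled; pstatus mirrors the *again code. */
	target_resume(target);
    }
}
#endif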
4588 
4589 static unsigned char *xen_vm_read(struct target *target,ADDR addr,
4590  unsigned long target_length,
4591  unsigned char *buf) {
4592  return xen_vm_read_pid(target,TID_GLOBAL,addr,target_length,buf);
4593 }
4594 
4595 static unsigned long xen_vm_write(struct target *target,ADDR addr,
4596  unsigned long length,unsigned char *buf) {
4597  return xen_vm_write_pid(target,TID_GLOBAL,addr,length,buf);
4598 }
4599 
4600 /*
4601  * We have to either load pgd from vcpu context (for a running task), or
4602  * from the task struct (for a swapped out task).
4603  *
4604  * NB: @cr3 will be a physical address, not a kernel virtual address.
4605  * The mm_struct contains a virtual address; but the CR3 register of
4606  * course contains a physical one. And the CR3 content is not quite a
4607  * physical address, sometimes, it seems.
4608  */
4609 static int __xen_vm_pgd(struct target *target,tid_t tid,uint64_t *pgd) {
4610  struct xen_vm_state *xstate;
4611  struct target_thread *tthread;
4612  struct xen_vm_thread_state *xtstate;
4613  REGVAL cr0 = 0,cr4 = 0,msr_efer = 0,cpuid_edx = 0;
4614 
4615  xstate = (struct xen_vm_state *)target->state;
4616 
4617  if (tid == TID_GLOBAL) {
4618  tthread = __xen_vm_load_current_thread(target,0,1);
4619  if (!tthread) {
4620  verror("could not load global thread!\n");
4621  return -1;
4622  }
4623  xtstate = (struct xen_vm_thread_state *)tthread->state;
4624 
4625  /*
4626  * Use cached pgd if possible.
4627  */
4628  if (OBJVALID(tthread) && xtstate->pgd_phys > 0) {
4629  *pgd = xtstate->pgd_phys;
4630 
4631  vdebug(12,LA_TARGET,LF_XV,
4632  "tid %"PRIiTID" pgd (phys) = 0x%"PRIx64" (cached)\n",tid,*pgd);
4633 
4634  return 0;
4635  }
4636 
4637  if (xtstate->context.vm_assist & (1 << VMASST_TYPE_pae_extended_cr3)) {
4638  *pgd = ((uint64_t)xen_cr3_to_pfn(xtstate->context.ctrlreg[3])) \
4639  << XC_PAGE_SHIFT;
4640  }
4641  else {
4642  *pgd = xtstate->context.ctrlreg[3] & ~(__PAGE_SIZE - 1);
4643  }
4644 
4645  /*
4646  * XXX NB: Also load the current paging flags! This seems to be
4647  * the right place to do it... realistically, the flags are not
4648  * going to change much except during boot... or in the future
4649  * where there are nested HVMs! I suppose, in the future, we'll
4650  * have to have these set on a per-thread basis...
4651  *
4652  * (Pass cpuid_edx=REGVALMAX for now to make sure the NOPSE*
4653  * bits don't get set -- until we actually bother to find the
4654  * cpuid info.)
4655  */
4656  cr0 = xtstate->context.ctrlreg[0];
4657  cr4 = xtstate->context.ctrlreg[4];
4658  if (xstate->hvm && xstate->hvm_cpu)
4659  msr_efer = xstate->hvm_cpu->msr_efer;
4660  cpuid_edx = ADDRMAX;
4661 
4662  if (target_arch_x86_v2p_get_flags(target,cr0,cr4,msr_efer,
4663  cpuid_edx,&xstate->v2p_flags)) {
4664  if (target->arch->type == ARCH_X86_64) {
4665  verror("could not determine v2p_flags! pgd walks might fail;"
4666  " assuming 64-bit long mode and paging!\n");
4667  xstate->v2p_flags = ARCH_X86_V2P_LMA;
4668  }
4669  else {
4670  verror("could not determine v2p_flags! pgd walks might fail;"
4671  " assuming 32-bit mode and PAE (and auto-PSE)!\n");
4672  xstate->v2p_flags = ARCH_X86_V2P_PAE;
4673  }
4674  }
4675 
4676  /* Also quickly set the V2P_PV flag if this domain is paravirt. */
4677  if (!xstate->hvm)
4678  xstate->v2p_flags |= ARCH_X86_V2P_PV;
4679 
4680  if (vdebug_is_on(8,LA_TARGET,LF_XV)) {
4681  char buf[256];
4682  buf[0] = '\0';
4683  target_arch_x86_v2p_flags_snprintf(target,xstate->v2p_flags,
4684  buf,sizeof(buf));
4685  vdebug(8,LA_TARGET,LF_TARGET,"v2p_flags = %s\n",buf);
4686  }
4687 
4688  xtstate->pgd_phys = *pgd;
4689  }
4690  else {
4691  tthread = xen_vm_load_thread(target,tid,0);
4692  if (!tthread) {
4693  verror("could not load tid %"PRIiTID"!\n",tid);
4694  return -1;
4695  }
4696  xtstate = (struct xen_vm_thread_state *)tthread->state;
4697  if (!xtstate) {
4698  xtstate = (struct xen_vm_thread_state *)calloc(1,sizeof(*xtstate));
4699  tthread->state = xtstate;
4700  }
4701 
4702  /*
4703  * Use cached pgd if possible.
4704  */
4705  if (OBJVALID(tthread) && xtstate->pgd_phys > 0) {
4706  *pgd = xtstate->pgd_phys;
4707 
4708  vdebug(12,LA_TARGET,LF_XV,
4709  "tid %"PRIiTID" pgd (phys) = 0x%"PRIx64" (cached)\n",tid,*pgd);
4710 
4711  return 0;
4712  }
4713 
4714  /*
4715  if (target->wordsize == 8) {
4716  if (xtstate->pgd >= xstate->kernel_start_addr)
4717  *pgd = xtstate->pgd - xstate->kernel_start_addr;
4718  else
4719 #if __WORDSIZE == 64
4720  *pgd = xtstate->pgd - 0xffff810000000000UL;
4721 #else
4722  *pgd = xtstate->pgd - 0xffff810000000000ULL;
4723 #endif
4724  }
4725  else {
4726  *pgd = xtstate->pgd - xstate->kernel_start_addr;
4727  }
4728  */
4729 
4730  if (target_os_thread_get_pgd_phys(target,tid,pgd)) {
4731  verror("could not get phys pgd for tid %"PRIiTID": %s!\n",
4732  tid,strerror(errno));
4733  return -1;
4734  }
4735 
4736  xtstate->pgd_phys = *pgd;
4737  }
4738 
4739  vdebug(12,LA_TARGET,LF_XV,
4740  "tid %"PRIiTID" pgd (phys) = 0x%"PRIx64"\n",tid,*pgd);
4741 
4742  return 0;
4743 }
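/*
 * Illustrative sketch (not from this file): the v2p_flags computation
 * above is driven by the standard x86 control bits -- CR0.PG (bit 31)
 * gates paging, EFER.LMA (bit 10) indicates 64-bit long mode, and
 * CR4.PAE (bit 5) selects PAE. Roughly:
 */
#if 0
static int example_paging_mode(uint64_t cr0,uint64_t cr4,uint64_t efer) {
    if (!(cr0 & (1UL << 31)))
	return 0;	/* paging disabled */
    if (efer & (1UL << 10))
	return 64;	/* long mode: 4-level page tables */
    if (cr4 & (1UL << 5))
	return 36;	/* PAE: 3-level, 64-bit PTEs */
    return 32;		/* classic 32-bit 2-level paging */
}
#endif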
4744 
4745 static int xen_vm_addr_v2p(struct target *target,tid_t tid,
4746  ADDR vaddr,ADDR *paddr) {
4747  struct xen_vm_state *xstate;
4748  uint64_t pgd = 0;
4749 
4750  xstate = (struct xen_vm_state *)target->state;
4751 
4752  if (__xen_vm_pgd(target,tid,&pgd)) {
4753  verror("could not read pgd for tid %"PRIiTID"!\n",tid);
4754  return -1;
4755  }
4756 
4757  if (!xstate->memops || !xstate->memops->addr_v2p) {
4758  errno = EINVAL;
4759  return -1;
4760  }
4761 
4762  return xstate->memops->addr_v2p(target,tid,pgd,vaddr,paddr);
4763 }
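/*
 * Illustrative sketch (not from this file): the memops compose -- a
 * per-thread virtual address is translated through that thread's pgd,
 * and the result is read via physical memory. (A real implementation
 * must also split reads that cross page boundaries.)
 */
#if 0
static unsigned char *example_read_via_phys(struct target *target,
					    tid_t tid,ADDR vaddr,
					    unsigned long len,
					    unsigned char *buf) {
    ADDR paddr;

    if (xen_vm_addr_v2p(target,tid,vaddr,&paddr))
	return NULL;	/* translation failed */
    return xen_vm_read_phys(target,paddr,len,buf);
}
#endif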
4764 
4765 static unsigned char *xen_vm_read_phys(struct target *target,ADDR paddr,
4766  unsigned long length,unsigned char *buf) {
4767  struct xen_vm_state *xstate;
4768 
4769  xstate = (struct xen_vm_state *)target->state;
4770 
4771  if (!xstate->memops || !xstate->memops->read_phys) {
4772  errno = EINVAL;
4773  return NULL;
4774  }
4775 
4776  return xstate->memops->read_phys(target,paddr,length,buf);
4777 }
4778 
4779 static unsigned long xen_vm_write_phys(struct target *target,ADDR paddr,
4780  unsigned long length,unsigned char *buf) {
4781  struct xen_vm_state *xstate;
4782 
4783  xstate = (struct xen_vm_state *)target->state;
4784 
4785  if (!xstate->memops || !xstate->memops->write_phys) {
4786  errno = EINVAL;
4787  return 0;
4788  }
4789 
4790  return xstate->memops->write_phys(target,paddr,length,buf);
4791 }
4792 
4793 unsigned char *xen_vm_read_pid(struct target *target,tid_t tid,ADDR vaddr,
4794  unsigned long length,unsigned char *buf) {
4795  struct xen_vm_state *xstate;
4796  uint64_t pgd = 0;
4797 
4798  xstate = (struct xen_vm_state *)target->state;
4799 
4800  if (!xstate->memops || !xstate->memops->read_tid) {
4801  errno = EINVAL;
4802  return NULL;
4803  }
4804 
4805  if (__xen_vm_pgd(target,tid,&pgd)) {
4806  verror("could not read pgd for tid %"PRIiTID"!\n",tid);
4807  return NULL;
4808  }
4809 
4810  return xstate->memops->read_tid(target,tid,pgd,vaddr,length,buf);
4811 }
4812 
4813 unsigned long xen_vm_write_pid(struct target *target,tid_t tid,ADDR vaddr,
4814  unsigned long length,unsigned char *buf) {
4815  struct xen_vm_state *xstate;
4816  uint64_t pgd = 0;
4817 
4818  xstate = (struct xen_vm_state *)target->state;
4819 
4820  if (!xstate->memops || !xstate->memops->write_tid) {
4821  errno = EINVAL;
4822  return 0;
4823  }
4824 
4825  if (__xen_vm_pgd(target,tid,&pgd)) {
4826  verror("could not read pgd for tid %"PRIiTID"!\n",tid);
4827  return 0;
4828  }
4829 
4830  return xstate->memops->write_tid(target,tid,pgd,vaddr,length,buf);
4831 }
4832 
4833 /* Register mapping.
4834  *
4835  * First, be aware that our host bit size (64/32) *does* influence which
4836  * registers we can access -- i.e., 64-bit host tracing a
4837  * 32-bit process still gets the 64-bit registers -- but even then, we
4838  * want the 32-bit mapping for DWARF reg num to i386 reg.
4839  *
4840  * XXX XXX XXX
4841  * If structs in xen/xen.h (and arch-specific includes containing
4842  * cpu_user_regs) change, ever, these mappings will be wrong.
4843  */
4844 #ifdef __x86_64__
4845 static int dreg_to_offset64[ARCH_X86_64_REG_COUNT] = {
4846  offsetof(struct vcpu_guest_context,user_regs.rax),
4847  offsetof(struct vcpu_guest_context,user_regs.rdx),
4848  offsetof(struct vcpu_guest_context,user_regs.rcx),
4849  offsetof(struct vcpu_guest_context,user_regs.rbx),
4850  offsetof(struct vcpu_guest_context,user_regs.rsi),
4851  offsetof(struct vcpu_guest_context,user_regs.rdi),
4852  offsetof(struct vcpu_guest_context,user_regs.rbp),
4853  offsetof(struct vcpu_guest_context,user_regs.rsp),
4854  offsetof(struct vcpu_guest_context,user_regs.r8),
4855  offsetof(struct vcpu_guest_context,user_regs.r9),
4856  offsetof(struct vcpu_guest_context,user_regs.r10),
4857  offsetof(struct vcpu_guest_context,user_regs.r11),
4858  offsetof(struct vcpu_guest_context,user_regs.r12),
4859  offsetof(struct vcpu_guest_context,user_regs.r13),
4860  offsetof(struct vcpu_guest_context,user_regs.r14),
4861  offsetof(struct vcpu_guest_context,user_regs.r15),
4862  offsetof(struct vcpu_guest_context,user_regs.rip),
4863  -1, -1, -1, -1, -1, -1, -1, -1,
4864  -1, -1, -1, -1, -1, -1, -1, -1,
4865  -1, -1, -1, -1, -1, -1, -1, -1,
4866  -1, -1, -1, -1, -1, -1, -1, -1,
4867  offsetof(struct vcpu_guest_context,user_regs.rflags),
4868  offsetof(struct vcpu_guest_context,user_regs.es),
4869  offsetof(struct vcpu_guest_context,user_regs.cs),
4870  offsetof(struct vcpu_guest_context,user_regs.ss),
4871  offsetof(struct vcpu_guest_context,user_regs.ds),
4872  offsetof(struct vcpu_guest_context,user_regs.fs),
4873  offsetof(struct vcpu_guest_context,user_regs.gs),
4874  -1, -1,
4875  /* What about fs_base, gs_base, gs_base_kernel; that's what these are. */
4876  offsetof(struct vcpu_guest_context,fs_base),
4877  offsetof(struct vcpu_guest_context,gs_base_kernel), /* XXX: reuse kernel */
4878  offsetof(struct vcpu_guest_context,gs_base_kernel),
4879  offsetof(struct vcpu_guest_context,gs_base_user),
4880  -1, -1, -1, -1, -1, -1,
4881  -1, -1,
4882  offsetof(struct vcpu_guest_context,ctrlreg[0]),
4883  offsetof(struct vcpu_guest_context,ctrlreg[1]),
4884  offsetof(struct vcpu_guest_context,ctrlreg[2]),
4885  offsetof(struct vcpu_guest_context,ctrlreg[3]),
4886  offsetof(struct vcpu_guest_context,ctrlreg[4]),
4887  -1, -1, -1, -1, -1,
4888  offsetof(struct vcpu_guest_context,debugreg[0]),
4889  offsetof(struct vcpu_guest_context,debugreg[1]),
4890  offsetof(struct vcpu_guest_context,debugreg[2]),
4891  offsetof(struct vcpu_guest_context,debugreg[3]),
4892  -1,-1,
4893  offsetof(struct vcpu_guest_context,debugreg[6]),
4894  offsetof(struct vcpu_guest_context,debugreg[7]),
4895  -1,
4896 };
4897 #endif
4898 static int dreg_to_offset32[ARCH_X86_REG_COUNT] = {
4899  offsetof(struct vcpu_guest_context,user_regs.eax),
4900  offsetof(struct vcpu_guest_context,user_regs.ecx),
4901  offsetof(struct vcpu_guest_context,user_regs.edx),
4902  offsetof(struct vcpu_guest_context,user_regs.ebx),
4903  offsetof(struct vcpu_guest_context,user_regs.esp),
4904  offsetof(struct vcpu_guest_context,user_regs.ebp),
4905  offsetof(struct vcpu_guest_context,user_regs.esi),
4906  offsetof(struct vcpu_guest_context,user_regs.edi),
4907  offsetof(struct vcpu_guest_context,user_regs.eip),
4908  offsetof(struct vcpu_guest_context,user_regs.eflags),
4909  -1, -1, -1, -1, -1, -1, -1, -1,
4910  -1, -1,
4911  -1, -1, -1, -1, -1, -1, -1, -1,
4912  -1, -1, -1, -1, -1, -1, -1, -1,
4913  -1, -1, -1,
4914  /* These are "fake" DWARF regs. */
4915  offsetof(struct vcpu_guest_context,user_regs.es),
4916  offsetof(struct vcpu_guest_context,user_regs.cs),
4917  offsetof(struct vcpu_guest_context,user_regs.ss),
4918  offsetof(struct vcpu_guest_context,user_regs.ds),
4919  offsetof(struct vcpu_guest_context,user_regs.fs),
4920  offsetof(struct vcpu_guest_context,user_regs.gs),
4921  offsetof(struct vcpu_guest_context,ctrlreg[0]),
4922  offsetof(struct vcpu_guest_context,ctrlreg[1]),
4923  offsetof(struct vcpu_guest_context,ctrlreg[2]),
4924  offsetof(struct vcpu_guest_context,ctrlreg[3]),
4925  offsetof(struct vcpu_guest_context,ctrlreg[4]),
4926  offsetof(struct vcpu_guest_context,debugreg[0]),
4927  offsetof(struct vcpu_guest_context,debugreg[1]),
4928  offsetof(struct vcpu_guest_context,debugreg[2]),
4929  offsetof(struct vcpu_guest_context,debugreg[3]),
4930  -1,-1,
4931  offsetof(struct vcpu_guest_context,debugreg[6]),
4932  offsetof(struct vcpu_guest_context,debugreg[7]),
4933  -1,
4934 };
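/*
 * Illustrative sketch (not from this file): how the offset tables
 * above are meant to be used -- index by DWARF register number, then
 * read the field at that byte offset within vcpu_guest_context.
 * Segment registers are 16-bit fields; everything else is word-sized.
 */
#if 0
static uint64_t example_read_reg64(struct vcpu_guest_context *ctx,int reg) {
    int offset = dreg_to_offset64[reg];	/* caller checks reg range */

    if (offset < 0)
	return 0;	/* DWARF reg with no vcpu equivalent */
    if (reg >= REG_X86_64_ES && reg <= REG_X86_64_GS)
	return *(uint16_t *)((char *)ctx + offset);
    return *(uint64_t *)((char *)ctx + offset);
}
#endif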
4935 
4936 /*
4937  * Register functions.
4938  */
4939 int __xen_vm_vcpu_to_thread_regcache(struct target *target,
4940  struct vcpu_guest_context *context,
4941  struct target_thread *tthread,
4942  thread_ctxt_t tctxt) {
4943  int offset;
4944  int i;
4945  int count = 0;
4946  REGVAL regval;
4947 
4948  vdebug(9,LA_TARGET,LF_XV,"translating vcpu to thid %d tctxt %d\n",
4949  tthread->tid,tctxt);
4950 
4951  /*
4952  * NB: we need to read 64-bit numbers from the vcpu structs if the
4953  * host is 64-bit, even if the target is 32-bit, I think...
4954  */
4955  if (arch_wordsize(target->arch) == 8 || __WORDSIZE == 64) {
4956  for (i = 0; i < ARCH_X86_64_REG_COUNT; ++i) {
4957  offset = dreg_to_offset64[i];
4958  if (offset < 0)
4959  continue;
4960 
4961  if (likely(i < REG_X86_64_ES) || likely(i > REG_X86_64_GS))
4962  regval = (REGVAL)*(uint64_t *)(((char *)context) + offset);
4963  else
4964  regval = (REGVAL)*(uint16_t *)(((char *)context) + offset);
4965 
4966  if (target_regcache_init_reg_tidctxt(target,tthread,tctxt,
4967  i,regval)) {
4968  vwarn("could not set reg %d thid %d tctxt %d\n",
4969  i,tthread->tid,tctxt);
4970  }
4971  else
4972  ++count;
4973  }
4974  }
4975  else if (arch_wordsize(target->arch) == 4) {
4976  for (i = 0; i < ARCH_X86_REG_COUNT; ++i) {
4977  offset = dreg_to_offset32[i];
4978  if (offset < 0)
4979  continue;
4980 
4981  regval = (REGVAL)*(uint32_t *)(((char *)context) + offset);
4982 
4983  if (target_regcache_init_reg_tidctxt(target,tthread,tctxt,
4984  i,regval)) {
4985  vwarn("could not set reg %d thid %d tctxt %d\n",
4986  i,tthread->tid,tctxt);
4987  }
4988  else
4989  ++count;
4990  }
4991  }
4992 
4994  "translated %d vcpu regs to thid %d tctxt %d regcache\n",
4995  count,tthread->tid,tctxt);
4996 
4997  return 0;
4998 }
4999 
5000 static int __xen_vm_thread_regcache_to_vcpu_64_reg(struct target *target,
5001  struct target_thread *tthread,
5002  thread_ctxt_t tctxt,
5003  REG reg,REGVAL regval,void *priv) {
5004  struct vcpu_guest_context *context;
5005  int offset;
5006 
5007  if (reg >= ARCH_X86_64_REG_COUNT) {
5008  vwarn("unsupported reg %d!\n",reg);
5009  errno = EINVAL;
5010  return -1;
5011  }
5012 
5013  context = (struct vcpu_guest_context *)priv;
5014  offset = dreg_to_offset64[reg];
5015 
5016  if (offset < 0) {
5017  vwarn("unsupported reg %d!\n",reg);
5018  errno = EINVAL;
5019  return -1;
5020  }
5021 
5022  vdebug(16,LA_TARGET,LF_XV,
5023  "tid %d thid %d tctxt %d regcache->vcpu %d 0x%"PRIxREGVAL"\n",
5024  target->id,tthread->tid,tctxt,reg,regval);
5025 
5026  if (likely(reg < REG_X86_64_ES) || likely(reg > REG_X86_64_GS))
5027  *(uint64_t *)(((char *)context) + offset) =
5028  (uint64_t)regval;
5029  else
5030  *(uint16_t *)(((char *)context) + offset) =
5031  (uint16_t)regval;
5032 
5033  return 0;
5034 }
5035 
5036 static int __xen_vm_thread_regcache_to_vcpu_64_rawreg(struct target *target,
5037  struct target_thread *tthread,
5038  thread_ctxt_t tctxt,
5039  REG reg,void *rawval,int rawlen,
5040  void *priv) {
5041  //struct vcpu_guest_context *context;
5042  int offset;
5043 
5044  if (reg >= ARCH_X86_64_REG_COUNT) {
5045  vwarn("unsupported reg %d!\n",reg);
5046  errno = EINVAL;
5047  return -1;
5048  }
5049 
5050  //context = (struct vcpu_guest_context *)priv;
5051  offset = dreg_to_offset64[reg];
5052 
5053  if (offset < 0) {
5054  vwarn("unsupported reg %d!\n",reg);
5055  errno = EINVAL;
5056  return -1;
5057  }
5058 
5059  vwarn("tid %d thid %d tctxt %d regcache->vcpu %d"
5060  " -- unsupported rawval len %d\n",
5061  target->id,tthread->tid,tctxt,reg,rawlen);
5062 
5063  return -1;
5064 }
5065 
5066 static int __xen_vm_thread_regcache_to_vcpu_32_reg(struct target *target,
5067  struct target_thread *tthread,
5068  thread_ctxt_t tctxt,
5069  REG reg,REGVAL regval,void *priv) {
5070  struct vcpu_guest_context *context;
5071  int offset;
5072 
5073  if (reg >= ARCH_X86_REG_COUNT) {
5074  vwarn("unsupported reg %d!\n",reg);
5075  errno = EINVAL;
5076  return -1;
5077  }
5078 
5079  context = (struct vcpu_guest_context *)priv;
5080  offset = dreg_to_offset32[reg];
5081 
5082  if (offset < 0) {
5083  vwarn("unsupported reg %d!\n",reg);
5084  errno = EINVAL;
5085  return -1;
5086  }
5087 
5088  vdebug(16,LA_TARGET,LF_XV,
5089  "tid %d thid %d tctxt %d regcache->vcpu %d 0x%"PRIxREGVAL"\n",
5090  target->id,tthread->tid,tctxt,reg,regval);
5091 
5092  *(uint32_t *)(((char *)context) + offset) = (uint32_t)regval;
5093 
5094  return 0;
5095 }
5096 
5097 static int __xen_vm_thread_regcache_to_vcpu_32_rawreg(struct target *target,
5098  struct target_thread *tthread,
5099  thread_ctxt_t tctxt,
5100  REG reg,void *rawval,int rawlen,
5101  void *priv) {
5102  //struct vcpu_guest_context *context;
5103  int offset;
5104 
5105  if (reg >= ARCH_X86_REG_COUNT) {
5106  vwarn("unsupported reg %d!\n",reg);
5107  errno = EINVAL;
5108  return -1;
5109  }
5110 
5111  //context = (struct vcpu_guest_context *)priv;
5112  offset = dreg_to_offset32[reg];
5113 
5114  if (offset < 0) {
5115  vwarn("unsupported reg %d!\n",reg);
5116  errno = EINVAL;
5117  return -1;
5118  }
5119 
5120  vwarn("tid %d thid %d tctxt %d regcache->vcpu %d"
5121  " -- unsupported rawval len %d\n",
5122  target->id,tthread->tid,tctxt,reg,rawlen);
5123 
5124  return -1;
5125 }
5126 
5127 int __xen_vm_thread_regcache_to_vcpu(struct target *target,
5128  struct target_thread *tthread,
5129  thread_ctxt_t tctxt,
5130  struct vcpu_guest_context *context) {
5131  vdebug(9,LA_TARGET,LF_XV,"translating thid %d tctxt %d to vcpu\n",
5132  tthread->tid,tctxt);
5133 
5134  /*
5135  * NB: we need to write 64-bit numbers from the vcpu structs if the
5136  * host is 64-bit, even if the target is 32-bit, I think...
5137  */
5138  if (arch_wordsize(target->arch) == 8 || __WORDSIZE == 64) {
5139  target_regcache_foreach_dirty(target,tthread,tctxt,
5140  __xen_vm_thread_regcache_to_vcpu_64_reg,
5141  __xen_vm_thread_regcache_to_vcpu_64_rawreg,
5142  context);
5143  }
5144  else if (arch_wordsize(target->arch) == 4) {
5145  target_regcache_foreach_dirty(target,tthread,tctxt,
5146  __xen_vm_thread_regcache_to_vcpu_32_reg,
5147  __xen_vm_thread_regcache_to_vcpu_32_rawreg,
5148  context);
5149  }
5150 
5151  return 0;
5152 }
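/*
 * Illustrative sketch (not from this file): the two translators pair
 * up around a debug stop. State is imported into the regcache when a
 * thread is loaded, and only dirty entries are exported back to the
 * vcpu context before the domain is unpaused.
 */
#if 0
static void example_regcache_roundtrip(struct target *target,
				       struct target_thread *tthread,
				       struct vcpu_guest_context *ctx) {
    /* Import: vcpu context -> regcache. */
    __xen_vm_vcpu_to_thread_regcache(target,ctx,tthread,tthread->tidctxt);
    /* ... probe handlers read/write cached registers ... */
    /* Export: dirty regcache entries -> vcpu context. */
    __xen_vm_thread_regcache_to_vcpu(target,tthread,tthread->tidctxt,ctx);
}
#endif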
5153 
5154 /*
5155  * Hardware breakpoint support.
5156  */
5157 static REG xen_vm_get_unused_debug_reg(struct target *target,tid_t tid) {
5158  REG retval = -1;
5159  struct target_thread *tthread;
5160  struct xen_vm_thread_state *xtstate;
5161 
5162  if (tid != TID_GLOBAL) {
5163  verror("currently must use TID_GLOBAL for hardware probepoints!\n");
5164  return -1;
5165  }
5166 
5167  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5168  if (!errno)
5169  errno = EINVAL;
5170  verror("could not load cached thread %"PRIiTID"\n",tid);
5171  return -1;
5172  }
5173  xtstate = (struct xen_vm_thread_state *)tthread->state;
5174 
5175  if (!xtstate->dr[0]) { retval = 0; }
5176  else if (!xtstate->dr[1]) { retval = 1; }
5177  else if (!xtstate->dr[2]) { retval = 2; }
5178  else if (!xtstate->dr[3]) { retval = 3; }
5179 
5180  vdebug(5,LA_TARGET,LF_XV,"returning unused debug reg %d\n",retval);
5181 
5182  return retval;
5183 }
5184 
5185 /*
5186  * struct x86_dr_format {
5187  * int dr0_l:1;
5188  * int dr0_g:1;
5189  * int dr1_l:1;
5190  * int dr1_g:1;
5191  * int dr2_l:1;
5192  * int dr2_g:1;
5193  * int dr3_l:1;
5194  * int dr3_g:1;
5195  * int exact_l:1;
5196  * int exact_g:1;
5197  * int reserved:6;
5198  * probepoint_whence_t dr0_break:2;
5199  * probepoint_watchsize_t dr0_len:2;
5200  * probepoint_whence_t dr1_break:2;
5201  * probepoint_watchsize_t dr1_len:2;
5202  * probepoint_whence_t dr2_break:2;
5203  * probepoint_watchsize_t dr2_len:2;
5204  * probepoint_whence_t dr3_break:2;
5205  * probepoint_watchsize_t dr3_len:2;
5206  * };
5207 */
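/*
 * Illustrative sketch (not from this file) of the DR7 bit layout shown
 * above: debug register r owns a local/global enable pair at bits
 * (2r, 2r+1), a 2-bit "whence" field at bits 16+4r, and a 2-bit length
 * field at bits 18+4r. The functions below perform exactly this math:
 */
#if 0
static unsigned long example_dr7_program(unsigned long dr7,int r,
					 int whence,int watchsize) {
    dr7 |=  (1UL << (r * 2));		/* local enable on */
    dr7 &= ~(1UL << (r * 2 + 1));	/* global enable off */
    dr7 &= ~(3UL << (16 + r * 4));	/* clear whence bits */
    dr7 |=  ((unsigned long)whence << (16 + r * 4));
    dr7 &= ~(3UL << (18 + r * 4));	/* clear length bits */
    dr7 |=  ((unsigned long)watchsize << (18 + r * 4));
    return dr7;
}
#endif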
5208 
5209 static int xen_vm_set_hw_breakpoint(struct target *target,tid_t tid,
5210  REG reg,ADDR addr) {
5211  struct target_thread *tthread;
5212  struct xen_vm_thread_state *xtstate;
5213 
5214  if (reg < 0 || reg > 3) {
5215  errno = EINVAL;
5216  return -1;
5217  }
5218 
5219  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5220  if (!errno)
5221  errno = EINVAL;
5222  verror("could not load cached thread %"PRIiTID"\n",tid);
5223  return -1;
5224  }
5225  xtstate = (struct xen_vm_thread_state *)tthread->state;
5226 
5227  if (xtstate->context.debugreg[reg] != 0) {
5228  vwarn("debug reg %"PRIiREG" already has an address, overwriting (0x%lx)!\n",
5229  reg,xtstate->context.debugreg[reg]);
5230  //errno = EBUSY;
5231  //return -1;
5232  }
5233 
5234  /* Set the address, then the control bits. */
5235  xtstate->dr[reg] = (unsigned long)addr;
5236 
5237  /* Clear the status bits */
5238  xtstate->dr[6] = 0; //&= ~(1 << reg);
5239 
5240  /* Set the local control bit, and unset the global bit. */
5241  xtstate->dr[7] |= (1 << (reg * 2));
5242  xtstate->dr[7] &= ~(1 << (reg * 2 + 1));
5243  /* Set the break to be on execution (00b). */
5244  xtstate->dr[7] &= ~(3 << (16 + (reg * 4)));
5245 
5246  /* Now save these values for later write in flush_context! */
5247  xtstate->context.debugreg[reg] = xtstate->dr[reg];
5248  xtstate->context.debugreg[6] = xtstate->dr[6];
5249  xtstate->context.debugreg[7] = xtstate->dr[7];
5250 
5251  OBJSDIRTY(tthread);
5252 
5253 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5254  struct xen_vm_state *xstate;
5255  xstate = (struct xen_vm_state *)(target->state);
5256  assert(xstate->dominfo_valid);
5257  if (xstate->dominfo.ttd_replay_flag) {
5258  int ret = xc_ttd_vmi_add_probe(xc_handle,xstate->id,addr);
5259 
5260  if (ret) {
5261  verror("failed to register probe [dom%d:%"PRIxADDR" (%d)\n",
5262  xstate->id,addr,ret);
5263  return ret;
5264  }
5266  "registered probe in replay domain [dom%d:%"PRIxADDR"]\n",
5267  xstate->id,addr);
5268  }
5269 #endif
5270 
5271  return 0;
5272 }
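/*
 * Illustrative sketch (not from this file): the intended allocation
 * flow -- find a free debug register, then program it. The dirty
 * context values are written back to Xen later by the flush path.
 */
#if 0
static int example_add_hw_bp(struct target *target,ADDR addr) {
    REG reg = xen_vm_get_unused_debug_reg(target,TID_GLOBAL);

    if (reg < 0)
	return -1;	/* all four debug registers in use */
    return xen_vm_set_hw_breakpoint(target,TID_GLOBAL,reg,addr);
}
#endif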
5273 
5274 static int xen_vm_set_hw_watchpoint(struct target *target,tid_t tid,
5275  REG reg,ADDR addr,
5276  probepoint_whence_t whence,
5277  probepoint_watchsize_t watchsize) {
5278  struct target_thread *tthread;
5279  struct xen_vm_thread_state *xtstate;
5280 
5281  if (reg < 0 || reg > 3) {
5282  errno = EINVAL;
5283  return -1;
5284  }
5285 
5286  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5287  if (!errno)
5288  errno = EINVAL;
5289  verror("could not load cached thread %"PRIiTID"\n",tid);
5290  return -1;
5291  }
5292  xtstate = (struct xen_vm_thread_state *)tthread->state;
5293 
5294  if (xtstate->context.debugreg[reg] != 0) {
5295  vwarn("debug reg %"PRIiREG" already has an address, overwriting (0x%lx)!\n",
5296  reg,xtstate->context.debugreg[reg]);
5297  //errno = EBUSY;
5298  //return -1;
5299  }
5300 
5301  /* Set the address, then the control bits. */
5302  xtstate->dr[reg] = addr;
5303 
5304  /* Clear the status bits */
5305  xtstate->dr[6] = 0; //&= ~(1 << reg);
5306 
5307  /* Set the local control bit, and unset the global bit. */
5308  xtstate->dr[7] |= (1 << (reg * 2));
5309  xtstate->dr[7] &= ~(1 << (reg * 2 + 1));
5310  /* Set the break to be on whatever whence was (clear the bits first!). */
5311  xtstate->dr[7] &= ~(3 << (16 + (reg * 4)));
5312  xtstate->dr[7] |= (whence << (16 + (reg * 4)));
5313  /* Set the watchsize to be whatever watchsize was. */
5314  xtstate->dr[7] &= ~(3 << (18 + (reg * 4)));
5315  xtstate->dr[7] |= (watchsize << (18 + (reg * 4)));
5316 
5318  "dreg6 = 0x%"PRIxADDR"; dreg7 = 0x%"PRIxADDR", w = %d, ws = 0x%x\n",
5319  xtstate->dr[6],xtstate->dr[7],whence,watchsize);
5320 
5321  /* Now save these values for later write in flush_context! */
5322  xtstate->context.debugreg[reg] = xtstate->dr[reg];
5323  xtstate->context.debugreg[6] = xtstate->dr[6];
5324  xtstate->context.debugreg[7] = xtstate->dr[7];
5325 
5326  OBJSDIRTY(tthread);
5327 
5328 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5329  struct xen_vm_state *xstate;
5330  xstate = (struct xen_vm_state *)(target->state);
5331  assert(xstate->dominfo_valid);
5332  if (xstate->dominfo.ttd_replay_flag) {
5333  int ret = xc_ttd_vmi_add_probe(xc_handle,xstate->id,addr);
5334 
5335  if (ret) {
5336  verror("failed to register probe [dom%d:%"PRIxADDR" (%d)\n",
5337  xstate->id,addr,ret);
5338  return ret;
5339  }
5341  "registered probe in replay domain [dom%d:%"PRIxADDR"]\n",
5342  xstate->id,addr);
5343  }
5344 #endif
5345 
5346  return 0;
5347 }
5348 
5349 static int xen_vm_unset_hw_breakpoint(struct target *target,tid_t tid,REG reg) {
5350 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5351  ADDR addr;
5352 #endif
5353  struct target_thread *tthread;
5354  struct xen_vm_thread_state *xtstate;
5355 
5356  if (reg < 0 || reg > 3) {
5357  errno = EINVAL;
5358  return -1;
5359  }
5360 
5361  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5362  if (!errno)
5363  errno = EINVAL;
5364  verror("could not load cached thread %"PRIiTID"\n",tid);
5365  return -1;
5366  }
5367  xtstate = (struct xen_vm_thread_state *)tthread->state;
5368 
5369 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5370  addr = xtstate->dr[reg];
5371 #endif
5372 
5373  /* Set the address, then the control bits. */
5374  xtstate->dr[reg] = 0;
5375 
5376  /* Clear the status bits */
5377  xtstate->dr[6] = 0; //&= ~(1 << reg);
5378 
5379  /* Unset the local control bit, and unset the global bit. */
5380  xtstate->dr[7] &= ~(3 << (reg * 2));
5381 
5382  /* Now save these values for later write in flush_context! */
5383  xtstate->context.debugreg[reg] = xtstate->dr[reg];
5384  xtstate->context.debugreg[6] = xtstate->dr[6];
5385  xtstate->context.debugreg[7] = xtstate->dr[7];
5386 
5387  OBJSDIRTY(tthread);
5388 
5389 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5390  struct xen_vm_state *xstate;
5391  xstate = (struct xen_vm_state *)(target->state);
5392  assert(xstate->dominfo_valid);
5393  if (xstate->dominfo.ttd_replay_flag) {
5394  int ret = xc_ttd_vmi_remove_probe(xc_handle,xstate->id,addr);
5395 
5396  if (ret) {
5397  verror("failed to unregister probe [dom%d:%"PRIxADDR" (%d)\n",
5398  xstate->id,addr,ret);
5399  return ret;
5400  }
5402  "unregistered probe in replay domain [dom%d:%"PRIxADDR"]\n",
5403  xstate->id,addr);
5404  }
5405 #endif
5406 
5407  return 0;
5408 }
5409 
5410 static int xen_vm_unset_hw_watchpoint(struct target *target,tid_t tid,REG reg) {
5411  /* It's the exact same thing, yay! */
5412  return xen_vm_unset_hw_breakpoint(target,tid,reg);
5413 }
5414 
5415 int xen_vm_disable_hw_breakpoints(struct target *target,tid_t tid) {
5416  struct target_thread *tthread;
5417  struct xen_vm_thread_state *xtstate;
5418 
5419  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5420  if (!errno)
5421  errno = EINVAL;
5422  verror("could not load cached thread %"PRIiTID"\n",tid);
5423  return -1;
5424  }
5425  xtstate = (struct xen_vm_thread_state *)tthread->state;
5426 
5427  xtstate->context.debugreg[7] = 0;
5428 
5429  OBJSDIRTY(tthread);
5430 
5431  return 0;
5432 }
5433 
5434 int xen_vm_enable_hw_breakpoints(struct target *target,tid_t tid) {
5435  struct target_thread *tthread;
5436  struct xen_vm_thread_state *xtstate;
5437 
5438  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5439  if (!errno)
5440  errno = EINVAL;
5441  verror("could not load cached thread %"PRIiTID"\n",tid);
5442  return -1;
5443  }
5444  xtstate = (struct xen_vm_thread_state *)tthread->state;
5445 
5446  xtstate->context.debugreg[7] = xtstate->dr[7];
5447 
5448  OBJSDIRTY(tthread);
5449 
5450  return 0;
5451 }
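
/*
 * Editor's note: the disable/enable pair above never touches the domain
 * directly; it just stages either 0 or the cached xtstate->dr[7] into
 * the pending vcpu context, which a later flush_context writes back.
 * Sketched round trip (the surrounding stepping logic is elided):
 *
 *     xen_vm_disable_hw_breakpoints(target,tid); // stage dr7 = 0
 *     // ...single-step past the instruction...
 *     xen_vm_enable_hw_breakpoints(target,tid);  // stage dr7 = cached dr[7]
 */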
5452 
5453 int xen_vm_disable_hw_breakpoint(struct target *target,tid_t tid,REG dreg) {
5454  struct target_thread *tthread;
5455  struct xen_vm_thread_state *xtstate;
5456 
5457  if (dreg < 0 || dreg > 3) {
5458  errno = EINVAL;
5459  return -1;
5460  }
5461 
5462  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5463  if (!errno)
5464  errno = EINVAL;
5465  verror("could not load cached thread %"PRIiTID"\n",tid);
5466  return -1;
5467  }
5468  xtstate = (struct xen_vm_thread_state *)tthread->state;
5469 
5470  /* Clear the status bits */
5471  xtstate->dr[6] = 0; //&= ~(1 << reg);
5472 
5473  /* Unset the local control bit, and unset the global bit. */
5474  xtstate->dr[7] &= ~(3 << (dreg * 2));
5475 
5476  /* Now save these values for later write in flush_context! */
5477  xtstate->context.debugreg[6] = xtstate->dr[6];
5478  xtstate->context.debugreg[7] = xtstate->dr[7];
5479 
5480  OBJSDIRTY(tthread);
5481 
5482 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5483  struct xen_vm_state *xstate;
5484  xstate = (struct xen_vm_state *)(target->state);
5485  assert(xstate->dominfo_valid);
5486  if (xstate->dominfo.ttd_replay_flag) {
5487  int ret = xc_ttd_vmi_remove_probe(xc_handle,xstate->id,xtstate->dr[dreg]);
5488 
5489  if (ret) {
5490  verror("failed to unregister probe [dom%d:%lx (%d)\n",
5491  xstate->id,xtstate->dr[dreg],ret);
5492  return ret;
5493  }
5495  "unregistered probe in replay domain [dom%d:%lx]\n",
5496  xstate->id,xtstate->dr[dreg]);
5497  }
5498 #endif
5499 
5500  return 0;
5501 }
5502 
5503 int xen_vm_enable_hw_breakpoint(struct target *target,tid_t tid,REG dreg) {
5504  struct target_thread *tthread;
5505  struct xen_vm_thread_state *xtstate;
5506 
5507  if (dreg < 0 || dreg > 3) {
5508  errno = EINVAL;
5509  return -1;
5510  }
5511 
5512  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5513  if (!errno)
5514  errno = EINVAL;
5515  verror("could not load cached thread %"PRIiTID"\n",tid);
5516  return -1;
5517  }
5518  xtstate = (struct xen_vm_thread_state *)tthread->state;
5519 
5520  /* Clear the status bits */
5521  xtstate->dr[6] = 0; //&= ~(1 << reg);
5522 
5523  /* Set the local control bit, and unset the global bit. */
5524  xtstate->dr[7] |= (1 << (dreg * 2));
5525  xtstate->dr[7] &= ~(1 << (dreg * 2 + 1));
5526 
5527  /* Now save these values for later write in flush_context! */
5528  xtstate->context.debugreg[6] = xtstate->dr[6];
5529  xtstate->context.debugreg[7] = xtstate->dr[7];
5530 
5531  OBJSDIRTY(tthread);
5532 
5533 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5534  struct xen_vm_state *xstate;
5535  xstate = (struct xen_vm_state *)(target->state);
5536  assert(xstate->dominfo_valid);
5537  if (xstate->dominfo.ttd_replay_flag) {
5538  int ret = xc_ttd_vmi_add_probe(xc_handle,xstate->id,xtstate->dr[dreg]);
5539 
5540  if (ret) {
5541  verror("failed to register probe [dom%d:%lx (%d)\n",
5542  xstate->id,xtstate->dr[dreg],ret);
5543  return ret;
5544  }
5546  "registered probe in replay domain [dom%d:%lx]\n",
5547  xstate->id,xtstate->dr[dreg]);
5548  }
5549 #endif
5550 
5551  return 0;
5552 }
5553 
5554 int xen_vm_notify_sw_breakpoint(struct target *target,ADDR addr,
5555  int notification) {
5556 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5557  struct xen_vm_state *xstate;
5558  int ret = -1;
5559  char *msg = "unregister";
5560 
5561  xstate = (struct xen_vm_state *)(target->state);
5562 
5563  /* SW breakpoints are only implemented for replay domains right now */
5564  assert(xstate->dominfo_valid);
5565  if (!xstate->dominfo.ttd_replay_flag)
5566  return 0;
5567 
5568  if (notification) {
5569  msg = "register";
5570  ret = xc_ttd_vmi_add_probe(xc_handle,xstate->id,addr);
5571  }
5572  else {
5573  ret = xc_ttd_vmi_remove_probe(xc_handle,xstate->id,addr);
5574  }
5575 
5576  if (ret) {
5577  verror("failed to %s probe [dom%d:%"PRIxADDR" (%d)\n",
5578  msg,xstate->id,addr,ret);
5579  return ret;
5580  }
5582  "%sed probe in replay domain [dom%d:%"PRIxADDR"]\n",
5583  msg,xstate->id,addr);
5584 #endif
5585  return 0;
5586 }
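
/*
 * Editor's note: for replay (time-travel) domains, software breakpoints
 * must also be mirrored into the hypervisor as probes; callers are
 * expected to notify after inserting or removing the breakpoint
 * instruction. Sketched usage (the breakpoint write itself is elided):
 *
 *     // after writing the breakpoint instruction at addr:
 *     xen_vm_notify_sw_breakpoint(target,addr,1);  // register
 *     // ...later, after restoring the original bytes:
 *     xen_vm_notify_sw_breakpoint(target,addr,0);  // unregister
 */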
5587 
5588 int xen_vm_singlestep(struct target *target,tid_t tid,int isbp,
5589  struct target *overlay) {
5590  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
5591  struct xen_vm_spec *xspec = (struct xen_vm_spec *)target->spec->backend_spec;
5592  struct target_thread *tthread;
5593  struct xen_vm_thread_state *xtstate;
5594 
5595  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5596  if (!errno)
5597  errno = EINVAL;
5598  verror("could not load cached thread %"PRIiTID"\n",tid);
5599  return -1;
5600  }
5601  xtstate = (struct xen_vm_thread_state *)tthread->state;
5602 
5603  /*
5604  * Try to use xc_domain_debug_control for HVM domains; but if it
5605  * fails, fall back to the old EFLAGS-based way.
5606  *
5607  * NB: it had better not fail. HVM Xen looks to see if EFLAGS_TF is
5608  * set, and if it is, it will reinject the debug trap into the guest
5609  * after we see it... which we don't want! Maybe I can find a way
5610  * around that too.
5611  *
5612  * NB: this uses the CPU's monitor trap flag. Xen's VMX HVM support
5613  * doesn't give us a way to figure out that the monitor trap flag is
5614  * what was triggered... so for the hvm case, we keep a special bit
5615  * (we only need one because we only support one VCPU).
5616  *
5617  * XXX: in the future, only use the HVM monitor trap flag if the thread
5618  * is the current or global thread. Otherwise obviously we won't
5619  * get what we want. Ugh, this is all crazy.
5620  *
5621  * We also can't use the MTF if this is an overlay thread and the
5622  * hypervisor is not patched to handle userspace debug exceptions.
5623  */
5624  if (xstate->hvm
5625  && (!overlay
5626  || (overlay && !xspec->hypervisor_ignores_userspace_exceptions))) {
5627 #ifdef XC_HAVE_DOMAIN_DEBUG_CONTROL
5628  if (xc_domain_debug_control(xc_handle,xstate->id,
5629  XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON,
5630  xstate->dominfo.max_vcpu_id)) {
5631  vwarn("xc_domain_debug_control failed! falling back to eflags!\n");
5632  goto nohvm;
5633  }
5634  else
5635  xstate->hvm_monitor_trap_flag_set = 1;
5636 #else
5637  vwarn("xc_domain_debug_control does not exist; falling back to eflags!\n");
5638  goto nohvm;
5639 #endif
5640  }
5641  else if (overlay && xspec->hypervisor_ignores_userspace_exceptions) {
5642  /*
5643  * We have to emulate the exception in the userspace part of the
5644  * target's thread.
5645  */
5646  verror("BUG: overlay process driver should call"
5647  " target_os_thread_singlestep()!\n");
5648  errno = EINVAL;
5649  return -1;
5650  }
5651  else {
5652  nohvm:
5653 #if __WORDSIZE == 32
5654  xtstate->context.user_regs.eflags |= X86_EF_TF;
5655  /*
5656  * If this is a single step of an instruction for which a breakpoint
5657  * is set, set the RF flag. Why? Because then we don't have to
5658  * disable the hw breakpoint at this instruction if there is one.
5659  * The x86 clears it after one instruction anyway, so it's safe.
5660  */
5661  if (isbp)
5662  xtstate->context.user_regs.eflags |= X86_EF_RF;
5663  xtstate->context.user_regs.eflags &= ~X86_EF_IF;
5664 #else
5665  xtstate->context.user_regs.rflags |= X86_EF_TF;
5666  if (isbp)
5667  xtstate->context.user_regs.rflags |= X86_EF_RF;
5668  xtstate->context.user_regs.rflags &= ~X86_EF_IF;
5669 #endif
5670  OBJSDIRTY(tthread);
5671  }
5672 
5673  target->sstep_thread = tthread;
5674  if (overlay)
5675  target->sstep_thread_overlay = overlay;
5676  else
5677  target->sstep_thread_overlay = NULL;
5678 
5679  return 0;
5680 }
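
/*
 * Editor's note: the nohvm fallback above is plain EFLAGS twiddling.
 * Restated as a helper (a sketch only; the helper is illustrative, not
 * part of this driver):
 *
 *     static unsigned long sstep_eflags(unsigned long eflags,int isbp) {
 *         eflags |= X86_EF_TF;      // trap after one instruction
 *         if (isbp)
 *             eflags |= X86_EF_RF;  // don't re-trigger the instruction
 *                                   // breakpoint; the CPU clears RF
 *                                   // after one instruction anyway
 *         eflags &= ~X86_EF_IF;     // keep interrupts from interleaving
 *         return eflags;
 *     }
 */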
5681 
5682 int xen_vm_singlestep_end(struct target *target,tid_t tid,
5683  struct target *overlay) {
5684  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
5685  struct xen_vm_spec *xspec = (struct xen_vm_spec *)target->spec->backend_spec;
5686  struct target_thread *tthread;
5687  struct xen_vm_thread_state *xtstate;
5688 
5689  if (!(tthread = __xen_vm_load_cached_thread(target,tid))) {
5690  if (!errno)
5691  errno = EINVAL;
5692  verror("could not load cached thread %"PRIiTID"\n",tid);
5693  return -1;
5694  }
5695  xtstate = (struct xen_vm_thread_state *)tthread->state;
5696 
5697  /*
5698  * Try to use xc_domain_debug_control for HVM domains; but if it
5699  * fails, fall back to the old EFLAGS-based way.
5700  */
5701  if (xstate->hvm
5702  && (!overlay
5703  || (overlay && !xspec->hypervisor_ignores_userspace_exceptions))) {
5704 #ifdef XC_HAVE_DOMAIN_DEBUG_CONTROL
5705  if (xc_domain_debug_control(xc_handle,xstate->id,
5706  XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF,
5707  xstate->dominfo.max_vcpu_id)) {
5708  vwarn("xc_domain_debug_control failed! falling back to eflags!\n");
5709  goto nohvm;
5710  }
5711  else
5712  xstate->hvm_monitor_trap_flag_set = 0;
5713 #else
5714  vwarn("xc_domain_debug_control does not exist; falling back to eflags!\n");
5715  goto nohvm;
5716 #endif
5717  }
5718  else if (overlay && xspec->hypervisor_ignores_userspace_exceptions) {
5719  /*
5720  * We have to emulate the exception in the userspace part of the
5721  * target's thread.
5722  */
5723  verror("BUG: overlay process driver should call"
5724  " target_os_thread_singlestep_end()!\n");
5725  errno = EINVAL;
5726  return -1;
5727  }
5728  else {
5729  nohvm:
5730 #if __WORDSIZE == 32
5731  xtstate->context.user_regs.eflags &= ~X86_EF_TF;
5732 #else
5733  xtstate->context.user_regs.rflags &= ~X86_EF_TF;
5734 #endif
5735  OBJSDIRTY(tthread);
5736  }
5737 
5738  target->sstep_thread = NULL;
5739  target->sstep_thread_overlay = NULL;
5740 
5741  return 0;
5742 }
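
/*
 * Editor's note: xen_vm_singlestep and xen_vm_singlestep_end are meant
 * to bracket exactly one instruction. Sketched use from an exception
 * handler (resume/wait plumbing elided):
 *
 *     if (xen_vm_singlestep(target,tid,isbp,NULL) == 0) {
 *         // ...resume the domain; wait for the debug exception...
 *         xen_vm_singlestep_end(target,tid,NULL);
 *     }
 */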
5743 
5744 int xen_vm_instr_can_switch_context(struct target *target,ADDR addr) {
5745  unsigned char buf[2];
5746 
5747  if (!target_read_addr(target,addr,2,buf)) {
5748  verror("could not read 2 bytes at 0x%"PRIxADDR"!\n",addr);
5749  return -1;
5750  }
5751 
5752  /* For now, if it's an IRET, or an INT, return the opcode; otherwise, 0. */
5753  if (buf[0] == 0xcf)
5754  return (int)buf[0];
5755  else if (buf[0] == 0xcc || buf[0] == 0xcd || buf[0] == 0xce)
5756  return (int)buf[0];
5757 
5758  return 0;
5759 }
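
/*
 * Editor's note: the return value doubles as the opcode, so a caller
 * can tell which context-switching instruction sits at addr (sketch):
 *
 *     int rc = xen_vm_instr_can_switch_context(target,addr);
 *     if (rc < 0)
 *         ;  // read failed
 *     else if (rc == 0xcf)
 *         ;  // IRET
 *     else if (rc)
 *         ;  // INT3 (0xcc), INT imm8 (0xcd), or INTO (0xce)
 */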
5760 
5761 uint64_t xen_vm_get_tsc(struct target *target) {
5762  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
5763 
5764  assert(xstate->dominfo_valid);
5765 
5766 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5767  struct target_thread *gthread;
5768  struct xen_vm_thread_state *gtstate;
5769  if (xstate->dominfo.ttd_guest) {
5770  if (target->global_thread && OBJVALID(target->global_thread))
5771  gthread = target->global_thread;
5772  else if (!(gthread = __xen_vm_load_current_thread(target,0,1))) {
5773  verror("could not load global thread!\n");
5774  return UINT64_MAX;
5775  }
5776 
5777  gtstate = (struct xen_vm_thread_state *)gthread->state;
5778 
5779  return gtstate->context.ttd_perf.tsc;
5780  }
5781  else {
5782 #endif
5783  if (xstate->vcpuinfo.time.version & 0x1)
5784  vwarn("tsc update in progress; tsc may be wrong?!\n");
5785 
5786  return xstate->vcpuinfo.time.tsc_timestamp;
5787 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5788  }
5789 #endif
5790 }
5791 
5792 uint64_t xen_vm_get_time(struct target *target) {
5793  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
5794 
5795  assert(xstate->dominfo_valid);
5796 
5797  if (xstate->vcpuinfo.time.version & 0x1)
5798  vwarn("tsc update in progress; time may be wrong?!\n");
5799 
5800  return xstate->vcpuinfo.time.system_time;
5801 }
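
/*
 * Editor's note: the "version & 0x1" tests here and in xen_vm_get_tsc
 * are half of the usual Xen pvclock seqlock protocol: the hypervisor
 * increments version before and after updating the time fields, so an
 * odd value means an update is in flight. A full reader retries rather
 * than just warning (sketch; memory barriers elided):
 *
 *     uint32_t v;
 *     uint64_t t;
 *     do {
 *         v = xstate->vcpuinfo.time.version;
 *         t = xstate->vcpuinfo.time.system_time;
 *     } while ((v & 0x1) || v != xstate->vcpuinfo.time.version);
 */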
5802 
5803 uint64_t xen_vm_get_counter(struct target *target) {
5804  struct xen_vm_state *xstate = (struct xen_vm_state *)target->state;
5805 
5806  assert(xstate->dominfo_valid);
5807 
5808 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5809  struct target_thread *gthread;
5810  struct xen_vm_thread_state *gtstate;
5811  if (xstate->dominfo.ttd_guest) {
5812  if (target->global_thread && OBJVALID(target->global_thread))
5813  gthread = target->global_thread;
5814  else if (!(gthread = __xen_vm_load_current_thread(target,0,1))) {
5815  verror("could not load global thread!\n");
5816  return UINT64_MAX;
5817  }
5818 
5819  gtstate = (struct xen_vm_thread_state *)gthread->state;
5820 
5821  return gtstate->context.ttd_perf.brctr;
5822  }
5823  else {
5824 #endif
5825  if (xstate->vcpuinfo.time.version & 0x1)
5826  vwarn("time (subbing for counter) update in progress; time/counter"
5827  " may be wrong?!\n");
5828 
5829  return xstate->vcpuinfo.time.system_time;
5830 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5831  }
5832 #endif
5833 }
5834 
5835 int xen_vm_enable_feature(struct target *target,int feature,void *arg) {
5836  if (feature != XV_FEATURE_BTS)
5837  return -1;
5838 
5839 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5840  struct xen_vm_state *xstate;
5841 
5842  xstate = (struct xen_vm_state *)(target->state);
5843 
5844  assert(xstate->dominfo_valid);
5845  if (!xstate->dominfo.ttd_replay_flag)
5846  return 0;
5847 
5848  return xc_ttd_set_bts_on(xc_handle,xstate->id);
5849 #else
5850  return -1;
5851 #endif
5852 }
5853 
5854 int xen_vm_disable_feature(struct target *target,int feature) {
5855  if (feature != XV_FEATURE_BTS)
5856  return -1;
5857 
5858 #ifdef CONFIG_DETERMINISTIC_TIMETRAVEL
5859  struct xen_vm_state *xstate;
5860 
5861  xstate = (struct xen_vm_state *)(target->state);
5862 
5863  assert(xstate->dominfo_valid);
5864  if (!xstate->dominfo.ttd_replay_flag)
5865  return 0;
5866 
5867  return xc_ttd_set_bts_off(xc_handle,xstate->id);
5868 #else
5869  return -1;
5870 #endif
5871 }
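
/*
 * Editor's note: XV_FEATURE_BTS is only honored for replay domains:
 * without CONFIG_DETERMINISTIC_TIMETRAVEL both calls return -1, and for
 * a non-replay domain they return 0 as a no-op. Sketched usage:
 *
 *     if (xen_vm_enable_feature(target,XV_FEATURE_BTS,NULL) == 0) {
 *         // ...branch trace store active for the replay domain...
 *         xen_vm_disable_feature(target,XV_FEATURE_BTS);
 *     }
 */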
5872 
5873 /*
5874  * Local variables:
5875  * mode: C
5876  * c-set-style: "BSD"
5877  * c-basic-offset: 4
5878  * End:
5879  */