hostnamectl; # tested on 
   Static hostname: lenovo
  Operating System: Debian GNU/Linux 10 (buster)
            Kernel: Linux 4.19.0-17-amd64
      Architecture: x86-64

hostnamectl; # also on
   Operating System: Debian GNU/Linux 12 (bookworm)
             Kernel: Linux 6.1.0-17-amd64

# with
virsh --version
5.0.0
# also with
9.0.0

libvirtd --version
libvirtd (libvirt) 5.0.0
# also with
libvirtd (libvirt) 9.0.0

/usr/bin/kvm --version
QEMU emulator version 3.1.0 (Debian 1:3.1+dfsg-8+deb10u8)
Copyright (c) 2003-2018 Fabrice Bellard and the QEMU Project developers

kvm qemu: shut down all vms

cat /scripts/virsh.shutdown.all.sh

#!/bin/bash
echo "=== shutting down all kvm vms ==="
for i in $(virsh list | grep running | awk '{print $2}'); do virsh shutdown "$i"; done
virsh list --all

# example:
/scripts/virsh.shutdown.all.sh
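
# note: virsh shutdown only sends an acpi shutdown request, the vms power off asynchronously
# a minimal sketch to wait until no vm is running anymore (assumes "virsh list --name" prints only the names of running vms):
while [ -n "$(virsh list --name)" ]; do sleep 5; done
virsh list --all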

start all vms with keyword in name


cat /scripts/virsh.start.all.sh
#!/bin/bash
echo "=== starting all kvm vms that contain the word $1 ==="
for i in $(virsh list --all | grep $1 | awk '{print $2}'); do virsh start $i; done
virsh list --all

# example:
/scripts/virsh.start.all.sh keyword

snapshots:

# create a snapshot1 of debian12
virsh snapshot-create-as --domain debian12 --name "snapshot1"
# if the user wants to add a more detailed description
virsh snapshot-create-as --domain debian12 --name "snapshot1" --description "details"

# list all snapshots of debian12
virsh snapshot-list debian12

# restore/revert to snapshot snapshot1
virsh snapshot-revert --domain debian12 --snapshotname snapshot1

# delete snapshot1
virsh snapshot-delete --domain debian12 --snapshotname snapshot1
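
# optional check: which snapshot is the vm currently based on?
# (snapshot-current and snapshot-info are standard virsh subcommands)
virsh snapshot-current debian12 --name
virsh snapshot-info debian12 --current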

in general: how to iterate over all vms:

iterate over all vmnames:

# make sure
# to be logged in as a user
# that is allowed to "see" all virtual machines
# (per default that is only root)
su - root
virsh list

# if a list of vm names shows up: ok, let's continue
# iterate over all vmnames
for VM_NAME in $(virsh list | grep running | awk '{print $2}'); do echo "$VM_NAME"; done
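
# hedged alternative: newer virsh versions can print just the names directly (no grep/awk needed)
# names of running vms only
virsh list --name
# names of all vms (running and shut off)
virsh list --all --name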

get all mac addresses of all vms

this only works if the vms have dhcp-assigned addresses on the network “default”!

# iterate over all vms by name
for VM_NAME in $(virsh list | grep running | awk '{print $2}'); do echo "$VM_NAME"; virsh dumpxml "$VM_NAME" | grep 'mac address'; done

# sample output:
debian1
<mac address='52:54:00:da:f7:6f'/>
ubuntu1
<mac address='52:54:00:3d:9d:53'/>
ubuntu2
<mac address='52:54:00:66:5b:79'/>
debian2
<mac address='52:54:00:60:78:19'/>

for VM_NAME in $(virsh list | grep running | awk '{print $2}'); do echo "$VM_NAME"; virsh dumpxml "$VM_NAME" | grep -oP "mac address='\K[^']+"; done
# sample output
debian1
52:54:00:da:f7:6f
ubuntu1
52:54:00:3d:9d:53
ubuntu2
52:54:00:66:5b:79
debian2
52:54:00:60:78:19

# alternatively (lists only the dhcp-assigned ips)
# list all virtual networks
virsh net-list

# set up the default dhcp network (the dhcp server is not public, it serves only the local machine's virtual network)
virsh net-start default
virsh net-autostart default

virsh net-dhcp-leases default

 Expiry Time           MAC address         Protocol   IP address           Hostname    Client ID or DUID
-------------------------------------------------------------------------------------------------------------------------------------------------
 2024-01-08 13:08:39   52:54:00:15:XX:XX   ipv4       192.168.122.207/24   ubuntu      ff:56:50:4d:...
 2024-01-08 13:42:21   52:54:00:95:XX:XX   ipv4       192.168.122.162/24   debian12    ff:00:95:9d:6e:...
 2024-01-08 13:40:09   52:54:00:aa:XX:XX   ipv4       192.168.122.249/24   vmname   -
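
# per-vm alternative sketch: virsh domifaddr queries the dhcp lease database of the vm's network
# (assumes the vms get their addresses from a libvirt-managed dhcp, like the "default" network)
for VM_NAME in $(virsh list | grep running | awk '{print $2}'); do echo "$VM_NAME"; virsh domifaddr "$VM_NAME"; done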

alternatives:

su - root
apt install netdiscover
# arp scans local network
netdiscover

the netdiscover command might be useful (it does active/passive arp scanning)

netdiscover.man.txt
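
# a minimal sketch, assuming the vms sit on the default libvirt network 192.168.122.0/24
# (adjust the range to the actual virtual network in use)
netdiscover -r 192.168.122.0/24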

if all this did not help: check out the hacks and scripts

snapshot all vms:

cat /scripts/virsh.snapshot.all.sh
#!/bin/bash
echo "=== virsh: snapshot all vms ==="
for i in $(virsh list --all | awk 'NR>2 {print $2}'); do virsh snapshot-create "$i"; done
virsh list --all

# example:
/scripts/virsh.snapshot.all.sh
=== virsh: snapshot all vms ===
Domain snapshot 1628801587 created
Domain snapshot 1628801587 created
Domain snapshot 1628801587 created
Domain snapshot 1628801587 created
Domain snapshot 1628801587 created
 Id   Name               State
-----------------------------------
 -    ansible.master     shut off
 -    ansible.srv1       shut off
 -    ansible.srv2       shut off
 -    ansible.srv3       shut off
 -    debian10template   shut off

# list all snapshots of a vmname
virsh snapshot-list --domain vmname
# revert vmname to snapshotname
virsh snapshot-revert vmname snapshotname

automatically revert to snapshot, start vm and connect to vm:

vim /scripts/kvm-qemu/vmname.sh

(needs to be run as root)

change:

  1. vmname
  2. snapshotname
  3. user: the user that is running the x11 desktop 🙂

#!/bin/bash
echo "=== revert to working snapshot ==="
virsh snapshot-revert vmname snapshotname

echo "=== starting vm ==="
virsh start vmname
export DISPLAY=':0.0'
/usr/bin/cp -fv /home/user/.Xauthority /root/.Xauthority
virt-viewer 'vmname'

clone vm

# clone "vmname"
# even if a "may exceed disk" warning: if the original disk image is thin provisioned the cloned one will be too
virt-clone --original vmname --name vmname-clone --file /where/disk/image/vmname-clone.qcow2 --check disk_size=off

# if the vm has multiple disk files
# things get a bit more complicated
# as the user has to give the --file parameter for every harddisk.qcow2 file in use
# RedHat WHY? WHY? SIMPLIFY!
virt-clone --original vmname --name vmname-clone --auto-clone --check disk_size=off --file /path/vmname-clone/disk1 --file /path/vmname-clone/disk2 --file /path/vmname-clone/disk3 --file /path/vmname-clone/disk4

# so in order to simplify this process a bash script was written
vim clone_vm_with_all_harddisks.sh

#!/bin/bash
# version: 1.1
# author: dwaves.de
# date-creation: 2024-07-02
# date-last-test: 2024-07-02
# description: clone a kvm vm including all harddisk files
# usage:
# find the name of the vm to clone
# virsh list --all
# /path/to/clone_vm_with_all_harddisks.sh "vmname" /where/to/clone/it/to/

# check for proper usage
if [ "$#" -ne 2 ]; then
   echo "Usage: $0 <vmname> <destination_directory>"
   exit 1
fi

# assign variables
VM_NAME=$1
DEST_DIR=$2
CLONED_VM_NAME="${VM_NAME}_clone"

# check if the destination directory exists
if [ ! -d "$DEST_DIR" ]; then
   echo "The directory $DEST_DIR does not exist."
   exit 1
fi

# Check if the VM exists
if ! virsh dominfo "$VM_NAME" > /dev/null 2>&1; then
   echo "The VM $VM_NAME does not exist."
   exit 1
fi

# ensure the VM is powered off
if [ "$(virsh domstate "$VM_NAME")" != "shut off" ]; then
   echo "The VM $VM_NAME is not shut off. Please shut it down before cloning."
   exit 1
fi

# extract disk paths
DISK_PATHS=($(virsh domblklist "$VM_NAME" --details | awk '$1 == "file" {print $4}'))

# prepare the virt-clone command as a bash array (avoids fragile eval/string quoting)
VIRT_CLONE_CMD=(virt-clone --original "$VM_NAME" --name "$CLONED_VM_NAME")

# add a --file argument for every disk image
for DISK_PATH in "${DISK_PATHS[@]}"; do
   BASENAME=$(basename "$DISK_PATH")
   NEW_DISK_PATH="$DEST_DIR/${CLONED_VM_NAME}_${BASENAME}"
   VIRT_CLONE_CMD+=(--file "$NEW_DISK_PATH")
done

# clone the VM
"${VIRT_CLONE_CMD[@]}"

# check if the clone was successful
if [ $? -eq 0 ]; then
   echo "The VM $VM_NAME has been successfully cloned to $CLONED_VM_NAME."
   echo "The disk images are located at $DEST_DIR."
else
   echo "Failed to clone the VM $VM_NAME."
   exit 1
fi
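
# hypothetical example run (vm name and destination directory are placeholders):
virsh list --all
bash /path/to/clone_vm_with_all_harddisks.sh "vmname" /mnt/backup/vms/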

delete vm

# forceful shutdown of the vm
virsh destroy --domain vmname
# WARNING! DELETE INCLUDING EVERYTHING! (SAVED STATES, SNAPSHOTS, DISK IMAGE)
# WITHOUT POSSIBILITY TO RESTORE VM! DOUBLE CHECK THE NAME!
# deletes disk images,
# RAM save states
# and all snapshots too
virsh undefine --domain vmname --remove-all-storage --managed-save --snapshots-metadata
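
# before running the undefine above, it can help to double check what will be lost
# list the snapshots that will be deleted
virsh snapshot-list vmname
# list the disk images that will be deleted
virsh domblklist vmname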

add or remove a disk

in theory there is a command that can do that, in practice it is currently NOT working.
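
# (the command meant here is presumably "virsh attach-disk"; a sketch of how it would be invoked,
# in case it works in the reader's libvirt version - vmname, path and target dev are placeholders)
virsh attach-disk vmname /home/user/vms/kvm/vmname.disk1.qcow2 vdb --driver qemu --subdriver qcow2 --persistent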

so here is how to do it:

# power down the vm
# create the new disks (thin provisioned)

qemu-img create -f qcow2 /home/user/vms/kvm/vmname.disk1.qcow2 1024G
qemu-img create -f qcow2 /home/user/vms/kvm/vmname.disk2.qcow2 1024G
qemu-img create -f qcow2 /home/user/vms/kvm/vmname.disk3.qcow2 1024G
qemu-img create -f qcow2 /home/user/vms/kvm/vmname.disk4.qcow2 1024G
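
# optional check: qemu-img info should show the large virtual size but only a tiny actual "disk size" (thin provisioning)
qemu-img info /home/user/vms/kvm/vmname.disk1.qcow2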

# edit vm config file 
vim /etc/libvirt/qemu/vmname.xml

# find the last existing section of the form
<disk type='file' device='disk'>
...
</disk>

# afterwards insert:
# note that the hex number bus='0x64' (dec: 100)
# needs to be INCREMENTED (+1) for every new disk (max is hex:ff = dec:255)
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source file='/home/user/vms/kvm/vmname.disk1.qcow2'/>
<target dev='vdb' bus='virtio'/>
<address type='pci' domain='0x0000' bus='0x64' slot='0x00' function='0x0'/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source file='/home/user/vms/kvm/vmname.disk2.qcow2'/>
<target dev='vdc' bus='virtio'/>
<address type='pci' domain='0x0000' bus='0x65' slot='0x00' function='0x0'/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source file='/home/user/vms/kvm/vmname.disk3.qcow2'/>
<target dev='vdd' bus='virtio'/>
<address type='pci' domain='0x0000' bus='0x66' slot='0x00' function='0x0'/>
</disk>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source file='/home/user/vms/kvm/vmname.disk4.qcow2'/>
<target dev='vde' bus='virtio'/>
<address type='pci' domain='0x0000' bus='0x67' slot='0x00' function='0x0'/>
</disk>
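
# note: libvirtd usually does not pick up a hand-edited xml file automatically
# re-define the domain from the edited file and verify that the new disks show up
virsh define /etc/libvirt/qemu/vmname.xml
virsh domblklist vmname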

neat tips:

the user can (mostly always) append

virsh somecommand --running

which means: immediately after somecommand has finished, start the vm again
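
# example (the --running flag exists e.g. for the snapshot subcommands)
# revert to the snapshot and have the vm running afterwards
virsh snapshot-revert vmname snapshotname --running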

Links:

GNU Linux kvm how to get list of all IPs of all vms

https://computingforgeeks.com/how-to-create-vm-snapshot-on-kvm/
