1. Overview

    libvirt is a high-level wrapper around KVM that exposes programming interfaces for driving it: virtual machine life-cycle operations (create, delete, inspect, manage), network management, and storage management. Through libvirt you can control KVM and reproduce what tools such as virsh and virt-manager do. This article takes listing all instances on the current hypervisor as an example and shows how to use the libvirt Python module to enumerate the virtual machines on a host; for further operations such as start, shutdown, reboot, network management, and storage management, see the appendix.
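    As a quick taste of those life-cycle interfaces, here is a minimal sketch (not taken from the appendix code; the domain name is purely illustrative and error handling is omitted):

#!/usr/bin/env python
# Minimal life-cycle sketch: boot, reboot, and shut down a single domain.
import libvirt

conn = libvirt.open('qemu:///system')          # connect to the local hypervisor
dom = conn.lookupByName('instance-0000006b')   # hypothetical domain name
if not dom.isActive():
        dom.create()     # start a defined but powered-off domain
dom.reboot(0)            # request a guest reboot (flags=0)
dom.shutdown()           # request a graceful ACPI shutdown
conn.close()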


2. Implementation

cat libvirt_vm.py       
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: Happy
# blog address: http://happylab.blog.51cto.com
# From the Happy Lab
import sys
try:
        import libvirt
        HAS_LIBVIRT = True
except Exception:
        HAS_LIBVIRT = False
def is_virtual():
        '''
        Check whether the current system supports KVM virtualization; exit if not
        '''
        if not HAS_LIBVIRT:
                sys.exit("current system does not support virtualization")
        return 'virt'
def get_conn():
        '''
        Get a libvirt connection handle, the entry point for operating libvirt
        '''
        if is_virtual() == 'virt':
                try:
                        conn = libvirt.open('qemu:///system')
                except Exception as e:
                        sys.exit(e)
        return conn
def close_conn(conn):
        '''
        Close the libvirt connection handle
        '''
        return conn.close()
def list_active_vms():
        '''
        Get all running (active) instances and return their names
        '''
        vms_list = []
        conn = get_conn()
        domain_list = conn.listDomainsID()
        for dom_id in domain_list:
                vms_list.append(conn.lookupByID(dom_id).name())
        close_conn(conn)
        return vms_list
def list_inactive_vms():
        '''
        Get all shut-off (inactive) instances and return their names
        '''
        vms_list = []
        conn = get_conn()
        for name in conn.listDefinedDomains():
                vms_list.append(name)
        close_conn(conn)
        return vms_list
def list_all_vms():
        '''
        Get all virtual machines, active and inactive
        '''
        vms = []
        vms.extend(list_active_vms())
        vms.extend(list_inactive_vms())
        return vms
def get_capability():
        '''
        Get the hypervisor's capabilities, returned as XML
        '''
        conn = get_conn()
        capability = conn.getCapabilities()
        conn.close()
        return capability
def get_hostname():
        '''
        Get the hypervisor's hostname
        '''
        conn = get_conn()
        hostname = conn.getHostname()
        conn.close()
        return hostname
def get_max_vcpus():
        '''
        Get the maximum number of vCPUs the hypervisor supports for a guest
        '''
        conn = get_conn()
        max_vcpus = conn.getMaxVcpus(None)
        conn.close()
        return max_vcpus
if __name__ == "__main__":
        print "当前主机%s的虚拟机列表:" % (get_hostname())
        for vms in list_active_vms():
                print vms
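
get_capability() returns the hypervisor's capabilities as raw XML. Below is a minimal sketch of pulling the host CPU architecture out of it with the standard library; the element path assumes libvirt's usual <capabilities><host><cpu><arch> layout:

#!/usr/bin/env python
# Sketch: parse the XML returned by get_capability().
import xml.etree.ElementTree as ET
from libvirt_vm import get_capability

caps = ET.fromstring(get_capability())
arch = caps.find('host/cpu/arch')   # host CPU architecture node
if arch is not None:
        print arch.text             # e.g. x86_64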

3. Test

[root@ChuangYiYuan_10_16_2_19 ~]# python libvirt_vm.py 
VM list on host ChuangYiYuan_10_16_2_19:
instance-0000006b
instance-000001c1
instance-000000b9
instance-00000181
instance-000001f5
instance-000000cb
instance-0000007f
instance-000000eb
instance-00000145
instance-0000019b
instance-000001b9
instance-000000d7
instance-0000012b
instance-00000077
instance-00000165
instance-00000083

4. Summary

    libvirt makes KVM management practical: it provides most of the interfaces needed to manage KVM, and through these interfaces you can implement the low-level operations that OpenStack relies on.


5. Appendix

    OpenStack's low-level implementation on top of libvirt (an excerpt from Nova's libvirt driver), provided for reference.

"""
Supports KVM, LXC, QEMU, UML, and XEN.
"""
import errno
import eventlet
import functools
import glob
import mmap
import os
import shutil
import socket
import sys
import tempfile
import threading
import time
import uuid

class LibvirtDriver(driver.ComputeDriver):
    capabilities = {
        "has_p_w_picpathcache": True,
        "supports_recreate": True,
        }
    def __init__(self, virtapi, read_only=False):
        super(LibvirtDriver, self).__init__(virtapi)
        global libvirt
        if libvirt is None:
            libvirt = __import__('libvirt')
        self._host_state = None
        self._initiator = None
        self._fc_wwnns = None
        self._fc_wwpns = None
        self._wrapped_conn = None
        self._wrapped_conn_lock = threading.Lock()
        self._caps = None
        self._vcpu_total = 0
        self.read_only = read_only
        self.firewall_driver = firewall.load_driver(
            DEFAULT_FIREWALL_DRIVER,
            self.virtapi,
            get_connection=self._get_connection)
        vif_class = importutils.import_class(CONF.libvirt.vif_driver)
        self.vif_driver = vif_class(self._get_connection)
        self.volume_drivers = driver.driver_dict_from_config(
            CONF.libvirt.volume_drivers, self)
        self.dev_filter = pci_whitelist.get_pci_devices_filter()
        self._event_queue = None
        self._disk_cachemode = None
        self.image_cache_manager = imagecache.ImageCacheManager()
        self.image_backend = imagebackend.Backend(CONF.use_cow_images)
        self.disk_cachemodes = {}
        self.valid_cachemodes = ["default",
                                 "none",
                                 "writethrough",
                                 "writeback",
                                 "directsync",
                                 "unsafe",
                                ]
        for mode_str in CONF.libvirt.disk_cachemodes:
            disk_type, sep, cache_mode = mode_str.partition('=')
            if cache_mode not in self.valid_cachemodes:
                LOG.warn(_('Invalid cachemode %(cache_mode)s specified '
                           'for disk type %(disk_type)s.'),
                         {'cache_mode': cache_mode, 'disk_type': disk_type})
                continue
            self.disk_cachemodes[disk_type] = cache_mode
        self._volume_api = volume.API()
 
    def _get_new_connection(self):
        # call with _wrapped_conn_lock held
        LOG.debug(_('Connecting to libvirt: %s'), self.uri())
        wrapped_conn = None
        try:
            wrapped_conn = self._connect(self.uri(), self.read_only)
        finally:
            # Enabling the compute service, in case it was disabled
            # since the connection was successful.
            disable_reason = DISABLE_REASON_UNDEFINED
            if not wrapped_conn:
                disable_reason = 'Failed to connect to libvirt'
            self._set_host_enabled(bool(wrapped_conn), disable_reason)
        self._wrapped_conn = wrapped_conn
        try:
            LOG.debug(_("Registering for lifecycle events %s"), self)
            wrapped_conn.domainEventRegisterAny(
                None,
                libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
                self._event_lifecycle_callback,
                self)
        except Exception as e:
            LOG.warn(_("URI %(uri)s does not support events: %(error)s"),
                     {'uri': self.uri(), 'error': e})
        try:
            LOG.debug(_("Registering for connection events: %s") %
                      str(self))
            wrapped_conn.registerCloseCallback(self._close_callback, None)
        except (TypeError, AttributeError) as e:
            # NOTE: The registerCloseCallback of python-libvirt 1.0.1+
            # is defined with 3 arguments, and the above registerClose-
            # Callback succeeds. However, the one of python-libvirt 1.0.0
            # is defined with 4 arguments and TypeError happens here.
            # Then python-libvirt 0.9 does not define a method register-
            # CloseCallback.
            LOG.debug(_("The version of python-libvirt does not support "
                        "registerCloseCallback or is too old: %s"), e)
        except libvirt.libvirtError as e:
            LOG.warn(_("URI %(uri)s does not support connection"
                       " events: %(error)s"),
                     {'uri': self.uri(), 'error': e})
        return wrapped_conn
    @staticmethod
    def uri():
        if CONF.libvirt.virt_type == 'uml':
            uri = CONF.libvirt.connection_uri or 'uml:///system'
        elif CONF.libvirt.virt_type == 'xen':
            uri = CONF.libvirt.connection_uri or 'xen:///'
        elif CONF.libvirt.virt_type == 'lxc':
            uri = CONF.libvirt.connection_uri or 'lxc:///'
        else:
            uri = CONF.libvirt.connection_uri or 'qemu:///system'
        return uri
    @staticmethod
    def _connect(uri, read_only):
        def _connect_auth_cb(creds, opaque):
            if len(creds) == 0:
                return 0
            LOG.warning(
                _("Can not handle authentication request for %d credentials")
                % len(creds))
            raise exception.NovaException(
                _("Can not handle authentication request for %d credentials")
                % len(creds))
        auth = [[libvirt.VIR_CRED_AUTHNAME,
                 libvirt.VIR_CRED_ECHOPROMPT,
                 libvirt.VIR_CRED_REALM,
                 libvirt.VIR_CRED_PASSPHRASE,
                 libvirt.VIR_CRED_NOECHOPROMPT,
                 libvirt.VIR_CRED_EXTERNAL],
                _connect_auth_cb,
                None]
        try:
            flags = 0
            if read_only:
                flags = libvirt.VIR_CONNECT_RO
            # tpool.proxy_call creates a native thread. Due to limitations
            # with eventlet locking we cannot use the logging API inside
            # the called function.
            return tpool.proxy_call(
                (libvirt.virDomain, libvirt.virConnect),
                libvirt.openAuth, uri, auth, flags)
        except libvirt.libvirtError as ex:
            LOG.exception(_("Connection to libvirt failed: %s"), ex)
            payload = dict(ip=LibvirtDriver.get_host_ip_addr(),
                           method='_connect',
                           reason=ex)
            rpc.get_notifier('compute').error(nova_context.get_admin_context(),
                                              'compute.libvirt.error',
                                              payload)
            raise exception.HypervisorUnavailable(host=CONF.host)
    '''
    Return the number of instances: conn.numOfDomains() reports how many VMs are
    active, while conn.numOfDefinedDomains() reports how many are inactive.
    '''
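    # A standalone, hedged illustration of those two counters (hypothetical
    # numbers, using a plain libvirt connection rather than Nova's wrapper):
    #   conn = libvirt.open('qemu:///system')
    #   conn.numOfDomains()         # e.g. 3 -> running (active) domains
    #   conn.numOfDefinedDomains()  # e.g. 2 -> defined but shut-off domains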
    def get_num_instances(self):
        """Efficient override of base instance_exists method."""
        return self._conn.numOfDomains()
    '''
    Check whether an instance exists, looked up by name.
    '''
    def instance_exists(self, instance_name):
        """Efficient override of base instance_exists method."""
        try:
            self._lookup_by_name(instance_name)
            return True
        except exception.NovaException:
            return False
    '''
    List the IDs of active libvirt domains; conn.numOfDomains() reports the number
    of active VMs and conn.numOfDefinedDomains() the number of inactive ones.
    '''
    # TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed.
    def list_instance_ids(self):
        if self._conn.numOfDomains() == 0:
            return []
        return self._conn.listDomainsID()
    '''
    Return the VM names. list_instance_ids() yields only the active domains, whose
    names come from conn.lookupByID(id).name(); the defined (inactive) domains are
    appended afterwards.
    '''
    def list_instances(self):
        names = []
        for domain_id in self.list_instance_ids():
            try:
                # We skip domains with ID 0 (hypervisors).
                if domain_id != 0:
                    domain = self._lookup_by_id(domain_id)
                    names.append(domain.name())
            except exception.InstanceNotFound:
                # Ignore deleted instance while listing
                continue
        # extend instance list to contain also defined domains
        names.extend([vm for vm in self._conn.listDefinedDomains()
                    if vm not in names])
        return names
    '''
    List instance UUIDs for both active and inactive VMs:
    conn.lookupByID(id).UUIDString() returns a running instance's UUID, and
    conn.lookupByName(name).UUIDString() returns an inactive one's.
    '''
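    # Hedged illustration of the two lookups (ID and name are hypothetical):
    #   conn.lookupByID(7).UUIDString()                      # running domain
    #   conn.lookupByName('instance-0000006b').UUIDString()  # defined domain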
    def list_instance_uuids(self):
        uuids = set()
        for domain_id in self.list_instance_ids():
            try:
                # We skip domains with ID 0 (hypervisors).
                if domain_id != 0:
                    domain = self._lookup_by_id(domain_id)
                    uuids.add(domain.UUIDString())
            except exception.InstanceNotFound:
                # Ignore deleted instance while listing
                continue
        # extend instance list to contain also defined domains
        for domain_name in self._conn.listDefinedDomains():
            try:
                uuids.add(self._lookup_by_name(domain_name).UUIDString())
            except exception.InstanceNotFound:
                # Ignore deleted instance while listing
                continue
        return list(uuids)
    def plug_vifs(self, instance, network_info):
        """Plug VIFs into networks."""
        for vif in network_info:
            self.vif_driver.plug(instance, vif)
    def unplug_vifs(self, instance, network_info, ignore_errors=False):
        """Unplug VIFs from networks."""
        for vif in network_info:
            try:
                self.vif_driver.unplug(instance, vif)
            except exception.NovaException:
                if not ignore_errors:
                    raise
    def _teardown_container(self, instance):
        inst_path = libvirt_utils.get_instance_path(instance)
        container_dir = os.path.join(inst_path, 'rootfs')
        container_root_device = instance.get('root_device_name')
        disk.teardown_container(container_dir, container_root_device)
   
    def _undefine_domain(self, instance):
        try:
            virt_dom = self._lookup_by_name(instance['name'])
        except exception.InstanceNotFound:
            virt_dom = None
        if virt_dom:
            try:
                try:
                    virt_dom.undefineFlags(
                        libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
                except libvirt.libvirtError:
                    LOG.debug(_("Error from libvirt during undefineFlags."
                        " Retrying with undefine"), instance=instance)
                    virt_dom.undefine()
                except AttributeError:
                    # NOTE(vish): Older versions of libvirt don't support
                    #             undefine flags, so attempt to do the
                    #             right thing.
                    try:
                        if virt_dom.hasManagedSaveImage(0):
                            virt_dom.managedSaveRemove(0)
                    except AttributeError:
                        pass
                    virt_dom.undefine()
            except libvirt.libvirtError as e:
                with excutils.save_and_reraise_exception():
                    errcode = e.get_error_code()
                    LOG.error(_('Error from libvirt during undefine. '
                                'Code=%(errcode)s Error=%(e)s') %
                              {'errcode': errcode, 'e': e}, instance=instance)
  
    def _cleanup_rbd(self, instance):
        pool = CONF.libvirt.images_rbd_pool
        volumes = libvirt_utils.list_rbd_volumes(pool)
        pattern = instance['uuid']
        def belongs_to_instance(disk):
            return disk.startswith(pattern)
        volumes = filter(belongs_to_instance, volumes)
        if volumes:
            libvirt_utils.remove_rbd_volumes(pool, *volumes)
    def _cleanup_lvm(self, instance):
        """Delete all LVM disks for given instance object."""
        disks = self._lvm_disks(instance)
        if disks:
            libvirt_utils.remove_logical_volumes(*disks)
   
    @staticmethod
    def _get_disk_xml(xml, device):
        """Returns the xml for the disk mounted at device."""
        try:
            doc = etree.fromstring(xml)
        except Exception:
            return None
        ret = doc.findall('./devices/disk')
        for node in ret:
            for child in node.getchildren():
                if child.tag == 'target':
                    if child.get('dev') == device:
                        return etree.tostring(node)
    def _get_existing_domain_xml(self, instance, network_info,
                                 block_device_info=None):
        try:
            virt_dom = self._lookup_by_name(instance['name'])
            xml = virt_dom.XMLDesc(0)
        except exception.InstanceNotFound:
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance,
                                                block_device_info)
            xml = self.to_xml(nova_context.get_admin_context(),
                              instance, network_info, disk_info,
                              block_device_info=block_device_info)
        return xml