Mirror of https://github.com/vagrant-libvirt/vagrant-libvirt.git (synced 2025-02-25 18:55:27 -06:00)
Add support for VMs with no box
Vagrant already supports VMs without boxes for the Docker provider. We leverage the same support in vagrant-libvirt. The use case for this is to PXE boot a Vagrant VM that is then installed over the network; a different use case would be to test PXE-booted clients that do not use a hard drive whatsoever.
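For the network-install use case, a boxless guest would presumably pair network boot with an empty local disk. A rough Vagrantfile sketch of that setup; the `storage` call, the disk size and the `'hd'` fallback boot entry are illustrative assumptions, not something this commit itself adds:

```ruby
Vagrant.configure("2") do |config|
  # No config.vm.box line: the libvirt provider is registered as box_optional.
  config.vm.define :pxe_installer do |machine|
    machine.vm.provider :libvirt do |domain|
      domain.boot 'network'                 # try PXE first
      domain.boot 'hd'                      # then the freshly installed disk
      domain.storage :file, :size => '20G'  # blank volume for the installer to use
    end
  end
end
```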
README.md: +21 lines
@@ -22,6 +22,7 @@ welcome and can help a lot :-)
 * Snapshots via [sahara](https://github.com/jedi4ever/sahara).
 * Package caching via [vagrant-cachier](http://fgrehm.viewdocs.io/vagrant-cachier/).
 * Use boxes from other Vagrant providers via [vagrant-mutate](https://github.com/sciurus/vagrant-mutate).
+* Support VMs with no box for PXE boot purposes
 
 ## Future work
 
@@ -425,6 +426,26 @@ Vagrant.configure("2") do |config|
 end
 ```
 
+## No box and PXE boot
+
+There is support for PXE booting VMs with no disks as well as PXE booting VMs with blank disks. There are some limitations:
+
+* No provisioning scripts are run
+* No network configuration is applied to the VM
+* No SSH connection can be made
+* ```vagrant halt``` will only work cleanly if the VM handles ACPI shutdown signals
+
+In short, VMs without a box can be created, halted and destroyed, but none of the other functionality is available.
+
+An example of a PXE-booted VM with no disks whatsoever:
+
+    Vagrant.configure("2") do |config|
+      config.vm.define :pxeclient do |pxeclient|
+        pxeclient.vm.provider :libvirt do |domain|
+          domain.boot 'network'
+        end
+      end
+    end
+
 ## SSH Access To VM
 
 vagrant-libvirt supports vagrant's [standard ssh settings](https://docs.vagrantup.com/v2/vagrantfile/ssh_settings.html).
@@ -23,27 +23,34 @@ module VagrantPlugins
           # Create VM if not yet created.
           if !env[:result]
             b2.use SetNameOfDomain
-            b2.use HandleStoragePool
-            b2.use HandleBox
-            b2.use HandleBoxImage
-            b2.use CreateDomainVolume
-            b2.use CreateDomain
+            if !env[:machine].box
+              b2.use CreateDomain
+              b2.use CreateNetworks
+              b2.use CreateNetworkInterfaces
+              b2.use StartDomain
+            else
+              b2.use HandleStoragePool
+              b2.use HandleBox
+              b2.use HandleBoxImage
+              b2.use CreateDomainVolume
+              b2.use CreateDomain
 
-            b2.use Provision
-            b2.use PrepareNFSValidIds
-            b2.use SyncedFolderCleanup
-            b2.use SyncedFolders
-            b2.use PrepareNFSSettings
-            b2.use ShareFolders
-            b2.use CreateNetworks
-            b2.use CreateNetworkInterfaces
+              b2.use Provision
+              b2.use PrepareNFSValidIds
+              b2.use SyncedFolderCleanup
+              b2.use SyncedFolders
+              b2.use PrepareNFSSettings
+              b2.use ShareFolders
+              b2.use CreateNetworks
+              b2.use CreateNetworkInterfaces
 
-            b2.use StartDomain
-            b2.use WaitTillUp
+              b2.use StartDomain
+              b2.use WaitTillUp
 
-            b2.use ForwardPorts
-            b2.use SetHostname
-            # b2.use SyncFolders
+              b2.use ForwardPorts
+              b2.use SetHostname
+              # b2.use SyncFolders
+            end
           else
             b2.use action_start
           end
@@ -68,27 +75,33 @@ module VagrantPlugins
               next
             end
 
-            # VM is not running or suspended.
+            if !env[:machine].box
+              # With no box, we just care about network creation and starting it
+              b3.use CreateNetworks
+              b3.use StartDomain
+            else
+              # VM is not running or suspended.
 
-            b3.use Provision
+              b3.use Provision
 
-            # Ensure networks are created and active
-            b3.use CreateNetworks
+              # Ensure networks are created and active
+              b3.use CreateNetworks
 
-            b3.use PrepareNFSValidIds
-            b3.use SyncedFolderCleanup
-            b3.use SyncedFolders
+              b3.use PrepareNFSValidIds
+              b3.use SyncedFolderCleanup
+              b3.use SyncedFolders
 
-            # Start it..
-            b3.use StartDomain
+              # Start it..
+              b3.use StartDomain
 
-            # Machine should gain IP address when comming up,
-            # so wait for dhcp lease and store IP into machines data_dir.
-            b3.use WaitTillUp
+              # Machine should gain IP address when comming up,
+              # so wait for dhcp lease and store IP into machines data_dir.
+              b3.use WaitTillUp
 
-            b3.use ForwardPorts
-            b3.use PrepareNFSSettings
-            b3.use ShareFolders
+              b3.use ForwardPorts
+              b3.use PrepareNFSSettings
+              b3.use ShareFolders
+            end
           end
         end
       end
@@ -154,7 +167,9 @@ module VagrantPlugins
           if !env[:result]
             # Try to remove stale volumes anyway
             b2.use SetNameOfDomain
-            b2.use RemoveStaleVolume
+            if env[:machine].box
+              b2.use RemoveStaleVolume
+            end
             if !env[:result]
               b2.use MessageNotCreated
             end
@@ -72,18 +72,29 @@ module VagrantPlugins
           @os_type = 'hvm'
 
-          # Get path to domain image from the storage pool selected.
-          actual_volumes =
-            env[:machine].provider.driver.connection.volumes.all.select do |x|
-              x.pool_name == @storage_pool_name
-            end
-          domain_volume = ProviderLibvirt::Util::Collection.find_matching(
-            actual_volumes, "#{@name}.img")
-          raise Errors::DomainVolumeExists if domain_volume.nil?
-          @domain_volume_path = domain_volume.path
+          # Get path to domain image from the storage pool selected if we have a box.
+          if env[:machine].box
+            actual_volumes =
+              env[:machine].provider.driver.connection.volumes.all.select do |x|
+                x.pool_name == @storage_pool_name
+              end
+            domain_volume = ProviderLibvirt::Util::Collection.find_matching(
+              actual_volumes,"#{@name}.img")
+            raise Errors::DomainVolumeExists if domain_volume.nil?
+            @domain_volume_path = domain_volume.path
+          end
 
+          # If we have a box, take the path from the domain volume and set our storage_prefix.
+          # If not, we dump the storage pool xml to get its defined path.
           # the default storage prefix is typically: /var/lib/libvirt/images/
-          storage_prefix = File.dirname(@domain_volume_path) + '/' # steal
+          if env[:machine].box
+            storage_prefix = File.dirname(@domain_volume_path) + '/' # steal
+          else
+            storage_pool = env[:machine].provider.driver.connection.client.lookup_storage_pool_by_name(@storage_pool_name)
+            raise Errors::NoStoragePool if storage_pool.nil?
+            xml = Nokogiri::XML(storage_pool.xml_desc)
+            storage_prefix = xml.xpath("/pool/target/path").inner_text.to_s + '/'
+          end
 
           @disks.each do |disk|
             disk[:path] ||= _disk_name(@name, disk)
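The `else` branch above derives the storage prefix by asking libvirt for the pool definition rather than taking it from a box volume. Outside of Vagrant, the same lookup can be sketched directly with the ruby-libvirt and nokogiri gems; the connection URI and the pool name `default` are assumptions here:

```ruby
require 'libvirt'   # ruby-libvirt gem
require 'nokogiri'

conn = Libvirt::open('qemu:///system')
pool = conn.lookup_storage_pool_by_name('default')

# The pool XML carries its target path, typically /var/lib/libvirt/images
xml = Nokogiri::XML(pool.xml_desc)
storage_prefix = xml.xpath('/pool/target/path').inner_text.to_s + '/'
puts storage_prefix

conn.close
```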
@@ -125,7 +136,9 @@ module VagrantPlugins
           env[:ui].info(" -- Cpus: #{@cpus}")
           env[:ui].info(" -- Memory: #{@memory_size / 1024}M")
           env[:ui].info(" -- Loader: #{@loader}")
-          env[:ui].info(" -- Base box: #{env[:machine].box.name}")
+          if env[:machine].box
+            env[:ui].info(" -- Base box: #{env[:machine].box.name}")
+          end
           env[:ui].info(" -- Storage pool: #{@storage_pool_name}")
           env[:ui].info(" -- Image: #{@domain_volume_path} (#{env[:box_virtual_size]}G)")
           env[:ui].info(" -- Volume Cache: #{@domain_volume_cache}")
@@ -125,43 +125,46 @@ module VagrantPlugins
         # Continue the middleware chain.
         @app.call(env)
 
-        # Configure interfaces that user requested. Machine should be up and
-        # running now.
-        networks_to_configure = []
-
-        adapters.each_with_index do |options, slot_number|
-          # Skip configuring the management network, which is on the first interface.
-          # It's used for provisioning and it has to be available during provisioning,
-          # ifdown command is not acceptable here.
-          next if slot_number == 0
-          next if options[:auto_config] === false
-          @logger.debug "Configuring interface slot_number #{slot_number} options #{options}"
-
-          network = {
-            :interface => slot_number,
-            :use_dhcp_assigned_default_route => options[:use_dhcp_assigned_default_route],
-            :mac_address => options[:mac],
-          }
-
-          if options[:ip]
-            network = {
-              :type => :static,
-              :ip => options[:ip],
-              :netmask => options[:netmask],
-            }.merge(network)
-          else
-            network[:type] = :dhcp
-          end
-
-          # do not run configure_networks for tcp tunnel interfaces
-          next if options.fetch(:tcp_tunnel_type, nil)
-
-          networks_to_configure << network
-        end
-
-        env[:ui].info I18n.t('vagrant.actions.vm.network.configuring')
-        env[:machine].guest.capability(
-          :configure_networks, networks_to_configure)
+        if env[:machine].box
+          # Configure interfaces that user requested. Machine should be up and
+          # running now.
+          networks_to_configure = []
+
+          adapters.each_with_index do |options, slot_number|
+            # Skip configuring the management network, which is on the first interface.
+            # It's used for provisioning and it has to be available during provisioning,
+            # ifdown command is not acceptable here.
+            next if slot_number == 0
+            next if options[:auto_config] === false
+            @logger.debug "Configuring interface slot_number #{slot_number} options #{options}"
+
+            network = {
+              :interface => slot_number,
+              :use_dhcp_assigned_default_route => options[:use_dhcp_assigned_default_route],
+              :mac_address => options[:mac],
+            }
+
+            if options[:ip]
+              network = {
+                :type => :static,
+                :ip => options[:ip],
+                :netmask => options[:netmask],
+              }.merge(network)
+            else
+              network[:type] = :dhcp
+            end
+
+            # do not run configure_networks for tcp tunnel interfaces
+            next if options.fetch(:tcp_tunnel_type, nil)
+
+            networks_to_configure << network
+          end
+
+          env[:ui].info I18n.t('vagrant.actions.vm.network.configuring')
+          env[:machine].guest.capability(
+            :configure_networks, networks_to_configure)
+        end
       end
 
       private
@@ -23,7 +23,7 @@ module VagrantPlugins
         Config
       end
 
-      provider('libvirt', parallel: true) do
+      provider('libvirt', parallel: true, box_optional: true) do
         require_relative 'provider'
         Provider
       end
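`box_optional: true` is a flag on Vagrant's provider registration that tells core a machine using this provider may omit `config.vm.box`. A stripped-down sketch of such a registration; the plugin and provider names are illustrative, not vagrant-libvirt's actual identifiers:

```ruby
require 'vagrant'

module VagrantPlugins
  module ExampleProvider
    class Plugin < Vagrant.plugin('2')
      name 'example provider'

      # box_optional: true lets a machine definition skip config.vm.box entirely.
      provider('example', parallel: true, box_optional: true) do
        # Loaded lazily when Vagrant resolves the provider; a real plugin
        # would require_relative its Provider class here.
        require_relative 'provider'
        Provider
      end
    end
  end
end
```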
@@ -49,12 +49,14 @@
     </features>
     <clock offset='utc'/>
     <devices>
+      <% if @domain_volume_path %>
       <disk type='file' device='disk'>
         <driver name='qemu' type='qcow2' cache='<%= @domain_volume_cache %>'/>
         <source file='<%= @domain_volume_path %>'/>
         <%# we need to ensure a unique target dev -%>
         <target dev='vda' bus='<%= @disk_bus %>'/>
       </disk>
+      <% end %>
       <%# additional disks -%>
       <% @disks.each do |d| -%>
       <disk type='file' device='disk'>
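The template change only emits the primary `<disk>` element when a domain volume path exists. A small self-contained Ruby/ERB illustration of that behavior; the template string below is a trimmed stand-in, not a copy of the project's domain.xml.erb:

```ruby
require 'erb'

TEMPLATE = <<-'XML'
  <devices>
<% if @domain_volume_path %>
    <disk type='file' device='disk'>
      <source file='<%= @domain_volume_path %>'/>
    </disk>
<% end %>
  </devices>
XML

class DomainSketch
  def initialize(domain_volume_path)
    @domain_volume_path = domain_volume_path
  end

  def to_xml
    ERB.new(TEMPLATE).result(binding)
  end
end

puts DomainSketch.new(nil).to_xml                                    # boxless VM: no primary <disk> element
puts DomainSketch.new('/var/lib/libvirt/images/example.img').to_xml  # box-backed VM: primary <disk> present
```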