diff --git a/api/src/main/java/com/cloud/storage/Storage.java b/api/src/main/java/com/cloud/storage/Storage.java
index 5b3e97698fda..ddf5978497ba 100644
--- a/api/src/main/java/com/cloud/storage/Storage.java
+++ b/api/src/main/java/com/cloud/storage/Storage.java
@@ -170,6 +170,7 @@ public static enum StoragePoolType {
ISO(false, false, EncryptionSupport.Unsupported), // for iso image
LVM(false, false, EncryptionSupport.Unsupported), // XenServer local LVM SR
CLVM(true, false, EncryptionSupport.Unsupported),
+ CLVM_NG(true, false, EncryptionSupport.Hypervisor),
RBD(true, true, EncryptionSupport.Unsupported), // http://libvirt.org/storage.html#StorageBackendRBD
SharedMountPoint(true, true, EncryptionSupport.Hypervisor),
VMFS(true, true, EncryptionSupport.Unsupported), // VMware VMFS storage
diff --git a/core/src/main/java/com/cloud/agent/api/MigrateCommand.java b/core/src/main/java/com/cloud/agent/api/MigrateCommand.java
index 5ac4e9ae445e..a960e6e33a1f 100644
--- a/core/src/main/java/com/cloud/agent/api/MigrateCommand.java
+++ b/core/src/main/java/com/cloud/agent/api/MigrateCommand.java
@@ -26,6 +26,7 @@
import com.cloud.agent.api.to.DpdkTO;
import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.storage.Storage;
public class MigrateCommand extends Command {
private String vmName;
@@ -184,6 +185,8 @@ public String toString() {
private final String sourceText;
private final String backingStoreText;
private boolean isSourceDiskOnStorageFileSystem;
+ private Storage.StoragePoolType sourcePoolType;
+ private Storage.StoragePoolType destPoolType;
public MigrateDiskInfo(final String serialNumber, final DiskType diskType, final DriverType driverType, final Source source, final String sourceText) {
this.serialNumber = serialNumber;
@@ -232,6 +235,22 @@ public boolean isSourceDiskOnStorageFileSystem() {
public void setSourceDiskOnStorageFileSystem(boolean isDiskOnFileSystemStorage) {
this.isSourceDiskOnStorageFileSystem = isDiskOnFileSystemStorage;
}
+
+ public Storage.StoragePoolType getSourcePoolType() {
+ return sourcePoolType;
+ }
+
+ public void setSourcePoolType(Storage.StoragePoolType sourcePoolType) {
+ this.sourcePoolType = sourcePoolType;
+ }
+
+ public Storage.StoragePoolType getDestPoolType() {
+ return destPoolType;
+ }
+
+ public void setDestPoolType(Storage.StoragePoolType destPoolType) {
+ this.destPoolType = destPoolType;
+ }
}
@Override
diff --git a/core/src/main/java/com/cloud/agent/api/PostMigrationAnswer.java b/core/src/main/java/com/cloud/agent/api/PostMigrationAnswer.java
new file mode 100644
index 000000000000..24fdf8402029
--- /dev/null
+++ b/core/src/main/java/com/cloud/agent/api/PostMigrationAnswer.java
@@ -0,0 +1,42 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.agent.api;
+
+/**
+ * Answer for PostMigrationCommand.
+ * Indicates success or failure of post-migration operations on the destination host.
+ */
+public class PostMigrationAnswer extends Answer {
+
+ protected PostMigrationAnswer() {
+ }
+
+ public PostMigrationAnswer(PostMigrationCommand cmd, String detail) {
+ super(cmd, false, detail);
+ }
+
+ public PostMigrationAnswer(PostMigrationCommand cmd, Exception ex) {
+ super(cmd, ex);
+ }
+
+ public PostMigrationAnswer(PostMigrationCommand cmd) {
+ super(cmd, true, null);
+ }
+}
diff --git a/core/src/main/java/com/cloud/agent/api/PostMigrationCommand.java b/core/src/main/java/com/cloud/agent/api/PostMigrationCommand.java
new file mode 100644
index 000000000000..e32e6eacb344
--- /dev/null
+++ b/core/src/main/java/com/cloud/agent/api/PostMigrationCommand.java
@@ -0,0 +1,54 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.agent.api;
+
+import com.cloud.agent.api.to.VirtualMachineTO;
+
+/**
+ * PostMigrationCommand is sent to the destination host after a successful VM migration.
+ * It performs post-migration tasks such as:
+ * - Claiming exclusive locks on CLVM volumes (converting from shared to exclusive mode)
+ * - Other post-migration cleanup operations
+ */
+public class PostMigrationCommand extends Command {
+ private VirtualMachineTO vm;
+ private String vmName;
+
+ protected PostMigrationCommand() {
+ }
+
+ public PostMigrationCommand(VirtualMachineTO vm, String vmName) {
+ this.vm = vm;
+ this.vmName = vmName;
+ }
+
+ public VirtualMachineTO getVirtualMachine() {
+ return vm;
+ }
+
+ public String getVmName() {
+ return vmName;
+ }
+
+ @Override
+ public boolean executeInSequence() {
+ return true;
+ }
+}
diff --git a/core/src/main/java/com/cloud/agent/api/PreMigrationCommand.java b/core/src/main/java/com/cloud/agent/api/PreMigrationCommand.java
new file mode 100644
index 000000000000..951ee46002fb
--- /dev/null
+++ b/core/src/main/java/com/cloud/agent/api/PreMigrationCommand.java
@@ -0,0 +1,56 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.agent.api;
+
+import com.cloud.agent.api.to.VirtualMachineTO;
+
+/**
+ * PreMigrationCommand is sent to the source host before VM migration starts.
+ * It performs pre-migration tasks such as:
+ * - Converting CLVM volume exclusive locks to shared mode so destination host can access them
+ * - Other pre-migration preparation operations on the source host
+ *
+ * This command runs on the SOURCE host before PrepareForMigrationCommand runs on the DESTINATION host.
+ */
+public class PreMigrationCommand extends Command {
+ private VirtualMachineTO vm;
+ private String vmName;
+
+ protected PreMigrationCommand() {
+ }
+
+ public PreMigrationCommand(VirtualMachineTO vm, String vmName) {
+ this.vm = vm;
+ this.vmName = vmName;
+ }
+
+ public VirtualMachineTO getVirtualMachine() {
+ return vm;
+ }
+
+ public String getVmName() {
+ return vmName;
+ }
+
+ @Override
+ public boolean executeInSequence() {
+ return true;
+ }
+}
diff --git a/core/src/main/java/org/apache/cloudstack/storage/command/ClvmLockTransferCommand.java b/core/src/main/java/org/apache/cloudstack/storage/command/ClvmLockTransferCommand.java
new file mode 100644
index 000000000000..7d71ba78509b
--- /dev/null
+++ b/core/src/main/java/org/apache/cloudstack/storage/command/ClvmLockTransferCommand.java
@@ -0,0 +1,97 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.cloudstack.storage.command;
+
+import com.cloud.agent.api.Command;
+
+/**
+ * Command to transfer CLVM (Clustered LVM) exclusive lock between hosts.
+ * This enables lightweight volume migration for CLVM storage pools where volumes
+ * reside in the same Volume Group (VG) but need to be accessed from different hosts.
+ *
+ *
Instead of copying volume data (traditional migration), this command simply
+ * deactivates the LV on the source host and activates it exclusively on the destination host.
+ *
+ *
This is significantly faster (10-100x) than traditional migration and uses no network bandwidth.
+ */
+public class ClvmLockTransferCommand extends Command {
+
+ /**
+ * Operation to perform on the CLVM volume.
+ * Maps to lvchange flags for LVM operations.
+ */
+ public enum Operation {
+ /** Deactivate the volume on this host (-an) */
+ DEACTIVATE("-an", "deactivate"),
+
+ /** Activate the volume exclusively on this host (-aey) */
+ ACTIVATE_EXCLUSIVE("-aey", "activate exclusively"),
+
+ /** Activate the volume in shared mode on this host (-asy) */
+ ACTIVATE_SHARED("-asy", "activate in shared mode");
+
+ private final String lvchangeFlag;
+ private final String description;
+
+ Operation(String lvchangeFlag, String description) {
+ this.lvchangeFlag = lvchangeFlag;
+ this.description = description;
+ }
+
+ public String getLvchangeFlag() {
+ return lvchangeFlag;
+ }
+
+ public String getDescription() {
+ return description;
+ }
+ }
+
+ private String lvPath;
+ private Operation operation;
+ private String volumeUuid;
+
+ public ClvmLockTransferCommand() {
+ // For serialization
+ }
+
+ public ClvmLockTransferCommand(Operation operation, String lvPath, String volumeUuid) {
+ this.operation = operation;
+ this.lvPath = lvPath;
+ this.volumeUuid = volumeUuid;
+ // Execute in sequence to ensure lock safety
+ setWait(30);
+ }
+
+ public String getLvPath() {
+ return lvPath;
+ }
+
+ public Operation getOperation() {
+ return operation;
+ }
+
+ public String getVolumeUuid() {
+ return volumeUuid;
+ }
+
+ @Override
+ public boolean executeInSequence() {
+ return true;
+ }
+}
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java
index 8b0171870765..448c00ab240d 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeInfo.java
@@ -31,6 +31,12 @@
public interface VolumeInfo extends DownloadableDataInfo, Volume {
+ /**
+ * Constant for the volume detail key that stores the host ID currently holding the CLVM exclusive lock.
+ * This is used during lightweight lock migration to determine the source host for lock transfer.
+ */
+ String CLVM_LOCK_HOST_ID = "clvmLockHostId";
+
boolean isAttachedVM();
void addPayload(Object data);
@@ -103,4 +109,21 @@ public interface VolumeInfo extends DownloadableDataInfo, Volume {
List getCheckpointPaths();
Set getCheckpointImageStoreUrls();
+
+ /**
+ * Gets the destination host ID hint for CLVM volume creation.
+ * This is used to route volume creation commands to the specific host where the VM will be deployed.
+ * Only applicable for CLVM storage pools to avoid shared mode activation.
+ *
+ * @return The host ID where the volume should be created, or null if not set
+ */
+ Long getDestinationHostId();
+
+ /**
+ * Sets the destination host ID hint for CLVM volume creation.
+ * This should be set before volume creation when the destination host is known.
+ *
+ * @param hostId The host ID where the volume should be created
+ */
+ void setDestinationHostId(Long hostId);
}
diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
index 682473ec94fc..a7d82d0b9628 100644
--- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
+++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/VolumeService.java
@@ -30,6 +30,7 @@
import com.cloud.host.Host;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.offering.DiskOffering;
+import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.Volume;
import com.cloud.user.Account;
import com.cloud.utils.Pair;
@@ -123,4 +124,71 @@ boolean copyPoliciesBetweenVolumesAndDestroySourceVolumeAfterMigration(ObjectInD
void checkAndRepairVolumeBasedOnConfig(DataObject dataObject, Host host);
void validateChangeDiskOfferingEncryptionType(long existingDiskOfferingId, long newDiskOfferingId);
+
+ /**
+ * Transfers exclusive lock for a volume on cluster-based storage (e.g., CLVM/CLVM_NG) from one host to another.
+ * This is used for storage that requires host-level lock management for volumes on shared storage pools.
+ * For non-CLVM pool types, this method returns false without taking action.
+ *
+ * @param volume The volume to transfer lock for
+ * @param sourceHostId Host currently holding the exclusive lock
+ * @param destHostId Host to receive the exclusive lock
+ * @return true if lock transfer succeeded or was not needed, false if it failed
+ */
+ boolean transferVolumeLock(VolumeInfo volume, Long sourceHostId, Long destHostId);
+
+ /**
+ * Finds which host currently has the exclusive lock on a CLVM volume.
+ * Checks in order: explicit lock tracking, attached VM's host, or first available cluster host.
+ *
+ * @param volume The CLVM volume
+ * @return Host ID that has the exclusive lock, or null if cannot be determined
+ */
+ Long findVolumeLockHost(VolumeInfo volume);
+
+ /**
+ * Performs lightweight CLVM lock migration for a volume to a target host.
+ * This transfers the LVM exclusive lock without copying data (CLVM volumes are on shared cluster storage).
+ * If the volume already has the lock on the destination host, no action is taken.
+ *
+ * @param volume The volume to migrate lock for
+ * @param destHostId Destination host ID
+ * @return Updated VolumeInfo after lock migration
+ */
+ VolumeInfo performLockMigration(VolumeInfo volume, Long destHostId);
+
+ /**
+ * Checks if both storage pools are CLVM type (CLVM or CLVM_NG).
+ *
+ * @param volumePoolType Storage pool type for the volume
+ * @param vmPoolType Storage pool type for the VM
+ * @return true if both pools are CLVM type (CLVM or CLVM_NG)
+ */
+ boolean areBothPoolsClvmType(StoragePoolType volumePoolType, StoragePoolType vmPoolType);
+
+ /**
+ * Determines if CLVM lock transfer is required when a volume is already on the correct storage pool.
+ *
+ * @param volumeToAttach The volume being attached
+ * @param volumePoolType Storage pool type for the volume
+ * @param vmPoolType Storage pool type for the VM's existing volume
+ * @param volumePoolId Storage pool ID for the volume
+ * @param vmPoolId Storage pool ID for the VM's existing volume
+ * @param vmHostId VM's current host ID (or last host ID if stopped)
+ * @return true if CLVM lock transfer is needed
+ */
+ boolean isLockTransferRequired(VolumeInfo volumeToAttach, StoragePoolType volumePoolType, StoragePoolType vmPoolType,
+ Long volumePoolId, Long vmPoolId, Long vmHostId);
+
+ /**
+ * Determines if lightweight CLVM migration is needed instead of full data copy.
+ *
+ * @param volumePoolType Storage pool type for the volume
+ * @param vmPoolType Storage pool type for the VM
+ * @param volumePoolPath Storage pool path for the volume
+ * @param vmPoolPath Storage pool path for the VM
+ * @return true if lightweight migration should be used
+ */
+ boolean isLightweightMigrationNeeded(StoragePoolType volumePoolType, StoragePoolType vmPoolType,
+ String volumePoolPath, String vmPoolPath);
}
diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
index e8796fb02529..8a773f74ab2e 100755
--- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -50,6 +50,8 @@
import javax.persistence.EntityExistsException;
+import com.cloud.agent.api.PostMigrationCommand;
+import com.cloud.storage.ClvmLockManager;
import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
import org.apache.cloudstack.annotation.AnnotationService;
import org.apache.cloudstack.annotation.dao.AnnotationDao;
@@ -135,6 +137,7 @@
import com.cloud.agent.api.PrepareExternalProvisioningCommand;
import com.cloud.agent.api.PrepareForMigrationAnswer;
import com.cloud.agent.api.PrepareForMigrationCommand;
+import com.cloud.agent.api.PreMigrationCommand;
import com.cloud.agent.api.RebootAnswer;
import com.cloud.agent.api.RebootCommand;
import com.cloud.agent.api.RecreateCheckpointsCommand;
@@ -264,6 +267,7 @@
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplateZoneDao;
import com.cloud.storage.dao.VolumeDao;
+import com.cloud.storage.dao.VolumeDetailsDao;
import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.template.VirtualMachineTemplate;
import com.cloud.user.Account;
@@ -359,6 +363,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
@Inject
private VolumeDao _volsDao;
@Inject
+ private VolumeDetailsDao _volsDetailsDao;
+ @Inject
private HighAvailabilityManager _haMgr;
@Inject
private HostPodDao _podDao;
@@ -461,6 +467,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
ExtensionsManager extensionsManager;
@Inject
ExtensionDetailsDao extensionDetailsDao;
+ @Inject
+ ClvmLockManager clvmLockManager;
VmWorkJobHandlerProxy _jobHandlerProxy = new VmWorkJobHandlerProxy(this);
@@ -3107,6 +3115,24 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy
updateOverCommitRatioForVmProfile(profile, dest.getHost().getClusterId());
final VirtualMachineTO to = toVmTO(profile);
+
+ logger.info("Sending PreMigrationCommand to source host {} for VM {}", srcHostId, vm.getInstanceName());
+ final PreMigrationCommand preMigCmd = new PreMigrationCommand(to, vm.getInstanceName());
+ Answer preMigAnswer = null;
+ try {
+ preMigAnswer = _agentMgr.send(srcHostId, preMigCmd);
+ if (preMigAnswer == null || !preMigAnswer.getResult()) {
+ final String details = preMigAnswer != null ? preMigAnswer.getDetails() : "null answer returned";
+ final String msg = "Failed to prepare source host for migration: " + details;
+ logger.error("Failed to prepare source host {} for migration of VM {}: {}", srcHostId, vm.getInstanceName(), details);
+ throw new CloudRuntimeException(msg);
+ }
+ logger.info("Successfully prepared source host {} for migration of VM {}", srcHostId, vm.getInstanceName());
+ } catch (final AgentUnavailableException | OperationTimedoutException e) {
+ logger.error("Failed to send PreMigrationCommand to source host {}: {}", srcHostId, e.getMessage(), e);
+ throw new CloudRuntimeException("Failed to prepare source host for migration: " + e.getMessage(), e);
+ }
+
final PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(to);
setVmNetworkDetails(vm, to);
@@ -3238,6 +3264,24 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy
logger.warn("Error while checking the vm {} on host {}", vm, dest.getHost(), e);
}
migrated = true;
+ try {
+ logger.info("Executing post-migration tasks for VM {} on destination host {}", vm.getInstanceName(), dstHostId);
+ final PostMigrationCommand postMigrationCommand = new PostMigrationCommand(to, vm.getInstanceName());
+ final Answer postMigrationAnswer = _agentMgr.send(dstHostId, postMigrationCommand);
+
+ if (postMigrationAnswer == null || !postMigrationAnswer.getResult()) {
+ final String details = postMigrationAnswer != null ? postMigrationAnswer.getDetails() : "null answer returned";
+ logger.warn("Post-migration tasks failed for VM {} on destination host {}: {}. Migration completed but some cleanup may be needed.",
+ vm.getInstanceName(), dstHostId, details);
+ } else {
+ logger.info("Successfully completed post-migration tasks for VM {} on destination host {}", vm.getInstanceName(), dstHostId);
+ }
+ } catch (Exception e) {
+ logger.warn("Exception during post-migration tasks for VM {} on destination host {}: {}. Migration completed but some cleanup may be needed.",
+ vm.getInstanceName(), dstHostId, e.getMessage(), e);
+ }
+
+ updateClvmLockHostForVmVolumes(vm.getId(), dstHostId);
} finally {
if (!migrated) {
logger.info("Migration was unsuccessful. Cleaning up: {}", vm);
@@ -3323,6 +3367,27 @@ private void updateVmPod(VMInstanceVO vm, long dstHostId) {
_vmDao.persist(newVm);
}
+ /**
+ * Updates CLVM_LOCK_HOST_ID for all CLVM volumes attached to a VM after VM migration.
+ * This ensures that subsequent operations on CLVM volumes are routed to the correct host.
+ *
+ * @param vmId The ID of the VM that was migrated
+ * @param destHostId The destination host ID where the VM now resides
+ */
+ private void updateClvmLockHostForVmVolumes(long vmId, long destHostId) {
+ List volumes = _volsDao.findByInstance(vmId);
+ if (volumes == null || volumes.isEmpty()) {
+ return;
+ }
+
+ for (VolumeVO volume : volumes) {
+ StoragePoolVO pool = _storagePoolDao.findById(volume.getPoolId());
+ if (pool != null && ClvmLockManager.isClvmPoolType(pool.getPoolType())) {
+ clvmLockManager.setClvmLockHostId(volume.getId(), destHostId);
+ }
+ }
+ }
+
/**
* We create the mapping of volumes and storage pool to migrate the VMs according to the information sent by the user.
* If the user did not enter a complete mapping, the volumes that were left behind will be auto mapped using {@link #createStoragePoolMappingsForVolumes(VirtualMachineProfile, DataCenterDeployment, Map, List)}
@@ -4897,6 +4962,27 @@ private void orchestrateMigrateForScale(final String vmUuid, final long srcHostI
volumeMgr.prepareForMigration(profile, dest);
final VirtualMachineTO to = toVmTO(profile);
+
+ // Step 1: Send PreMigrationCommand to source host to convert CLVM volumes to shared mode
+ // This must happen BEFORE PrepareForMigrationCommand on destination to avoid lock conflicts
+ logger.info("Sending PreMigrationCommand to source host {} for VM {}", srcHostId, vm.getInstanceName());
+ final PreMigrationCommand preMigCmd = new PreMigrationCommand(to, vm.getInstanceName());
+ Answer preMigAnswer = null;
+ try {
+ preMigAnswer = _agentMgr.send(srcHostId, preMigCmd);
+ if (preMigAnswer == null || !preMigAnswer.getResult()) {
+ final String details = preMigAnswer != null ? preMigAnswer.getDetails() : "null answer returned";
+ final String msg = "Failed to prepare source host for migration: " + details;
+ logger.error("Failed to prepare source host {} for migration of VM {}: {}", srcHostId, vm.getInstanceName(), details);
+ throw new CloudRuntimeException(msg);
+ }
+ logger.info("Successfully prepared source host {} for migration of VM {}", srcHostId, vm.getInstanceName());
+ } catch (final AgentUnavailableException | OperationTimedoutException e) {
+ logger.error("Failed to send PreMigrationCommand to source host {}: {}", srcHostId, e.getMessage(), e);
+ throw new CloudRuntimeException("Failed to prepare source host for migration: " + e.getMessage(), e);
+ }
+
+ // Step 2: Send PrepareForMigrationCommand to destination host
final PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(to);
ItWorkVO work = new ItWorkVO(UUID.randomUUID().toString(), _nodeId, State.Migrating, vm.getType(), vm.getId());
diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
index e8c75afa81c5..7d7bcb410fa4 100644
--- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
+++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
@@ -38,8 +38,10 @@
import javax.inject.Inject;
import javax.naming.ConfigurationException;
+import com.cloud.agent.AgentManager;
import com.cloud.deploy.DeploymentClusterPlanner;
import com.cloud.exception.ResourceAllocationException;
+import com.cloud.storage.ClvmLockManager;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.dao.VMTemplateDao;
@@ -273,6 +275,10 @@ public enum UserVmCloneType {
ConfigurationDao configurationDao;
@Inject
VMInstanceDao vmInstanceDao;
+ @Inject
+ ClvmLockManager clvmLockManager;
+ @Inject
+ AgentManager _agentMgr;
@Inject
protected SnapshotHelper snapshotHelper;
@@ -745,6 +751,17 @@ public VolumeInfo createVolume(VolumeInfo volumeInfo, VirtualMachine vm, Virtual
logger.debug("Trying to create volume [{}] on storage pool [{}].",
volumeToString, poolToString);
DataStore store = dataStoreMgr.getDataStore(pool.getId(), DataStoreRole.Primary);
+
+ // For CLVM pools, set the lock host hint so volume is created on the correct host
+ // This avoids the need for shared mode activation and improves performance
+ if (ClvmLockManager.isClvmPoolType(pool.getPoolType()) && hostId != null) {
+ logger.info("CLVM pool detected. Setting lock host {} for volume {} to route creation to correct host",
+ hostId, volumeInfo.getUuid());
+ volumeInfo.setDestinationHostId(hostId);
+
+ clvmLockManager.setClvmLockHostId(volumeInfo.getId(), hostId);
+ }
+
for (int i = 0; i < 2; i++) {
// retry one more time in case of template reload is required for Vmware case
AsyncCallFuture future = null;
@@ -786,6 +803,109 @@ private String getVolumeIdentificationInfos(Volume volume) {
return String.format("uuid: %s, name: %s", volume.getUuid(), volume.getName());
}
+ /**
+ * Updates the CLVM_LOCK_HOST_ID for a migrated volume if applicable.
+ * For CLVM volumes that are attached to a VM, this updates the lock host tracking
+ * to point to the VM's current host after volume migration.
+ *
+ * @param migratedVolume The volume that was migrated
+ * @param destPool The destination storage pool
+ * @param operationType Description of the operation (e.g., "migrated", "live-migrated") for logging
+ */
+ private void updateClvmLockHostAfterMigration(Volume migratedVolume, StoragePool destPool, String operationType) {
+ if (migratedVolume == null || destPool == null) {
+ return;
+ }
+
+ StoragePoolVO pool = _storagePoolDao.findById(destPool.getId());
+ if (pool == null || !ClvmLockManager.isClvmPoolType(pool.getPoolType())) {
+ return;
+ }
+
+ if (migratedVolume.getInstanceId() == null) {
+ return;
+ }
+
+ VMInstanceVO vm = vmInstanceDao.findById(migratedVolume.getInstanceId());
+ if (vm == null || vm.getHostId() == null) {
+ return;
+ }
+
+ clvmLockManager.setClvmLockHostId(migratedVolume.getId(), vm.getHostId());
+ logger.debug("Updated CLVM_LOCK_HOST_ID for {} volume {} to host {} where VM {} is running",
+ operationType, migratedVolume.getUuid(), vm.getHostId(), vm.getInstanceName());
+ }
+
+ /**
+ * Retrieves the CLVM lock host ID from any existing volume of the specified VM.
+ * This is useful when attaching a new volume to a stopped VM - we want to maintain
+ * consistency by using the same host that manages the VM's other CLVM volumes.
+ *
+ * @param vmId The ID of the VM
+ * @return The host ID if found, null otherwise
+ */
+ private Long getClvmLockHostFromVmVolumes(Long vmId) {
+ if (vmId == null) {
+ return null;
+ }
+
+ List vmVolumes = _volsDao.findByInstance(vmId);
+ if (vmVolumes == null || vmVolumes.isEmpty()) {
+ return null;
+ }
+
+ for (VolumeVO volume : vmVolumes) {
+ if (volume.getPoolId() == null) {
+ continue;
+ }
+
+ StoragePoolVO pool = _storagePoolDao.findById(volume.getPoolId());
+ if (pool != null && ClvmLockManager.isClvmPoolType(pool.getPoolType())) {
+ Long lockHostId = clvmLockManager.getClvmLockHostId(volume.getId(), volume.getUuid());
+ if (lockHostId != null) {
+ logger.debug("Found CLVM lock host {} from existing volume {} of VM {}",
+ lockHostId, volume.getUuid(), vmId);
+ return lockHostId;
+ }
+ }
+ }
+
+ return null;
+ }
+
+ private void transferClvmLocksForVmStart(List volumes, Long destHostId, VMInstanceVO vm) {
+ if (volumes == null || volumes.isEmpty() || destHostId == null) {
+ return;
+ }
+
+ for (VolumeVO volume : volumes) {
+ if (volume.getPoolId() == null) {
+ continue;
+ }
+
+ StoragePoolVO pool = _storagePoolDao.findById(volume.getPoolId());
+ if (pool == null || !ClvmLockManager.isClvmPoolType(pool.getPoolType())) {
+ continue;
+ }
+
+ Long currentLockHost = clvmLockManager.getClvmLockHostId(volume.getId(), volume.getUuid());
+
+ if (currentLockHost == null) {
+ clvmLockManager.setClvmLockHostId(volume.getId(), destHostId);
+ } else if (!currentLockHost.equals(destHostId)) {
+ logger.info("CLVM volume {} is locked on host {} but VM {} starting on host {}. Transferring lock.",
+ volume.getUuid(), currentLockHost, vm.getInstanceName(), destHostId);
+
+ if (!clvmLockManager.transferClvmVolumeLock(volume.getUuid(), volume.getId(),
+ volume.getPath(), pool, currentLockHost, destHostId)) {
+ throw new CloudRuntimeException(
+ String.format("Failed to transfer CLVM lock for volume %s from host %s to host %s",
+ volume.getUuid(), currentLockHost, destHostId));
+ }
+ }
+ }
+ }
+
public String getRandomVolumeName() {
return UUID.randomUUID().toString();
}
@@ -1204,10 +1324,22 @@ public VolumeInfo createVolumeOnPrimaryStorage(VirtualMachine vm, VolumeInfo vol
Long clusterId = storagePool.getClusterId();
logger.trace("storage-pool {}/{} is associated with cluster {}",storagePool.getName(), storagePool.getUuid(), clusterId);
Long hostId = vm.getHostId();
- if (hostId == null && storagePool.isLocal()) {
- List poolHosts = storagePoolHostDao.listByPoolId(storagePool.getId());
- if (poolHosts.size() > 0) {
- hostId = poolHosts.get(0).getHostId();
+ if (hostId == null && (storagePool.isLocal() || ClvmLockManager.isClvmPoolType(storagePool.getPoolType()))) {
+ if (ClvmLockManager.isClvmPoolType(storagePool.getPoolType())) {
+ hostId = getClvmLockHostFromVmVolumes(vm.getId());
+ if (hostId != null) {
+ logger.debug("Using CLVM lock host {} from VM {}'s existing volumes for new volume creation",
+ hostId, vm.getUuid());
+ }
+ }
+
+ if (hostId == null) {
+ List poolHosts = storagePoolHostDao.listByPoolId(storagePool.getId());
+ if (!poolHosts.isEmpty()) {
+ hostId = poolHosts.get(0).getHostId();
+ logger.debug("Selected host {} from storage pool {} for stopped VM {} volume creation",
+ hostId, storagePool.getUuid(), vm.getUuid());
+ }
}
}
@@ -1452,6 +1584,9 @@ public Volume migrateVolume(Volume volume, StoragePool destPool) throws StorageU
_snapshotDao.updateVolumeIds(vol.getId(), result.getVolume().getId());
_snapshotDataStoreDao.updateVolumeIds(vol.getId(), result.getVolume().getId());
}
+
+ // For CLVM volumes attached to a VM, update the CLVM_LOCK_HOST_ID after migration
+ updateClvmLockHostAfterMigration(result.getVolume(), destPool, "migrated");
}
return result.getVolume();
} catch (InterruptedException | ExecutionException e) {
@@ -1477,6 +1612,10 @@ public Volume liveMigrateVolume(Volume volume, StoragePool destPool) {
logger.error("Volume [{}] migration failed due to [{}].", volToString, result.getResult());
return null;
}
+
+ // For CLVM volumes attached to a VM, update the CLVM_LOCK_HOST_ID after live migration
+ updateClvmLockHostAfterMigration(result.getVolume(), destPool, "live-migrated");
+
return result.getVolume();
} catch (InterruptedException | ExecutionException e) {
logger.error("Volume [{}] migration failed due to [{}].", volToString, e.getMessage());
@@ -1519,6 +1658,11 @@ public void migrateVolumes(VirtualMachine vm, VirtualMachineTO vmTo, Host srcHos
logger.error(msg);
throw new CloudRuntimeException(msg);
}
+ for (Map.Entry entry : volumeToPool.entrySet()) {
+ Volume volume = entry.getKey();
+ StoragePool destPool = entry.getValue();
+ updateClvmLockHostAfterMigration(volume, destPool, "vm-migrated");
+ }
} catch (InterruptedException | ExecutionException e) {
logger.error("Failed to migrate VM [{}] along with its volumes due to [{}].", vm, e.getMessage());
logger.debug("Exception: ", e);
@@ -1851,6 +1995,19 @@ private Pair recreateVolume(VolumeVO vol, VirtualMachinePro
future = volService.createManagedStorageVolumeFromTemplateAsync(volume, destPool.getId(), templ, hostId);
} else {
+ // For CLVM pools, set the destination host hint so volume is created on the correct host
+ // This avoids the need for shared mode activation and improves performance
+ StoragePoolVO poolVO = _storagePoolDao.findById(destPool.getId());
+ if (poolVO != null && ClvmLockManager.isClvmPoolType(poolVO.getPoolType())) {
+ Long hostId = vm.getVirtualMachine().getHostId();
+ if (hostId != null) {
+ volume.setDestinationHostId(hostId);
+ clvmLockManager.setClvmLockHostId(volume.getId(), hostId);
+ logger.info("CLVM pool detected during volume creation from template. Setting lock host {} for volume {} (persisted to DB) to route creation to correct host",
+ hostId, volume.getUuid());
+ }
+ }
+
future = volService.createVolumeFromTemplateAsync(volume, destPool.getId(), templ);
}
}
@@ -1966,13 +2123,18 @@ public void prepare(VirtualMachineProfile vm, DeployDestination dest) throws Sto
throw new CloudRuntimeException(msg);
}
- // don't allow to start vm that doesn't have a root volume
if (_volsDao.findByInstanceAndType(vm.getId(), Volume.Type.ROOT).isEmpty()) {
throw new CloudRuntimeException(String.format("ROOT volume is missing, unable to prepare volumes for the VM [%s].", vm.getVirtualMachine()));
}
List vols = _volsDao.findUsableVolumesForInstance(vm.getId());
+ VirtualMachine vmInstance = vm.getVirtualMachine();
+ VMInstanceVO vmInstanceVO = vmInstanceDao.findById(vmInstance.getId());
+ if (vmInstance.getState() == State.Starting && dest.getHost() != null) {
+ transferClvmLocksForVmStart(vols, dest.getHost().getId(), vmInstanceVO);
+ }
+
List tasks = getTasks(vols, dest.getStorageForDisks(), vm);
Volume vol = null;
PrimaryDataStore store;
diff --git a/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml b/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml
index 17c5002c718b..28134f415f1b 100644
--- a/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml
+++ b/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml
@@ -44,6 +44,8 @@
value="#{storagePoolAllocatorsRegistry.registered}" />
+
+
(), destHostId, vmInstance);
+
+ Mockito.verify(clvmLockManager, Mockito.never()).getClvmLockHostId(Mockito.anyLong(), Mockito.anyString());
+ }
+
+ @Test
+ public void testTransferClvmLocksForVmStart_NullPoolId() throws Exception {
+ Long destHostId = 2L;
+
+ VolumeVO volumeWithoutPool = Mockito.mock(VolumeVO.class);
+ Mockito.when(volumeWithoutPool.getPoolId()).thenReturn(null);
+
+ VMInstanceVO vmInstance = Mockito.mock(VMInstanceVO.class);
+
+ ClvmLockManager clvmLockManager = Mockito.mock(ClvmLockManager.class);
+ setField(volumeOrchestrator, "clvmLockManager", clvmLockManager);
+ setField(volumeOrchestrator, "_storagePoolDao", storagePoolDao);
+
+ Method method = VolumeOrchestrator.class.getDeclaredMethod(
+ "transferClvmLocksForVmStart", List.class, Long.class, VMInstanceVO.class);
+ method.setAccessible(true);
+
+ method.invoke(volumeOrchestrator, List.of(volumeWithoutPool), destHostId, vmInstance);
+
+ Mockito.verify(storagePoolDao, Mockito.never()).findById(Mockito.anyLong());
+ }
+
+ @Test
+ public void testTransferClvmLocksForVmStart_SetInitialLockHost() throws Exception {
+ Long destHostId = 2L;
+ Long poolId = 10L;
+
+ VolumeVO clvmVolume = Mockito.mock(VolumeVO.class);
+ Mockito.when(clvmVolume.getId()).thenReturn(101L);
+ Mockito.when(clvmVolume.getPoolId()).thenReturn(poolId);
+
+ StoragePoolVO clvmPool = Mockito.mock(StoragePoolVO.class);
+ Mockito.when(clvmPool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
+
+ VMInstanceVO vmInstance = Mockito.mock(VMInstanceVO.class);
+
+ ClvmLockManager clvmLockManager = Mockito.mock(ClvmLockManager.class);
+ Mockito.when(clvmLockManager.getClvmLockHostId(Mockito.eq(101L), ArgumentMatchers.nullable(String.class))).thenReturn(null);
+
+ Mockito.when(storagePoolDao.findById(poolId)).thenReturn(clvmPool);
+
+ setField(volumeOrchestrator, "clvmLockManager", clvmLockManager);
+ setField(volumeOrchestrator, "_storagePoolDao", storagePoolDao);
+
+ Method method = VolumeOrchestrator.class.getDeclaredMethod(
+ "transferClvmLocksForVmStart", List.class, Long.class, VMInstanceVO.class);
+ method.setAccessible(true);
+
+ method.invoke(volumeOrchestrator, List.of(clvmVolume), destHostId, vmInstance);
+
+ Mockito.verify(clvmLockManager, Mockito.times(1)).setClvmLockHostId(101L, destHostId);
+ Mockito.verify(clvmLockManager, Mockito.never()).transferClvmVolumeLock(
+ Mockito.anyString(), Mockito.anyLong(), Mockito.anyString(),
+ Mockito.any(), Mockito.anyLong(), Mockito.anyLong());
+ }
+
+ @Test
+ public void testTransferClvmLocksForVmStart_MixedVolumes() throws Exception {
+ Long destHostId = 2L;
+ Long currentHostId = 1L;
+ Long clvmPoolId = 10L;
+ Long nfsPoolId = 20L;
+
+ VolumeVO clvmVolume = Mockito.mock(VolumeVO.class);
+ Mockito.when(clvmVolume.getId()).thenReturn(101L);
+ Mockito.when(clvmVolume.getPoolId()).thenReturn(clvmPoolId);
+ Mockito.when(clvmVolume.getUuid()).thenReturn("clvm-vol-uuid");
+ Mockito.when(clvmVolume.getPath()).thenReturn("clvm-vol-path");
+
+ VolumeVO nfsVolume = Mockito.mock(VolumeVO.class);
+ Mockito.when(nfsVolume.getPoolId()).thenReturn(nfsPoolId);
+
+ StoragePoolVO clvmPool = Mockito.mock(StoragePoolVO.class);
+ Mockito.when(clvmPool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
+
+ StoragePoolVO nfsPool = Mockito.mock(StoragePoolVO.class);
+ Mockito.when(nfsPool.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
+
+ VMInstanceVO vmInstance = Mockito.mock(VMInstanceVO.class);
+ Mockito.when(vmInstance.getInstanceName()).thenReturn(MOCK_VM_NAME);
+
+ ClvmLockManager clvmLockManager = Mockito.mock(ClvmLockManager.class);
+ Mockito.when(clvmLockManager.getClvmLockHostId(Mockito.eq(101L), Mockito.anyString())).thenReturn(currentHostId);
+ Mockito.when(clvmLockManager.transferClvmVolumeLock(Mockito.anyString(), Mockito.anyLong(),
+ Mockito.anyString(), Mockito.any(), Mockito.anyLong(), Mockito.anyLong())).thenReturn(true);
+
+ Mockito.when(storagePoolDao.findById(clvmPoolId)).thenReturn(clvmPool);
+ Mockito.when(storagePoolDao.findById(nfsPoolId)).thenReturn(nfsPool);
+
+ setField(volumeOrchestrator, "clvmLockManager", clvmLockManager);
+ setField(volumeOrchestrator, "_storagePoolDao", storagePoolDao);
+
+ Method method = VolumeOrchestrator.class.getDeclaredMethod(
+ "transferClvmLocksForVmStart", List.class, Long.class, VMInstanceVO.class);
+ method.setAccessible(true);
+
+ method.invoke(volumeOrchestrator, List.of(clvmVolume, nfsVolume), destHostId, vmInstance);
+
+ Mockito.verify(clvmLockManager, Mockito.times(1)).transferClvmVolumeLock(
+ Mockito.eq("clvm-vol-uuid"), Mockito.eq(101L), Mockito.eq("clvm-vol-path"),
+ Mockito.eq(clvmPool), Mockito.eq(currentHostId), Mockito.eq(destHostId));
+ }
+
+ @Test(expected = CloudRuntimeException.class)
+ public void testTransferClvmLocksForVmStart_TransferFails() throws Throwable {
+ Long destHostId = 2L;
+ Long currentHostId = 1L;
+ Long poolId = 10L;
+
+ VolumeVO clvmVolume = Mockito.mock(VolumeVO.class);
+ Mockito.when(clvmVolume.getId()).thenReturn(101L);
+ Mockito.when(clvmVolume.getPoolId()).thenReturn(poolId);
+ Mockito.when(clvmVolume.getUuid()).thenReturn("vol-uuid");
+ Mockito.when(clvmVolume.getPath()).thenReturn("vol-path");
+
+ StoragePoolVO clvmPool = Mockito.mock(StoragePoolVO.class);
+ Mockito.when(clvmPool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
+
+ VMInstanceVO vmInstance = Mockito.mock(VMInstanceVO.class);
+ Mockito.when(vmInstance.getInstanceName()).thenReturn(MOCK_VM_NAME);
+
+ ClvmLockManager clvmLockManager = Mockito.mock(ClvmLockManager.class);
+ Mockito.when(clvmLockManager.getClvmLockHostId(Mockito.eq(101L), Mockito.anyString())).thenReturn(currentHostId);
+ Mockito.when(clvmLockManager.transferClvmVolumeLock(Mockito.anyString(), Mockito.anyLong(),
+ Mockito.anyString(), Mockito.any(), Mockito.anyLong(), Mockito.anyLong())).thenReturn(false);
+
+ Mockito.when(storagePoolDao.findById(poolId)).thenReturn(clvmPool);
+
+ setField(volumeOrchestrator, "clvmLockManager", clvmLockManager);
+ setField(volumeOrchestrator, "_storagePoolDao", storagePoolDao);
+
+ Method method = VolumeOrchestrator.class.getDeclaredMethod(
+ "transferClvmLocksForVmStart", List.class, Long.class, VMInstanceVO.class);
+ method.setAccessible(true);
+
+ try {
+ method.invoke(volumeOrchestrator, List.of(clvmVolume), destHostId, vmInstance);
+ } catch (InvocationTargetException e) {
+ throw e.getCause();
+ }
+ }
+
+ private void setField(Object target, String fieldName, Object value) throws Exception {
+ Field field = findField(target.getClass(), fieldName);
+ if (field == null) {
+ throw new NoSuchFieldException("Field " + fieldName + " not found in " + target.getClass());
+ }
+ field.setAccessible(true);
+ field.set(target, value);
+ }
+
+ private Field findField(Class> clazz, String fieldName) {
+ Class> current = clazz;
+ while (current != null && current != Object.class) {
+ try {
+ return current.getDeclaredField(fieldName);
+ } catch (NoSuchFieldException e) {
+ current = current.getSuperclass();
+ }
+ }
+ return null;
+ }
+
}
diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java
index bbb2b4f3a88e..f5daf673d3c8 100644
--- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java
+++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDaoImpl.java
@@ -151,6 +151,7 @@ public boolean configure(String name, Map params) throws Configu
idStateNinSearch.and(STATE, idStateNinSearch.entity().getState(), SearchCriteria.Op.NOTIN);
idStateNinSearch.done();
+
snapshotVOSearch = snapshotDao.createSearchBuilder();
snapshotVOSearch.and(VOLUME_ID, snapshotVOSearch.entity().getVolumeId(), SearchCriteria.Op.EQ);
snapshotVOSearch.done();
diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java
index 947b4af8f690..867470dac040 100644
--- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java
+++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageDataMotionStrategy.java
@@ -144,12 +144,16 @@ protected boolean isDestinationNfsPrimaryStorageClusterWide(Map volumeDataStoreMap, VirtualMach
setVolumeMigrationOptions(srcVolumeInfo, destVolumeInfo, vmTO, srcHost, destStoragePool, migrationType);
+ if (ClvmLockManager.isClvmPoolType(destStoragePool.getPoolType())) {
+ destVolumeInfo.setDestinationHostId(destHost.getId());
+ clvmLockManager.setClvmLockHostId(destVolume.getId(), destHost.getId());
+ logger.info("Set CLVM lock host {} for volume {} during migration to ensure creation on destination host",
+ destHost.getId(), destVolumeInfo.getUuid());
+ }
+
// create a volume on the destination storage
destDataStore.getDriver().createAsync(destDataStore, destVolumeInfo, null);
@@ -2096,7 +2107,7 @@ public void copyAsync(Map volumeDataStoreMap, VirtualMach
MigrateCommand.MigrateDiskInfo migrateDiskInfo;
- boolean isNonManagedToNfs = supportStoragePoolType(sourceStoragePool.getPoolType(), StoragePoolType.Filesystem) && destStoragePool.getPoolType() == StoragePoolType.NetworkFilesystem && !managedStorageDestination;
+ boolean isNonManagedToNfs = supportStoragePoolType(sourceStoragePool.getPoolType(), StoragePoolType.Filesystem, StoragePoolType.CLVM, StoragePoolType.CLVM_NG) && destStoragePool.getPoolType() == StoragePoolType.NetworkFilesystem && !managedStorageDestination;
if (isNonManagedToNfs) {
migrateDiskInfo = new MigrateCommand.MigrateDiskInfo(srcVolumeInfo.getPath(),
MigrateCommand.MigrateDiskInfo.DiskType.FILE,
@@ -2106,9 +2117,12 @@ public void copyAsync(Map volumeDataStoreMap, VirtualMach
} else {
String backingPath = generateBackingPath(destStoragePool, destVolumeInfo);
migrateDiskInfo = configureMigrateDiskInfo(srcVolumeInfo, destPath, backingPath);
+ migrateDiskInfo = updateMigrateDiskInfoForBlockDevice(migrateDiskInfo, destStoragePool);
migrateDiskInfo.setSourceDiskOnStorageFileSystem(isStoragePoolTypeOfFile(sourceStoragePool));
migrateDiskInfoList.add(migrateDiskInfo);
}
+ migrateDiskInfo.setSourcePoolType(sourceStoragePool.getPoolType());
+ migrateDiskInfo.setDestPoolType(destVolumeInfo.getStoragePoolType());
prepareDiskWithSecretConsumerDetail(vmTO, srcVolumeInfo, destVolumeInfo.getPath());
migrateStorage.put(srcVolumeInfo.getPath(), migrateDiskInfo);
@@ -2116,6 +2130,8 @@ public void copyAsync(Map volumeDataStoreMap, VirtualMach
srcVolumeInfoToDestVolumeInfo.put(srcVolumeInfo, destVolumeInfo);
}
+ prepareDisksForMigrationForClvm(vmTO, volumeDataStoreMap, srcHost);
+
PrepareForMigrationCommand pfmc = new PrepareForMigrationCommand(vmTO);
Answer pfma;
@@ -2211,10 +2227,42 @@ public void copyAsync(Map volumeDataStoreMap, VirtualMach
}
}
+ private void prepareDisksForMigrationForClvm(VirtualMachineTO vmTO, Map volumeDataStoreMap, Host srcHost) {
+ // For CLVM/CLVM_NG source pools, convert volumes from exclusive to shared mode
+ // on the source host BEFORE PrepareForMigrationCommand on the destination.
+ boolean hasClvmSource = volumeDataStoreMap.keySet().stream()
+ .map(v -> _storagePoolDao.findById(v.getPoolId()))
+ .anyMatch(p -> p != null && (p.getPoolType() == StoragePoolType.CLVM || p.getPoolType() == StoragePoolType.CLVM_NG));
+
+ if (hasClvmSource) {
+ logger.info("CLVM/CLVM_NG source pool detected for VM [{}], sending PreMigrationCommand to source host [{}] to convert volumes to shared mode.", vmTO.getName(), srcHost.getId());
+ PreMigrationCommand preMigCmd = new PreMigrationCommand(vmTO, vmTO.getName());
+ try {
+ Answer preMigAnswer = agentManager.send(srcHost.getId(), preMigCmd);
+ if (preMigAnswer == null || !preMigAnswer.getResult()) {
+ String details = preMigAnswer != null ? preMigAnswer.getDetails() : "null answer returned";
+ logger.warn("PreMigrationCommand failed for CLVM/CLVM_NG VM [{}] on source host [{}]: {}. Migration will continue but may fail if volumes are exclusively locked.", vmTO.getName(), srcHost.getId(), details);
+ } else {
+ logger.info("Successfully converted CLVM/CLVM_NG volumes to shared mode on source host [{}] for VM [{}].", srcHost.getId(), vmTO.getName());
+ }
+ } catch (Exception e) {
+ logger.warn("Failed to send PreMigrationCommand to source host [{}] for VM [{}]: {}. Migration will continue but may fail if volumes are exclusively locked.", srcHost.getId(), vmTO.getName(), e.getMessage());
+ }
+ }
+ }
+
private MigrationOptions.Type decideMigrationTypeAndCopyTemplateIfNeeded(Host destHost, VMInstanceVO vmInstance, VolumeInfo srcVolumeInfo, StoragePoolVO sourceStoragePool, StoragePoolVO destStoragePool, DataStore destDataStore) {
VMTemplateVO vmTemplate = _vmTemplateDao.findById(vmInstance.getTemplateId());
String srcVolumeBackingFile = getVolumeBackingFile(srcVolumeInfo);
+
+ // Check if source is CLVM/CLVM_NG (block device storage)
+ // LinkedClone (VIR_MIGRATE_NON_SHARED_INC) only works for file → file migrations
+ // For block device sources, use FullClone (VIR_MIGRATE_NON_SHARED_DISK)
+ boolean sourceIsBlockDevice = sourceStoragePool.getPoolType() == StoragePoolType.CLVM ||
+ sourceStoragePool.getPoolType() == StoragePoolType.CLVM_NG;
+
if (StringUtils.isNotBlank(srcVolumeBackingFile) && supportStoragePoolType(destStoragePool.getPoolType(), StoragePoolType.Filesystem) &&
+ !sourceIsBlockDevice &&
srcVolumeInfo.getTemplateId() != null &&
Objects.nonNull(vmTemplate) &&
!Arrays.asList(KVM_VM_IMPORT_DEFAULT_TEMPLATE_NAME, VM_IMPORT_DEFAULT_TEMPLATE_NAME).contains(vmTemplate.getName())) {
@@ -2222,8 +2270,12 @@ private MigrationOptions.Type decideMigrationTypeAndCopyTemplateIfNeeded(Host de
copyTemplateToTargetFilesystemStorageIfNeeded(srcVolumeInfo, sourceStoragePool, destDataStore, destStoragePool, destHost);
return MigrationOptions.Type.LinkedClone;
}
- logger.debug(String.format("Skipping copy template from source storage pool [%s] to target storage pool [%s] before migration due to volume [%s] does not have a " +
- "template or we are doing full clone migration.", sourceStoragePool.getId(), destStoragePool.getId(), srcVolumeInfo.getId()));
+
+ if (sourceIsBlockDevice) {
+ logger.debug(String.format("Source storage pool [%s] is block device (CLVM/CLVM_NG). Using FullClone migration for volume [%s] to target storage pool [%s]. Template copy skipped as entire volume will be copied.", sourceStoragePool.getId(), srcVolumeInfo.getId(), destStoragePool.getId()));
+ } else {
+ logger.debug(String.format("Skipping copy template from source storage pool [%s] to target storage pool [%s] before migration due to volume [%s] does not have a template or we are doing full clone migration.", sourceStoragePool.getId(), destStoragePool.getId(), srcVolumeInfo.getId()));
+ }
return MigrationOptions.Type.FullClone;
}
@@ -2289,6 +2341,39 @@ protected MigrateCommand.MigrateDiskInfo configureMigrateDiskInfo(VolumeInfo src
MigrateCommand.MigrateDiskInfo.Source.DEV, destPath, backingPath);
}
+ /**
+ * UpdatesMigrateDiskInfo for CLVM/CLVM_NG block devices by returning a new instance with corrected disk type, driver type, and source.
+ * For CLVM/CLVM_NG destinations, returns a new MigrateDiskInfo with BLOCK disk type, DEV source, and appropriate driver type (QCOW2 for CLVM_NG, RAW for CLVM).
+ * For other storage types, returns the original MigrateDiskInfo unchanged.
+ *
+ * @param migrateDiskInfo The original MigrateDiskInfo object
+ * @param destStoragePool The destination storage pool
+ * @return A new MigrateDiskInfo with updated values for CLVM/CLVM_NG, or the original for other storage types
+ */
+ protected MigrateCommand.MigrateDiskInfo updateMigrateDiskInfoForBlockDevice(MigrateCommand.MigrateDiskInfo migrateDiskInfo,
+ StoragePoolVO destStoragePool) {
+ if (ClvmLockManager.isClvmPoolType(destStoragePool.getPoolType())) {
+
+ MigrateCommand.MigrateDiskInfo.DriverType driverType =
+ (destStoragePool.getPoolType() == StoragePoolType.CLVM_NG) ?
+ MigrateCommand.MigrateDiskInfo.DriverType.QCOW2 :
+ MigrateCommand.MigrateDiskInfo.DriverType.RAW;
+
+ logger.debug("Updating MigrateDiskInfo for {} destination: setting BLOCK disk type, DEV source, and {} driver type",
+ destStoragePool.getPoolType(), driverType);
+
+ return new MigrateCommand.MigrateDiskInfo(
+ migrateDiskInfo.getSerialNumber(),
+ MigrateCommand.MigrateDiskInfo.DiskType.BLOCK,
+ driverType,
+ MigrateCommand.MigrateDiskInfo.Source.DEV,
+ migrateDiskInfo.getSourceText(),
+ migrateDiskInfo.getBackingStoreText());
+ }
+
+ return migrateDiskInfo;
+ }
+
/**
* Sets the volume path as the iScsi name in case of a configured iScsi.
*/
diff --git a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java
index 808c319b40f2..6f0776b27c8c 100644
--- a/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java
+++ b/engine/storage/datamotion/src/test/java/org/apache/cloudstack/storage/motion/KvmNonManagedStorageSystemDataMotionTest.java
@@ -168,6 +168,8 @@ public Boolean supportStoragePoolType(StoragePoolType storagePoolType) {
supportedTypes.add(StoragePoolType.Filesystem);
supportedTypes.add(StoragePoolType.NetworkFilesystem);
supportedTypes.add(StoragePoolType.SharedMountPoint);
+ supportedTypes.add(StoragePoolType.CLVM);
+ supportedTypes.add(StoragePoolType.CLVM_NG);
return supportedTypes.contains(storagePoolType);
}
@@ -505,6 +507,8 @@ public void validateSupportStoragePoolType() {
supportedTypes.add(StoragePoolType.Filesystem);
supportedTypes.add(StoragePoolType.NetworkFilesystem);
supportedTypes.add(StoragePoolType.SharedMountPoint);
+ supportedTypes.add(StoragePoolType.CLVM);
+ supportedTypes.add(StoragePoolType.CLVM_NG);
for (StoragePoolType poolType : StoragePoolType.values()) {
boolean isSupported = kvmNonManagedStorageDataMotionStrategy.supportStoragePoolType(poolType);
diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java
index 7174336113b5..e02d17fe19b7 100644
--- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java
+++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategy.java
@@ -60,6 +60,7 @@
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.storage.ClvmLockManager;
import com.cloud.storage.CreateSnapshotPayload;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.Snapshot;
@@ -643,6 +644,10 @@ public StrategyPriority canHandle(Snapshot snapshot, Long zoneId, SnapshotOperat
return StrategyPriority.DEFAULT;
}
+ if (isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO)) {
+ return StrategyPriority.DEFAULT;
+ }
+
return StrategyPriority.CANT_HANDLE;
}
if (zoneId != null && SnapshotOperation.DELETE.equals(op)) {
@@ -691,4 +696,32 @@ protected boolean isSnapshotStoredOnSameZoneStoreForQCOW2Volume(Snapshot snapsho
dataStoreMgr.getStoreZoneId(s.getDataStoreId(), s.getRole()), volumeVO.getDataCenterId()));
}
+ /**
+ * Checks if a CLVM volume snapshot is stored on secondary storage in the same zone.
+ * CLVM snapshots are backed up to secondary storage and removed from primary storage.
+ */
+ protected boolean isSnapshotStoredOnSecondaryForCLVMVolume(Snapshot snapshot, VolumeVO volumeVO) {
+ if (volumeVO == null) {
+ return false;
+ }
+
+ Long poolId = volumeVO.getPoolId();
+ if (poolId == null) {
+ return false;
+ }
+
+ StoragePool pool = (StoragePool) dataStoreMgr.getDataStore(poolId, DataStoreRole.Primary);
+ if (pool == null || !ClvmLockManager.isClvmPoolType(pool.getPoolType())) {
+ return false;
+ }
+
+ List snapshotStores = snapshotStoreDao.listReadyBySnapshot(snapshot.getId(), DataStoreRole.Image);
+ if (CollectionUtils.isEmpty(snapshotStores)) {
+ return false;
+ }
+
+ return snapshotStores.stream().anyMatch(s -> Objects.equals(
+ dataStoreMgr.getStoreZoneId(s.getDataStoreId(), s.getRole()), volumeVO.getDataCenterId()));
+ }
+
}
diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java
index b71d6cf3afac..665c3a4659ca 100644
--- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java
+++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java
@@ -27,6 +27,7 @@
import com.cloud.hypervisor.Hypervisor;
import com.cloud.storage.Snapshot;
+import com.cloud.storage.Storage;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.vm.snapshot.VMSnapshotDetailsVO;
import com.cloud.vm.snapshot.dao.VMSnapshotDetailsDao;
@@ -468,6 +469,13 @@ public boolean revertVMSnapshot(VMSnapshot vmSnapshot) {
@Override
public StrategyPriority canHandle(VMSnapshot vmSnapshot) {
+ UserVmVO vm = userVmDao.findById(vmSnapshot.getVmId());
+ String cantHandleLog = String.format("Default VM snapshot cannot handle VM snapshot for [%s]", vm);
+
+ if (isRunningVMVolumeOnCLVMStorage(vm, cantHandleLog)) {
+ return StrategyPriority.CANT_HANDLE;
+ }
+
return StrategyPriority.DEFAULT;
}
@@ -493,10 +501,31 @@ public boolean deleteVMSnapshotFromDB(VMSnapshot vmSnapshot, boolean unmanage) {
return vmSnapshotDao.remove(vmSnapshot.getId());
}
+ protected boolean isRunningVMVolumeOnCLVMStorage(UserVmVO vm, String cantHandleLog) {
+ Long vmId = vm.getId();
+ if (State.Running.equals(vm.getState())) {
+ List volumes = volumeDao.findByInstance(vmId);
+ for (VolumeVO volume : volumes) {
+ StoragePool pool = primaryDataStoreDao.findById(volume.getPoolId());
+ if (pool != null && pool.getPoolType() == Storage.StoragePoolType.CLVM) {
+ logger.warn("Rejecting VM snapshot request: {} - VM is running on CLVM storage (pool: {}, poolType: CLVM)",
+ cantHandleLog, pool.getName());
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
@Override
public StrategyPriority canHandle(Long vmId, Long rootPoolId, boolean snapshotMemory) {
UserVmVO vm = userVmDao.findById(vmId);
String cantHandleLog = String.format("Default VM snapshot cannot handle VM snapshot for [%s]", vm);
+
+ if (isRunningVMVolumeOnCLVMStorage(vm, cantHandleLog)) {
+ return StrategyPriority.CANT_HANDLE;
+ }
+
if (State.Running.equals(vm.getState()) && !snapshotMemory) {
logger.debug("{} as it is running and its memory will not be affected.", cantHandleLog, vm);
return StrategyPriority.CANT_HANDLE;
diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/StorageVMSnapshotStrategy.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/StorageVMSnapshotStrategy.java
index e3f28a7012c2..7a9cb4601543 100644
--- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/StorageVMSnapshotStrategy.java
+++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/vmsnapshot/StorageVMSnapshotStrategy.java
@@ -345,6 +345,13 @@ public StrategyPriority canHandle(VMSnapshot vmSnapshot) {
}
}
+ Long vmId = vmSnapshot.getVmId();
+ UserVmVO vm = userVmDao.findById(vmId);
+ String cantHandleLog = String.format("Storage VM snapshot strategy cannot handle VM snapshot for [%s]", vm);
+ if (vm != null && isRunningVMVolumeOnCLVMStorage(vm, cantHandleLog)) {
+ return StrategyPriority.CANT_HANDLE;
+ }
+
if ( SnapshotManager.VmStorageSnapshotKvm.value() && userVm.getHypervisorType() == Hypervisor.HypervisorType.KVM
&& vmSnapshot.getType() == VMSnapshot.Type.Disk) {
return StrategyPriority.HYPERVISOR;
diff --git a/engine/storage/snapshot/src/test/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategyTest.java b/engine/storage/snapshot/src/test/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategyTest.java
index 53f98c18f1be..d08e62dd92b5 100644
--- a/engine/storage/snapshot/src/test/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategyTest.java
+++ b/engine/storage/snapshot/src/test/java/org/apache/cloudstack/storage/snapshot/DefaultSnapshotStrategyTest.java
@@ -21,6 +21,7 @@
import java.util.List;
import com.cloud.hypervisor.Hypervisor;
+import com.cloud.storage.StoragePool;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine;
@@ -327,4 +328,236 @@ public void testIsSnapshotStoredOnSameZoneStoreForQCOW2VolumeHasRef() {
prepareMocksForIsSnapshotStoredOnSameZoneStoreForQCOW2VolumeTest(100L);
Assert.assertTrue(defaultSnapshotStrategySpy.isSnapshotStoredOnSameZoneStoreForQCOW2Volume(snapshot, volumeVO));
}
+
+ @Test
+ public void testIsSnapshotStoredOnSecondaryForCLVMVolume_NullVolume() {
+ Snapshot snapshot = Mockito.mock(Snapshot.class);
+ Assert.assertFalse(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, null));
+ }
+
+ @Test
+ public void testIsSnapshotStoredOnSecondaryForCLVMVolume_NullPoolId() {
+ Snapshot snapshot = Mockito.mock(Snapshot.class);
+ VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
+ Mockito.when(volumeVO.getPoolId()).thenReturn(null);
+
+ Assert.assertFalse(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
+ }
+
+ @Test
+ public void testIsSnapshotStoredOnSecondaryForCLVMVolume_NullPool() {
+ Snapshot snapshot = Mockito.mock(Snapshot.class);
+ VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
+ Mockito.when(volumeVO.getPoolId()).thenReturn(10L);
+ Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn(null);
+
+ Assert.assertFalse(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
+ }
+
+ @Test
+ public void testIsSnapshotStoredOnSecondaryForCLVMVolume_NonCLVMPool() {
+ Snapshot snapshot = Mockito.mock(Snapshot.class);
+ VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
+ Mockito.when(volumeVO.getPoolId()).thenReturn(10L);
+
+ StoragePool pool = Mockito.mock(StoragePool.class, Mockito.withSettings().extraInterfaces(DataStore.class));
+ Mockito.when(pool.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
+ Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn((DataStore) pool);
+
+ Assert.assertFalse(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
+ }
+
+ @Test
+ public void testIsSnapshotStoredOnSecondaryForCLVMVolume_RBDPool() {
+ Snapshot snapshot = Mockito.mock(Snapshot.class);
+ VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
+ Mockito.when(volumeVO.getPoolId()).thenReturn(10L);
+
+ StoragePool pool = Mockito.mock(StoragePool.class, Mockito.withSettings().extraInterfaces(DataStore.class));
+ Mockito.when(pool.getPoolType()).thenReturn(Storage.StoragePoolType.RBD);
+ Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn((DataStore) pool);
+
+ Assert.assertFalse(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
+ }
+
+ @Test
+ public void testIsSnapshotStoredOnSecondaryForCLVMVolume_CLVMPoolNoSnapshotStores() {
+ Snapshot snapshot = Mockito.mock(Snapshot.class);
+ Mockito.when(snapshot.getId()).thenReturn(1L);
+
+ VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
+ Mockito.when(volumeVO.getPoolId()).thenReturn(10L);
+
+ StoragePool pool = Mockito.mock(StoragePool.class, Mockito.withSettings().extraInterfaces(DataStore.class));
+ Mockito.when(pool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
+ Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn((DataStore) pool);
+
+ Mockito.when(snapshotDataStoreDao.listReadyBySnapshot(1L, DataStoreRole.Image)).thenReturn(new ArrayList<>());
+
+ Assert.assertFalse(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
+ }
+
+ @Test
+ public void testIsSnapshotStoredOnSecondaryForCLVMVolume_CLVMPoolSnapshotInDifferentZone() {
+ Snapshot snapshot = Mockito.mock(Snapshot.class);
+ Mockito.when(snapshot.getId()).thenReturn(1L);
+
+ VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
+ Mockito.when(volumeVO.getPoolId()).thenReturn(10L);
+ Mockito.when(volumeVO.getDataCenterId()).thenReturn(100L);
+
+ StoragePool pool = Mockito.mock(StoragePool.class, Mockito.withSettings().extraInterfaces(DataStore.class));
+ Mockito.when(pool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
+ Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn((DataStore) pool);
+
+ SnapshotDataStoreVO snapshotStore1 = Mockito.mock(SnapshotDataStoreVO.class);
+ Mockito.when(snapshotStore1.getDataStoreId()).thenReturn(201L);
+ Mockito.when(snapshotStore1.getRole()).thenReturn(DataStoreRole.Image);
+
+ SnapshotDataStoreVO snapshotStore2 = Mockito.mock(SnapshotDataStoreVO.class);
+ Mockito.when(snapshotStore2.getDataStoreId()).thenReturn(202L);
+ Mockito.when(snapshotStore2.getRole()).thenReturn(DataStoreRole.Image);
+
+ Mockito.when(snapshotDataStoreDao.listReadyBySnapshot(1L, DataStoreRole.Image))
+ .thenReturn(List.of(snapshotStore1, snapshotStore2));
+
+ Mockito.when(dataStoreManager.getStoreZoneId(201L, DataStoreRole.Image)).thenReturn(111L);
+ Mockito.when(dataStoreManager.getStoreZoneId(202L, DataStoreRole.Image)).thenReturn(112L);
+
+ Assert.assertFalse(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
+ }
+
+ @Test
+ public void testIsSnapshotStoredOnSecondaryForCLVMVolume_CLVMPoolSnapshotInSameZone() {
+ Snapshot snapshot = Mockito.mock(Snapshot.class);
+ Mockito.when(snapshot.getId()).thenReturn(1L);
+
+ VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
+ Mockito.when(volumeVO.getPoolId()).thenReturn(10L);
+ Mockito.when(volumeVO.getDataCenterId()).thenReturn(100L);
+
+ StoragePool pool = Mockito.mock(StoragePool.class, Mockito.withSettings().extraInterfaces(DataStore.class));
+ Mockito.when(pool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
+ Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn((DataStore) pool);
+
+ SnapshotDataStoreVO snapshotStore = Mockito.mock(SnapshotDataStoreVO.class);
+ Mockito.when(snapshotStore.getDataStoreId()).thenReturn(201L);
+ Mockito.when(snapshotStore.getRole()).thenReturn(DataStoreRole.Image);
+
+ Mockito.when(snapshotDataStoreDao.listReadyBySnapshot(1L, DataStoreRole.Image))
+ .thenReturn(List.of(snapshotStore));
+
+ Mockito.when(dataStoreManager.getStoreZoneId(201L, DataStoreRole.Image)).thenReturn(100L);
+
+ Assert.assertTrue(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
+ }
+
+ @Test
+ public void testIsSnapshotStoredOnSecondaryForCLVMVolume_CLVMPoolMultipleSnapshotsOneMatches() {
+ Snapshot snapshot = Mockito.mock(Snapshot.class);
+ Mockito.when(snapshot.getId()).thenReturn(1L);
+
+ VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
+ Mockito.when(volumeVO.getPoolId()).thenReturn(10L);
+ Mockito.when(volumeVO.getDataCenterId()).thenReturn(100L);
+
+ StoragePool pool = Mockito.mock(StoragePool.class, Mockito.withSettings().extraInterfaces(DataStore.class));
+ Mockito.when(pool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
+ Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn((DataStore) pool);
+
+ SnapshotDataStoreVO snapshotStore1 = Mockito.mock(SnapshotDataStoreVO.class);
+ Mockito.when(snapshotStore1.getDataStoreId()).thenReturn(201L);
+ Mockito.when(snapshotStore1.getRole()).thenReturn(DataStoreRole.Image);
+
+ SnapshotDataStoreVO snapshotStore2 = Mockito.mock(SnapshotDataStoreVO.class);
+ Mockito.when(snapshotStore2.getDataStoreId()).thenReturn(202L);
+ Mockito.when(snapshotStore2.getRole()).thenReturn(DataStoreRole.Image);
+
+ SnapshotDataStoreVO snapshotStore3 = Mockito.mock(SnapshotDataStoreVO.class);
+
+ Mockito.when(snapshotDataStoreDao.listReadyBySnapshot(1L, DataStoreRole.Image))
+ .thenReturn(List.of(snapshotStore1, snapshotStore2, snapshotStore3));
+
+ Mockito.when(dataStoreManager.getStoreZoneId(201L, DataStoreRole.Image)).thenReturn(111L);
+ Mockito.when(dataStoreManager.getStoreZoneId(202L, DataStoreRole.Image)).thenReturn(100L);
+
+ Assert.assertTrue(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
+ }
+
+ @Test
+ public void testIsSnapshotStoredOnSecondaryForCLVMVolume_CLVMPoolNullZoneIds() {
+ Snapshot snapshot = Mockito.mock(Snapshot.class);
+ Mockito.when(snapshot.getId()).thenReturn(1L);
+
+ VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
+ Mockito.when(volumeVO.getPoolId()).thenReturn(10L);
+ Mockito.when(volumeVO.getDataCenterId()).thenReturn(100L);
+
+ StoragePool pool = Mockito.mock(StoragePool.class, Mockito.withSettings().extraInterfaces(DataStore.class));
+ Mockito.when(pool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
+ Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn((DataStore) pool);
+
+ SnapshotDataStoreVO snapshotStore = Mockito.mock(SnapshotDataStoreVO.class);
+ Mockito.when(snapshotStore.getDataStoreId()).thenReturn(201L);
+ Mockito.when(snapshotStore.getRole()).thenReturn(DataStoreRole.Image);
+
+ Mockito.when(snapshotDataStoreDao.listReadyBySnapshot(1L, DataStoreRole.Image))
+ .thenReturn(List.of(snapshotStore));
+
+ Mockito.when(dataStoreManager.getStoreZoneId(201L, DataStoreRole.Image)).thenReturn(null);
+
+ Assert.assertFalse(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
+ }
+
+ @Test
+ public void testIsSnapshotStoredOnSecondaryForCLVMVolume_CLVMPoolVolumeNullDataCenter() {
+ Snapshot snapshot = Mockito.mock(Snapshot.class);
+ Mockito.when(snapshot.getId()).thenReturn(1L);
+
+ VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
+ Mockito.when(volumeVO.getPoolId()).thenReturn(10L);
+ Mockito.when(volumeVO.getDataCenterId()).thenReturn(1L);
+
+ StoragePool pool = Mockito.mock(StoragePool.class, Mockito.withSettings().extraInterfaces(DataStore.class));
+ Mockito.when(pool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
+ Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn((DataStore) pool);
+
+ SnapshotDataStoreVO snapshotStore = Mockito.mock(SnapshotDataStoreVO.class);
+ Mockito.when(snapshotStore.getDataStoreId()).thenReturn(201L);
+ Mockito.when(snapshotStore.getRole()).thenReturn(DataStoreRole.Image);
+
+ Mockito.when(snapshotDataStoreDao.listReadyBySnapshot(1L, DataStoreRole.Image))
+ .thenReturn(List.of(snapshotStore));
+
+ Mockito.when(dataStoreManager.getStoreZoneId(201L, DataStoreRole.Image)).thenReturn(100L);
+
+ Assert.assertFalse(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
+ }
+
+ @Test
+ public void testIsSnapshotStoredOnSecondaryForCLVMVolume_CLVMPoolMultipleSnapshotsAllInSameZone() {
+ Snapshot snapshot = Mockito.mock(Snapshot.class);
+ Mockito.when(snapshot.getId()).thenReturn(1L);
+
+ VolumeVO volumeVO = Mockito.mock(VolumeVO.class);
+ Mockito.when(volumeVO.getPoolId()).thenReturn(10L);
+ Mockito.when(volumeVO.getDataCenterId()).thenReturn(100L);
+
+ StoragePool pool = Mockito.mock(StoragePool.class, Mockito.withSettings().extraInterfaces(DataStore.class));
+ Mockito.when(pool.getPoolType()).thenReturn(Storage.StoragePoolType.CLVM);
+ Mockito.when(dataStoreManager.getDataStore(10L, DataStoreRole.Primary)).thenReturn((DataStore) pool);
+
+ SnapshotDataStoreVO snapshotStore1 = Mockito.mock(SnapshotDataStoreVO.class);
+ Mockito.when(snapshotStore1.getDataStoreId()).thenReturn(201L);
+ Mockito.when(snapshotStore1.getRole()).thenReturn(DataStoreRole.Image);
+
+ SnapshotDataStoreVO snapshotStore2 = Mockito.mock(SnapshotDataStoreVO.class);
+
+ Mockito.when(snapshotDataStoreDao.listReadyBySnapshot(1L, DataStoreRole.Image))
+ .thenReturn(List.of(snapshotStore1, snapshotStore2));
+
+ Mockito.when(dataStoreManager.getStoreZoneId(201L, DataStoreRole.Image)).thenReturn(100L);
+
+ Assert.assertTrue(defaultSnapshotStrategySpy.isSnapshotStoredOnSecondaryForCLVMVolume(snapshot, volumeVO));
+ }
}
diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java
index 061d18dc3769..fc2f263ee85f 100644
--- a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java
+++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java
@@ -32,8 +32,12 @@
import com.cloud.dc.DedicatedResourceVO;
import com.cloud.dc.dao.DedicatedResourceDao;
+import com.cloud.storage.ClvmLockManager;
+import com.cloud.storage.VolumeDetailVO;
+import com.cloud.storage.dao.VolumeDetailsDao;
import com.cloud.user.Account;
import com.cloud.utils.Pair;
+import com.cloud.utils.db.QueryBuilder;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
@@ -46,6 +50,7 @@
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
import org.apache.cloudstack.storage.LocalHostEndpoint;
import org.apache.cloudstack.storage.RemoteHostEndPoint;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.LogManager;
import org.springframework.stereotype.Component;
@@ -59,8 +64,8 @@
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage.TemplateType;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import com.cloud.utils.db.DB;
-import com.cloud.utils.db.QueryBuilder;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.TransactionLegacy;
import com.cloud.utils.exception.CloudRuntimeException;
@@ -75,6 +80,10 @@ public class DefaultEndPointSelector implements EndPointSelector {
private HostDao hostDao;
@Inject
private DedicatedResourceDao dedicatedResourceDao;
+ @Inject
+ private PrimaryDataStoreDao _storagePoolDao;
+ @Inject
+ private VolumeDetailsDao _volDetailsDao;
private static final String VOL_ENCRYPT_COLUMN_NAME = "volume_encryption_support";
private final String findOneHostOnPrimaryStorage = "select t.id from "
@@ -264,6 +273,14 @@ public EndPoint select(DataObject srcData, DataObject destData) {
@Override
public EndPoint select(DataObject srcData, DataObject destData, boolean volumeEncryptionSupportRequired) {
+ if (destData instanceof VolumeInfo) {
+ EndPoint clvmEndpoint = selectClvmEndpointIfApplicable((VolumeInfo) destData, "template-to-volume copy");
+ if (clvmEndpoint != null) {
+ return clvmEndpoint;
+ }
+ }
+
+ // Default behavior for non-CLVM or when no destination host is set
DataStore srcStore = srcData.getDataStore();
DataStore destStore = destData.getDataStore();
if (moveBetweenPrimaryImage(srcStore, destStore)) {
@@ -388,18 +405,89 @@ private List listUpAndConnectingSecondaryStorageVmHost(Long dcId) {
return sc.list();
}
+ /**
+ * Selects endpoint for CLVM volumes with destination host hint.
+ * This ensures volumes are created on the correct host with exclusive locks.
+ *
+ * @param volume The volume to check for CLVM routing
+ * @param operation Description of the operation (for logging)
+ * @return EndPoint for the destination host if CLVM routing applies, null otherwise
+ */
+ private EndPoint selectClvmEndpointIfApplicable(VolumeInfo volume, String operation) {
+ DataStore store = volume.getDataStore();
+
+ if (store.getRole() != DataStoreRole.Primary) {
+ return null;
+ }
+
+ // Check if this is a CLVM pool
+ StoragePoolVO pool = _storagePoolDao.findById(store.getId());
+ if (pool == null || !ClvmLockManager.isClvmPoolType(pool.getPoolType())) {
+ return null;
+ }
+
+ // Check if destination host hint is set
+ Long destHostId = volume.getDestinationHostId();
+ if (destHostId == null) {
+ return null;
+ }
+
+ logger.info("CLVM {}: routing volume {} to destination host {} for optimal exclusive lock placement",
+ operation, volume.getUuid(), destHostId);
+
+ EndPoint ep = getEndPointFromHostId(destHostId);
+ if (ep != null) {
+ return ep;
+ }
+
+ logger.warn("Could not get endpoint for destination host {}, falling back to default selection", destHostId);
+ return null;
+ }
+
@Override
public EndPoint select(DataObject object, boolean encryptionSupportRequired) {
DataStore store = object.getDataStore();
+
+ // This ensures volumes are created on the correct host with exclusive locks
+ if (object instanceof VolumeInfo && store.getRole() == DataStoreRole.Primary) {
+ VolumeInfo volInfo = (VolumeInfo) object;
+ EndPoint clvmEndpoint = selectClvmEndpointIfApplicable(volInfo, "volume creation");
+ if (clvmEndpoint != null) {
+ return clvmEndpoint;
+ }
+ }
+
+ // Default behavior for non-CLVM or when no destination host is set
if (store.getRole() == DataStoreRole.Primary) {
return findEndPointInScope(store.getScope(), findOneHostOnPrimaryStorage, store.getId(), encryptionSupportRequired);
}
throw new CloudRuntimeException(String.format("Storage role %s doesn't support encryption", store.getRole()));
}
+
@Override
public EndPoint select(DataObject object) {
DataStore store = object.getDataStore();
+
+ // For CLVM volumes, check if there's a lock host ID to route to
+ if (object instanceof VolumeInfo && store.getRole() == DataStoreRole.Primary) {
+ VolumeInfo volume = (VolumeInfo) object;
+ StoragePoolVO pool = _storagePoolDao.findById(store.getId());
+ if (pool != null && ClvmLockManager.isClvmPoolType(pool.getPoolType())) {
+ Long lockHostId = getClvmLockHostId(volume);
+ if (lockHostId != null) {
+ logger.debug("Routing CLVM volume {} operation to lock holder host {}",
+ volume.getUuid(), lockHostId);
+ EndPoint ep = getEndPointFromHostId(lockHostId);
+ if (ep != null) {
+ return ep;
+ }
+ logger.warn("Could not get endpoint for CLVM lock host {}, falling back to default selection",
+ lockHostId);
+ }
+ }
+ }
+
EndPoint ep = select(store);
if (ep != null) {
return ep;
@@ -493,6 +581,31 @@ public EndPoint select(DataObject object, StorageAction action, boolean encrypti
}
case DELETEVOLUME: {
VolumeInfo volume = (VolumeInfo) object;
+
+ // For CLVM volumes, route to the host holding the exclusive lock
+ if (volume.getHypervisorType() == Hypervisor.HypervisorType.KVM) {
+ DataStore store = volume.getDataStore();
+ if (store.getRole() == DataStoreRole.Primary) {
+ StoragePoolVO pool = _storagePoolDao.findById(store.getId());
+ if (pool != null && ClvmLockManager.isClvmPoolType(pool.getPoolType())) {
+ Long lockHostId = getClvmLockHostId(volume);
+ if (lockHostId != null) {
+ logger.info("Routing CLVM volume {} deletion to lock holder host {}",
+ volume.getUuid(), lockHostId);
+ EndPoint ep = getEndPointFromHostId(lockHostId);
+ if (ep != null) {
+ return ep;
+ }
+ logger.warn("Could not get endpoint for CLVM lock host {}, falling back to default selection",
+ lockHostId);
+ } else {
+ logger.debug("No CLVM lock host tracked for volume {}, using default endpoint selection",
+ volume.getUuid());
+ }
+ }
+ }
+ }
+
if (volume.getHypervisorType() == Hypervisor.HypervisorType.VMware) {
VirtualMachine vm = volume.getAttachedVM();
if (vm != null) {
@@ -589,4 +702,24 @@ public List selectAll(DataStore store) {
}
return endPoints;
}
+
+ /**
+ * Retrieves the host ID that currently holds the exclusive lock on a CLVM volume.
+ * This is tracked in volume_details table for proper routing of delete operations.
+ *
+ * @param volume The CLVM volume
+ * @return Host ID holding the lock, or null if not tracked
+ */
+ private Long getClvmLockHostId(VolumeInfo volume) {
+ VolumeDetailVO detail = _volDetailsDao.findDetail(volume.getId(), VolumeInfo.CLVM_LOCK_HOST_ID);
+ if (detail != null && detail.getValue() != null && !detail.getValue().isEmpty()) {
+ try {
+ return Long.parseLong(detail.getValue());
+ } catch (NumberFormatException e) {
+ logger.warn("Invalid CLVM lock host ID in volume_details for volume {}: {}",
+ volume.getUuid(), detail.getValue());
+ }
+ }
+ return null;
+ }
}
diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java
index 7de9000782ec..4b8c9e3e77ab 100644
--- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java
+++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/datastore/provider/DefaultHostListener.java
@@ -37,12 +37,14 @@
import com.cloud.network.dao.NetworkVO;
import com.cloud.offerings.NetworkOfferingVO;
import com.cloud.offerings.dao.NetworkOfferingDao;
+import com.cloud.storage.ClvmLockManager;
import com.cloud.storage.DataStoreRole;
import com.cloud.storage.Storage;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.StorageService;
+import com.cloud.storage.VolumeApiServiceImpl;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.exception.CloudRuntimeException;
@@ -139,6 +141,18 @@ public boolean hostConnect(long hostId, long poolId) throws StorageConflictExcep
Map nfsMountOpts = storageManager.getStoragePoolNFSMountOpts(pool, null).first();
Optional.ofNullable(nfsMountOpts).ifPresent(detailsMap::putAll);
+
+ // Propagate CLVM secure zero-fill setting to the host
+ // Note: This is done during host connection (agent start, MS restart, host reconnection)
+ // so the setting is non-dynamic. Changes require host reconnection to take effect.
+ if (ClvmLockManager.isClvmPoolType(pool.getPoolType())) {
+ Boolean clvmSecureZeroFill = VolumeApiServiceImpl.CLVMSecureZeroFill.valueIn(poolId);
+ if (clvmSecureZeroFill != null) {
+ detailsMap.put("clvmsecurezerofill", String.valueOf(clvmSecureZeroFill));
+ logger.debug("Added CLVM secure zero-fill setting: {} for storage pool: {}", clvmSecureZeroFill, pool);
+ }
+ }
+
ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool, detailsMap);
cmd.setWait(modifyStoragePoolCommandWait);
HostVO host = hostDao.findById(hostId);
diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java
index 43218b3f6a02..e8934d0ff712 100644
--- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java
+++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java
@@ -126,6 +126,7 @@ public class VolumeObject implements VolumeInfo {
private boolean directDownload;
private String vSphereStoragePolicyId;
private boolean followRedirects;
+ private Long destinationHostId; // For CLVM: hints where volume should be created
private List checkpointPaths;
private Set checkpointImageStoreUrls;
@@ -361,6 +362,30 @@ public void setDirectDownload(boolean directDownload) {
this.directDownload = directDownload;
}
+ @Override
+ public Long getDestinationHostId() {
+ // If not in memory, try to load from database (volume_details table)
+ // For CLVM volumes, this uses the CLVM_LOCK_HOST_ID which serves dual purpose:
+ // 1. During creation: hints where to create the volume
+ // 2. After creation: tracks which host holds the exclusive lock
+ if (destinationHostId == null && volumeVO != null) {
+ VolumeDetailVO detail = volumeDetailsDao.findDetail(volumeVO.getId(), CLVM_LOCK_HOST_ID);
+ if (detail != null && detail.getValue() != null && !detail.getValue().isEmpty()) {
+ try {
+ destinationHostId = Long.parseLong(detail.getValue());
+ } catch (NumberFormatException e) {
+ logger.warn("Invalid CLVM lock host ID value in volume_details for volume {}: {}", volumeVO.getUuid(), detail.getValue());
+ }
+ }
+ }
+ return destinationHostId;
+ }
+
+ @Override
+ public void setDestinationHostId(Long hostId) {
+ this.destinationHostId = hostId;
+ }
+
public void update() {
volumeDao.update(volumeVO.getId(), volumeVO);
volumeVO = volumeDao.findById(volumeVO.getId());
diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
index 436f991afbd1..424f3f73afb2 100644
--- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
+++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java
@@ -32,6 +32,8 @@
import javax.inject.Inject;
+import com.cloud.storage.ClvmLockManager;
+import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.dao.VMInstanceDao;
import org.apache.cloudstack.annotation.AnnotationService;
import org.apache.cloudstack.annotation.dao.AnnotationDao;
@@ -221,6 +223,8 @@ public class VolumeServiceImpl implements VolumeService {
private PassphraseDao passphraseDao;
@Inject
protected DiskOfferingDao diskOfferingDao;
+ @Inject
+ ClvmLockManager clvmLockManager;
public VolumeServiceImpl() {
}
@@ -2963,4 +2967,166 @@ public void moveVolumeOnSecondaryStorageToAnotherAccount(Volume volume, Account
protected String buildVolumePath(long accountId, long volumeId) {
return String.format("%s/%s/%s", TemplateConstants.DEFAULT_VOLUME_ROOT_DIR, accountId, volumeId);
}
+
+ @Override
+ public boolean transferVolumeLock(VolumeInfo volume, Long sourceHostId, Long destHostId) {
+ StoragePoolVO pool = storagePoolDao.findById(volume.getPoolId());
+ if (pool == null) {
+ logger.error("Cannot transfer volume lock for volume {}: storage pool not found", volume.getUuid());
+ return false;
+ }
+
+ logger.info("Transferring CLVM lock for volume {} (pool: {}) from host {} to host {}",
+ volume.getUuid(), pool.getName(), sourceHostId, destHostId);
+
+ return clvmLockManager.transferClvmVolumeLock(volume.getUuid(), volume.getId(), volume.getPath(),
+ pool, sourceHostId, destHostId);
+ }
+
+ @Override
+ public Long findVolumeLockHost(VolumeInfo volume) {
+ if (volume == null) {
+ logger.warn("Cannot find volume lock host: volume is null");
+ return null;
+ }
+
+ Long lockHostId = clvmLockManager.getClvmLockHostId(volume.getId(), volume.getUuid());
+ if (lockHostId != null) {
+ logger.debug("Found explicit lock host {} for volume {}", lockHostId, volume.getUuid());
+ return lockHostId;
+ }
+
+ Long instanceId = volume.getInstanceId();
+ if (instanceId != null) {
+ VMInstanceVO vmInstance = vmDao.findById(instanceId);
+ if (vmInstance != null && vmInstance.getHostId() != null) {
+ logger.debug("Volume {} is attached to VM {} on host {}",
+ volume.getUuid(), vmInstance.getUuid(), vmInstance.getHostId());
+ return vmInstance.getHostId();
+ }
+ }
+
+ StoragePoolVO pool = storagePoolDao.findById(volume.getPoolId());
+ if (pool != null && pool.getClusterId() != null) {
+ List hosts = _hostDao.findByClusterId(pool.getClusterId());
+ if (hosts != null && !hosts.isEmpty()) {
+ for (HostVO host : hosts) {
+ if (host.getStatus() == com.cloud.host.Status.Up) {
+ logger.debug("Using fallback: first UP host {} in cluster {} for volume {}",
+ host.getId(), pool.getClusterId(), volume.getUuid());
+ return host.getId();
+ }
+ }
+ }
+ }
+
+ logger.warn("Could not determine lock host for volume {}", volume.getUuid());
+ return null;
+ }
+
+ @Override
+ public VolumeInfo performLockMigration(VolumeInfo volume, Long destHostId) {
+ if (volume == null) {
+ throw new CloudRuntimeException("Cannot perform CLVM lock migration: volume is null");
+ }
+
+ String volumeUuid = volume.getUuid();
+ logger.info("Starting CLVM lock migration for volume {} (id: {}) to host {}",
+ volumeUuid, volume.getUuid(), destHostId);
+
+ Long sourceHostId = findVolumeLockHost(volume);
+ if (sourceHostId == null) {
+ logger.warn("Could not determine source host for CLVM volume {} lock, assuming volume is not exclusively locked",
+ volumeUuid);
+ sourceHostId = destHostId;
+ }
+
+ if (sourceHostId.equals(destHostId)) {
+ logger.info("CLVM volume {} already has lock on destination host {}, no migration needed",
+ volumeUuid, destHostId);
+ return volume;
+ }
+
+ logger.info("Migrating CLVM volume {} lock from host {} to host {}",
+ volumeUuid, sourceHostId, destHostId);
+
+ boolean success = transferVolumeLock(volume, sourceHostId, destHostId);
+ if (!success) {
+ throw new CloudRuntimeException(
+ String.format("Failed to transfer CLVM lock for volume %s from host %s to host %s",
+ volumeUuid, sourceHostId, destHostId));
+ }
+
+ logger.info("Successfully migrated CLVM volume {} lock from host {} to host {}",
+ volumeUuid, sourceHostId, destHostId);
+
+ return volFactory.getVolume(volume.getId());
+ }
+
+ @Override
+ public boolean areBothPoolsClvmType(StoragePoolType volumePoolType, StoragePoolType vmPoolType) {
+ if (volumePoolType == null || vmPoolType == null) {
+ logger.debug("Cannot check if both pools are CLVM type: one or both pool types are null");
+ return false;
+ }
+ return ClvmLockManager.isClvmPoolType(volumePoolType) &&
+ ClvmLockManager.isClvmPoolType(vmPoolType);
+ }
+
+ @Override
+ public boolean isLockTransferRequired(VolumeInfo volumeToAttach, StoragePoolType volumePoolType, StoragePoolType vmPoolType,
+ Long volumePoolId, Long vmPoolId, Long vmHostId) {
+ if (volumePoolType != null && !ClvmLockManager.isClvmPoolType(volumePoolType)) {
+ return false;
+ }
+
+ if (volumePoolId == null || !volumePoolId.equals(vmPoolId)) {
+ return false;
+ }
+
+ Long volumeLockHostId = findVolumeLockHost(volumeToAttach);
+
+ if (volumeLockHostId == null) {
+ VolumeVO volumeVO = _volumeDao.findById(volumeToAttach.getId());
+ if (volumeVO != null && volumeVO.getState() == Volume.State.Ready && volumeVO.getInstanceId() == null) {
+ logger.debug("CLVM volume {} is detached on same pool, lock transfer may be needed",
+ volumeToAttach.getUuid());
+ return true;
+ }
+ }
+
+ if (volumeLockHostId != null && vmHostId != null && !volumeLockHostId.equals(vmHostId)) {
+ logger.info("CLVM lock transfer required: Volume {} lock is on host {} but VM is on host {}",
+ volumeToAttach.getUuid(), volumeLockHostId, vmHostId);
+ return true;
+ }
+
+ return false;
+ }
+
+ @Override
+ public boolean isLightweightMigrationNeeded(StoragePoolType volumePoolType, StoragePoolType vmPoolType,
+ String volumePoolPath, String vmPoolPath) {
+ if (!areBothPoolsClvmType(volumePoolType, vmPoolType)) {
+ return false;
+ }
+
+ String volumeVgName = extractVgNameFromPath(volumePoolPath);
+ String vmVgName = extractVgNameFromPath(vmPoolPath);
+
+ if (volumeVgName != null && volumeVgName.equals(vmVgName)) {
+ logger.info("CLVM lightweight migration detected: Volume is in same VG ({}), only lock transfer needed (no data copy)", volumeVgName);
+ return true;
+ }
+
+ return false;
+ }
+
+ private String extractVgNameFromPath(String poolPath) {
+ if (poolPath == null) {
+ return null;
+ }
+ return poolPath.startsWith("/") ? poolPath.substring(1) : poolPath;
+ }
}
+
diff --git a/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceImplClvmTest.java b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceImplClvmTest.java
new file mode 100644
index 000000000000..5725bd91ffb6
--- /dev/null
+++ b/engine/storage/volume/src/test/java/org/apache/cloudstack/storage/volume/VolumeServiceImplClvmTest.java
@@ -0,0 +1,311 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.volume;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.when;
+
+import com.cloud.storage.ClvmLockManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.InjectMocks;
+import org.mockito.Mock;
+import org.mockito.Spy;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.storage.Volume;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.VolumeDao;
+
+/**
+ * Tests for CLVM lock management methods in VolumeServiceImpl.
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class VolumeServiceImplClvmTest {
+
+ @Spy
+ @InjectMocks
+ private VolumeServiceImpl volumeService;
+
+ @Mock
+ private VolumeDao volumeDao;
+
+ @Mock
+ private VolumeInfo volumeInfoMock;
+
+ @Mock
+ private VolumeVO volumeVOMock;
+
+ @Mock
+ ClvmLockManager clvmLockManager;
+
+ private static final Long VOLUME_ID = 1L;
+ private static final Long POOL_ID_1 = 100L;
+ private static final Long POOL_ID_2 = 200L;
+ private static final Long HOST_ID_1 = 10L;
+ private static final Long HOST_ID_2 = 20L;
+ private static final String POOL_PATH_VG1 = "/vg1";
+ private static final String POOL_PATH_VG2 = "/vg2";
+
+ @Before
+ public void setup() {
+ when(volumeInfoMock.getId()).thenReturn(VOLUME_ID);
+ when(volumeInfoMock.getUuid()).thenReturn("test-volume-uuid");
+ }
+
+ @Test
+ public void testAreBothPoolsClvmType_BothCLVM() {
+ assertTrue(volumeService.areBothPoolsClvmType(StoragePoolType.CLVM, StoragePoolType.CLVM));
+ }
+
+ @Test
+ public void testAreBothPoolsClvmType_BothCLVM_NG() {
+ assertTrue(volumeService.areBothPoolsClvmType(StoragePoolType.CLVM_NG, StoragePoolType.CLVM_NG));
+ }
+
+ @Test
+ public void testAreBothPoolsClvmType_MixedCLVMAndCLVM_NG() {
+ assertTrue(volumeService.areBothPoolsClvmType(StoragePoolType.CLVM, StoragePoolType.CLVM_NG));
+ assertTrue(volumeService.areBothPoolsClvmType(StoragePoolType.CLVM_NG, StoragePoolType.CLVM));
+ }
+
+ @Test
+ public void testAreBothPoolsClvmType_OneCLVMOneNFS() {
+ assertFalse(volumeService.areBothPoolsClvmType(StoragePoolType.CLVM, StoragePoolType.NetworkFilesystem));
+ assertFalse(volumeService.areBothPoolsClvmType(StoragePoolType.NetworkFilesystem, StoragePoolType.CLVM));
+ }
+
+ @Test
+ public void testAreBothPoolsClvmType_OneCLVM_NGOneNFS() {
+ assertFalse(volumeService.areBothPoolsClvmType(StoragePoolType.CLVM_NG, StoragePoolType.NetworkFilesystem));
+ assertFalse(volumeService.areBothPoolsClvmType(StoragePoolType.NetworkFilesystem, StoragePoolType.CLVM_NG));
+ }
+
+ @Test
+ public void testAreBothPoolsClvmType_BothNFS() {
+ assertFalse(volumeService.areBothPoolsClvmType(StoragePoolType.NetworkFilesystem, StoragePoolType.NetworkFilesystem));
+ }
+
+ @Test
+ public void testAreBothPoolsClvmType_NullVolumePoolType() {
+ assertFalse(volumeService.areBothPoolsClvmType(null, StoragePoolType.CLVM));
+ }
+
+ @Test
+ public void testAreBothPoolsClvmType_NullVmPoolType() {
+ assertFalse(volumeService.areBothPoolsClvmType(StoragePoolType.CLVM, null));
+ }
+
+ @Test
+ public void testAreBothPoolsClvmType_BothNull() {
+ assertFalse(volumeService.areBothPoolsClvmType(null, null));
+ }
+
+
+ @Test
+ public void testIsLockTransferRequired_NonCLVMPool() {
+ assertFalse(volumeService.isLockTransferRequired(
+ volumeInfoMock, StoragePoolType.NetworkFilesystem, StoragePoolType.CLVM,
+ POOL_ID_1, POOL_ID_1, HOST_ID_1));
+ }
+
+ @Test
+ public void testIsLockTransferRequired_DifferentPools() {
+ assertFalse(volumeService.isLockTransferRequired(
+ volumeInfoMock, StoragePoolType.CLVM, StoragePoolType.CLVM,
+ POOL_ID_1, POOL_ID_2, HOST_ID_1));
+ }
+
+ @Test
+ public void testIsLockTransferRequired_NullPoolIds() {
+ assertFalse(volumeService.isLockTransferRequired(
+ volumeInfoMock, StoragePoolType.CLVM, StoragePoolType.CLVM,
+ null, POOL_ID_1, HOST_ID_1));
+
+ assertFalse(volumeService.isLockTransferRequired(
+ volumeInfoMock, StoragePoolType.CLVM, StoragePoolType.CLVM,
+ POOL_ID_1, null, HOST_ID_1));
+ }
+
+ @Test
+ public void testIsLockTransferRequired_DetachedVolumeReady() {
+ when(volumeDao.findById(VOLUME_ID)).thenReturn(volumeVOMock);
+ when(volumeVOMock.getState()).thenReturn(Volume.State.Ready);
+ when(volumeVOMock.getInstanceId()).thenReturn(null); // Detached
+
+ when(volumeService.findVolumeLockHost(volumeInfoMock)).thenReturn(null);
+
+ assertTrue(volumeService.isLockTransferRequired(
+ volumeInfoMock, StoragePoolType.CLVM, StoragePoolType.CLVM,
+ POOL_ID_1, POOL_ID_1, HOST_ID_1));
+ }
+
+ @Test
+ public void testIsLockTransferRequired_DetachedVolumeNotReady() {
+ when(volumeDao.findById(VOLUME_ID)).thenReturn(volumeVOMock);
+ when(volumeVOMock.getState()).thenReturn(Volume.State.Allocated);
+
+ when(volumeService.findVolumeLockHost(volumeInfoMock)).thenReturn(null);
+
+ assertFalse(volumeService.isLockTransferRequired(
+ volumeInfoMock, StoragePoolType.CLVM, StoragePoolType.CLVM,
+ POOL_ID_1, POOL_ID_1, HOST_ID_1));
+ }
+
+ @Test
+ public void testIsLockTransferRequired_DifferentHosts() {
+ when(volumeService.findVolumeLockHost(volumeInfoMock)).thenReturn(HOST_ID_1);
+
+ assertTrue(volumeService.isLockTransferRequired(
+ volumeInfoMock, StoragePoolType.CLVM, StoragePoolType.CLVM,
+ POOL_ID_1, POOL_ID_1, HOST_ID_2));
+ }
+
+ @Test
+ public void testIsLockTransferRequired_SameHost() {
+ when(volumeService.findVolumeLockHost(volumeInfoMock)).thenReturn(HOST_ID_1);
+
+ assertFalse(volumeService.isLockTransferRequired(
+ volumeInfoMock, StoragePoolType.CLVM, StoragePoolType.CLVM,
+ POOL_ID_1, POOL_ID_1, HOST_ID_1));
+ }
+
+ @Test
+ public void testIsLockTransferRequired_NullVmHostId() {
+ when(volumeService.findVolumeLockHost(volumeInfoMock)).thenReturn(HOST_ID_1);
+
+ assertFalse(volumeService.isLockTransferRequired(
+ volumeInfoMock, StoragePoolType.CLVM, StoragePoolType.CLVM,
+ POOL_ID_1, POOL_ID_1, null));
+ }
+
+ @Test
+ public void testIsLockTransferRequired_CLVM_NG_DifferentHosts() {
+ when(volumeService.findVolumeLockHost(volumeInfoMock)).thenReturn(HOST_ID_1);
+
+ assertTrue(volumeService.isLockTransferRequired(
+ volumeInfoMock, StoragePoolType.CLVM_NG, StoragePoolType.CLVM_NG,
+ POOL_ID_1, POOL_ID_1, HOST_ID_2));
+ }
+
+ @Test
+ public void testIsLightweightMigrationNeeded_NonCLVMPools() {
+ assertFalse(volumeService.isLightweightMigrationNeeded(
+ StoragePoolType.NetworkFilesystem, StoragePoolType.NetworkFilesystem,
+ POOL_PATH_VG1, POOL_PATH_VG1));
+ }
+
+ @Test
+ public void testIsLightweightMigrationNeeded_OneCLVMOneNFS() {
+ assertFalse(volumeService.isLightweightMigrationNeeded(
+ StoragePoolType.CLVM, StoragePoolType.NetworkFilesystem,
+ POOL_PATH_VG1, POOL_PATH_VG1));
+ }
+
+ @Test
+ public void testIsLightweightMigrationNeeded_SameVG() {
+ assertTrue(volumeService.isLightweightMigrationNeeded(
+ StoragePoolType.CLVM, StoragePoolType.CLVM,
+ "/vg1", "/vg1"));
+ }
+
+ @Test
+ public void testIsLightweightMigrationNeeded_SameVG_NoSlash() {
+ assertTrue(volumeService.isLightweightMigrationNeeded(
+ StoragePoolType.CLVM, StoragePoolType.CLVM,
+ "vg1", "vg1"));
+ }
+
+ @Test
+ public void testIsLightweightMigrationNeeded_SameVG_MixedSlash() {
+ assertTrue(volumeService.isLightweightMigrationNeeded(
+ StoragePoolType.CLVM, StoragePoolType.CLVM,
+ "/vg1", "vg1"));
+
+ assertTrue(volumeService.isLightweightMigrationNeeded(
+ StoragePoolType.CLVM, StoragePoolType.CLVM,
+ "vg1", "/vg1"));
+ }
+
+ @Test
+ public void testIsLightweightMigrationNeeded_DifferentVG() {
+ assertFalse(volumeService.isLightweightMigrationNeeded(
+ StoragePoolType.CLVM, StoragePoolType.CLVM,
+ "/vg1", "/vg2"));
+ }
+
+ @Test
+ public void testIsLightweightMigrationNeeded_CLVM_NG_SameVG() {
+ assertTrue(volumeService.isLightweightMigrationNeeded(
+ StoragePoolType.CLVM_NG, StoragePoolType.CLVM_NG,
+ "/vg1", "/vg1"));
+ }
+
+ @Test
+ public void testIsLightweightMigrationNeeded_CLVM_NG_DifferentVG() {
+ assertFalse(volumeService.isLightweightMigrationNeeded(
+ StoragePoolType.CLVM_NG, StoragePoolType.CLVM_NG,
+ "/vg1", "/vg2"));
+ }
+
+ @Test
+ public void testIsLightweightMigrationNeeded_MixedCLVM_CLVM_NG_SameVG() {
+ assertTrue(volumeService.isLightweightMigrationNeeded(
+ StoragePoolType.CLVM, StoragePoolType.CLVM_NG,
+ "/vg1", "/vg1"));
+
+ assertTrue(volumeService.isLightweightMigrationNeeded(
+ StoragePoolType.CLVM_NG, StoragePoolType.CLVM,
+ "/vg1", "/vg1"));
+ }
+
+ @Test
+ public void testIsLightweightMigrationNeeded_NullVolumePath() {
+ assertFalse(volumeService.isLightweightMigrationNeeded(
+ StoragePoolType.CLVM, StoragePoolType.CLVM,
+ null, "/vg1"));
+ }
+
+ @Test
+ public void testIsLightweightMigrationNeeded_NullVmPath() {
+ assertFalse(volumeService.isLightweightMigrationNeeded(
+ StoragePoolType.CLVM, StoragePoolType.CLVM,
+ "/vg1", null));
+ }
+
+ @Test
+ public void testIsLightweightMigrationNeeded_BothPathsNull() {
+ assertFalse(volumeService.isLightweightMigrationNeeded(
+ StoragePoolType.CLVM, StoragePoolType.CLVM,
+ null, null));
+ }
+
+ @Test
+ public void testIsLightweightMigrationNeeded_ComplexVGNames() {
+ assertTrue(volumeService.isLightweightMigrationNeeded(
+ StoragePoolType.CLVM, StoragePoolType.CLVM,
+ "/cloudstack-vg-01", "/cloudstack-vg-01"));
+
+ assertFalse(volumeService.isLightweightMigrationNeeded(
+ StoragePoolType.CLVM, StoragePoolType.CLVM,
+ "/cloudstack-vg-01", "/cloudstack-vg-02"));
+ }
+}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
index 55bab118ad00..5d966634d4c6 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
@@ -2484,7 +2484,8 @@ public String getResizeScriptType(final KVMStoragePool pool, final KVMPhysicalDi
} else if ((poolType == StoragePoolType.NetworkFilesystem
|| poolType == StoragePoolType.SharedMountPoint
|| poolType == StoragePoolType.Filesystem
- || poolType == StoragePoolType.Gluster)
+ || poolType == StoragePoolType.Gluster
+ || poolType == StoragePoolType.CLVM_NG)
&& volFormat == PhysicalDiskFormat.QCOW2 ) {
return "QCOW2";
} else if (poolType == StoragePoolType.Linstor) {
@@ -3680,13 +3681,18 @@ public int compare(final DiskTO arg0, final DiskTO arg1) {
final String glusterVolume = pool.getSourceDir().replace("/", "");
disk.defNetworkBasedDisk(glusterVolume + path.replace(mountpoint, ""), pool.getSourceHost(), pool.getSourcePort(), null,
null, devId, diskBusType, DiskProtocol.GLUSTER, DiskDef.DiskFmtType.QCOW2);
- } else if (pool.getType() == StoragePoolType.CLVM || physicalDisk.getFormat() == PhysicalDiskFormat.RAW) {
+ } else if (pool.getType() == StoragePoolType.CLVM || pool.getType() == StoragePoolType.CLVM_NG || physicalDisk.getFormat() == PhysicalDiskFormat.RAW) {
if (volume.getType() == Volume.Type.DATADISK && !(isWindowsTemplate && isUefiEnabled)) {
disk.defBlockBasedDisk(physicalDisk.getPath(), devId, diskBusTypeData);
- }
- else {
+ } else {
disk.defBlockBasedDisk(physicalDisk.getPath(), devId, diskBusType);
}
+
+ // CLVM_NG uses QCOW2 format on block devices, override the default RAW format
+ if (pool.getType() == StoragePoolType.CLVM_NG) {
+ disk.setDiskFormatType(DiskDef.DiskFmtType.QCOW2);
+ }
+
if (pool.getType() == StoragePoolType.Linstor && isQemuDiscardBugFree(diskBusType)) {
disk.setDiscard(DiscardType.UNMAP);
}
@@ -6523,4 +6529,242 @@ public String getHypervisorPath() {
public String getGuestCpuArch() {
return guestCpuArch;
}
+
+ /**
+ * CLVM volume state for migration operations on source host
+ */
+ public enum ClvmVolumeState {
+ /** Shared mode (-asy) - used before migration to allow both hosts to access volume */
+ SHARED("-asy", "shared", "Before migration: activating in shared mode"),
+
+ /** Deactivate (-an) - used after successful migration to release volume on source */
+ DEACTIVATE("-an", "deactivated", "After successful migration: deactivating volume"),
+
+ /** Exclusive mode (-aey) - used after failed migration to revert to original exclusive state */
+ EXCLUSIVE("-aey", "exclusive", "After failed migration: reverting to exclusive mode");
+
+ private final String lvchangeFlag;
+ private final String description;
+ private final String logMessage;
+
+ ClvmVolumeState(String lvchangeFlag, String description, String logMessage) {
+ this.lvchangeFlag = lvchangeFlag;
+ this.description = description;
+ this.logMessage = logMessage;
+ }
+
+ public String getLvchangeFlag() {
+ return lvchangeFlag;
+ }
+
+ public String getDescription() {
+ return description;
+ }
+
+ public String getLogMessage() {
+ return logMessage;
+ }
+ }
+
+ public static void modifyClvmVolumesStateForMigration(List disks, LibvirtComputingResource resource,
+ VirtualMachineTO vmSpec, ClvmVolumeState state) {
+ for (DiskDef disk : disks) {
+ if (isClvmVolume(disk, resource, vmSpec)) {
+ String volumePath = disk.getDiskPath();
+ try {
+ modifyClvmVolumeState(volumePath, state.getLvchangeFlag(), state.getDescription(), state.getLogMessage());
+ } catch (Exception e) {
+ LOGGER.error("[CLVM Migration] Exception while setting volume [{}] to {} state: {}",
+ volumePath, state.getDescription(), e.getMessage(), e);
+ }
+ }
+ }
+ }
+
+ private static void modifyClvmVolumeState(String volumePath, String lvchangeFlag,
+ String stateDescription, String logMessage) {
+ try {
+ LOGGER.info("{} for volume [{}]", logMessage, volumePath);
+
+ Script cmd = new Script("lvchange", Duration.standardSeconds(300), LOGGER);
+ cmd.add(lvchangeFlag);
+ cmd.add(volumePath);
+
+ String result = cmd.execute();
+ if (result != null) {
+ String errorMsg = String.format(
+ "Failed to set volume [%s] to %s state. Command result: %s",
+ volumePath, stateDescription, result);
+ LOGGER.error(errorMsg);
+ throw new CloudRuntimeException(errorMsg);
+ } else {
+ LOGGER.info("Successfully set volume [{}] to {} state.",
+ volumePath, stateDescription);
+ }
+ } catch (CloudRuntimeException e) {
+ throw e;
+ } catch (Exception e) {
+ String errorMsg = String.format(
+ "Exception while setting volume [%s] to %s state: %s",
+ volumePath, stateDescription, e.getMessage());
+ LOGGER.error(errorMsg, e);
+ throw new CloudRuntimeException(errorMsg, e);
+ }
+ }
+
+ public static void activateClvmVolumeExclusive(String volumePath) {
+ modifyClvmVolumeState(volumePath, ClvmVolumeState.EXCLUSIVE.getLvchangeFlag(),
+ ClvmVolumeState.EXCLUSIVE.getDescription(),
+ "Activating CLVM volume in exclusive mode");
+ }
+
+ public static void deactivateClvmVolume(String volumePath) {
+ try {
+ modifyClvmVolumeState(volumePath, ClvmVolumeState.DEACTIVATE.getLvchangeFlag(),
+ ClvmVolumeState.DEACTIVATE.getDescription(),
+ "Deactivating CLVM volume");
+ } catch (Exception e) {
+ LOGGER.warn("Failed to deactivate CLVM volume {}: {}", volumePath, e.getMessage());
+ }
+ }
+
+ public static void setClvmVolumeToSharedMode(String volumePath) {
+ try {
+ modifyClvmVolumeState(volumePath, ClvmVolumeState.SHARED.getLvchangeFlag(),
+ ClvmVolumeState.SHARED.getDescription(),
+ "Setting CLVM volume to shared mode");
+ } catch (Exception e) {
+ LOGGER.warn("Failed to set CLVM volume {} to shared mode: {}", volumePath, e.getMessage());
+ }
+ }
+
+ /**
+ * Determines if a disk is on a CLVM storage pool by checking the actual pool type from VirtualMachineTO.
+ * This is the most reliable method as it uses CloudStack's own storage pool information.
+ *
+ * @param disk The disk definition to check
+ * @param resource The LibvirtComputingResource instance (unused but kept for compatibility)
+ * @param vmSpec The VirtualMachineTO specification containing disk and pool information
+ * @return true if the disk is on a CLVM storage pool, false otherwise
+ */
+ private static boolean isClvmVolume(DiskDef disk, LibvirtComputingResource resource, VirtualMachineTO vmSpec) {
+ String diskPath = disk.getDiskPath();
+ if (diskPath == null || vmSpec == null) {
+ return false;
+ }
+
+ try {
+ if (vmSpec.getDisks() != null) {
+ for (DiskTO diskTO : vmSpec.getDisks()) {
+ if (diskTO.getData() instanceof VolumeObjectTO) {
+ VolumeObjectTO volumeTO = (VolumeObjectTO) diskTO.getData();
+ if (diskPath.equals(volumeTO.getPath()) || diskPath.equals(diskTO.getPath())) {
+ DataStoreTO dataStore = volumeTO.getDataStore();
+ if (dataStore instanceof PrimaryDataStoreTO) {
+ PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO) dataStore;
+ boolean isClvm = StoragePoolType.CLVM == primaryStore.getPoolType() ||
+ StoragePoolType.CLVM_NG == primaryStore.getPoolType();
+ LOGGER.debug("Disk {} identified as CLVM/CLVM_NG={} via VirtualMachineTO pool type: {}",
+ diskPath, isClvm, primaryStore.getPoolType());
+ return isClvm;
+ }
+ }
+ }
+ }
+ }
+
+ // Fallback: Check VG attributes using vgs command (reliable)
+ // CLVM VGs have the 'c' (clustered) or 's' (shared) flag in their attributes
+ // Example: 'wz--ns' = shared, 'wz--n-' = not clustered
+ if (diskPath.startsWith("/dev/") && !diskPath.contains("/dev/mapper/")) {
+ String vgName = extractVolumeGroupFromPath(diskPath);
+ if (vgName != null) {
+ boolean isClustered = checkIfVolumeGroupIsClustered(vgName);
+ LOGGER.debug("Disk {} VG {} identified as clustered={} via vgs attribute check",
+ diskPath, vgName, isClustered);
+ return isClustered;
+ }
+ }
+
+ } catch (Exception e) {
+ LOGGER.error("Error determining if volume {} is CLVM: {}", diskPath, e.getMessage(), e);
+ }
+
+ return false;
+ }
+
+ /**
+ * Extracts the volume group name from a device path.
+ *
+ * @param devicePath The device path (e.g., /dev/vgname/lvname)
+ * @return The volume group name, or null if cannot be determined
+ */
+ static String extractVolumeGroupFromPath(String devicePath) {
+ if (devicePath == null || !devicePath.startsWith("/dev/")) {
+ return null;
+ }
+
+ // Format: /dev//
+ String[] parts = devicePath.split("/");
+ if (parts.length >= 3) {
+ return parts[2]; // ["", "dev", "vgname", ...]
+ }
+
+ return null;
+ }
+
+ /**
+ * Checks if a volume group is clustered (CLVM) by examining its attributes.
+ * Uses 'vgs' command to check for the clustered/shared flag in VG attributes.
+ *
+ * VG Attr format (6 characters): wz--nc or wz--ns
+ * Position 6: Clustered flag - 'c' = CLVM (clustered), 's' = shared (lvmlockd), '-' = not clustered
+ *
+ * @param vgName The volume group name
+ * @return true if the VG is clustered or shared, false otherwise
+ */
+ static boolean checkIfVolumeGroupIsClustered(String vgName) {
+ if (vgName == null) {
+ return false;
+ }
+
+ try {
+ // Use vgs with --noheadings and -o attr to get VG attributes
+ OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
+ Script vgsCmd = new Script("vgs", 5000, LOGGER);
+ vgsCmd.add("--noheadings");
+ vgsCmd.add("--unbuffered");
+ vgsCmd.add("-o");
+ vgsCmd.add("vg_attr");
+ vgsCmd.add(vgName);
+
+ String result = vgsCmd.execute(parser);
+
+ if (result == null && parser.getLines() != null) {
+ String output = parser.getLines();
+ if (output != null && !output.isEmpty()) {
+ // Parse VG attributes (format: wz--nc or wz--ns or wz--n-)
+ // Position 6 (0-indexed 5) indicates clustering/sharing:
+ // 'c' = clustered (CLVM) or 's' = shared (lvmlockd) or '-' = not clustered/shared
+ String vgAttr = output.trim();
+ if (vgAttr.length() >= 6) {
+ char clusterFlag = vgAttr.charAt(5); // Position 6 (0-indexed 5)
+ boolean isClustered = (clusterFlag == 'c' || clusterFlag == 's');
+ LOGGER.debug("VG {} has attributes '{}', cluster/shared flag '{}' = {}",
+ vgName, vgAttr, clusterFlag, isClustered);
+ return isClustered;
+ } else {
+ LOGGER.warn("VG {} attributes '{}' have unexpected format (expected 6+ chars)", vgName, vgAttr);
+ }
+ }
+ } else {
+ LOGGER.warn("Failed to get VG attributes for {}: {}", vgName, result);
+ }
+
+ } catch (Exception e) {
+ LOGGER.debug("Error checking if VG {} is clustered: {}", vgName, e.getMessage());
+ }
+
+ return false;
+ }
}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtClvmLockTransferCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtClvmLockTransferCommandWrapper.java
new file mode 100644
index 000000000000..907e39e59a99
--- /dev/null
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtClvmLockTransferCommandWrapper.java
@@ -0,0 +1,90 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
+
+import com.cloud.agent.api.Answer;
+import org.apache.cloudstack.storage.command.ClvmLockTransferCommand;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+import com.cloud.utils.script.Script;
+
+@ResourceWrapper(handles = ClvmLockTransferCommand.class)
+public class LibvirtClvmLockTransferCommandWrapper
+ extends CommandWrapper {
+
+ protected Logger logger = LogManager.getLogger(getClass());
+
+ @Override
+ public Answer execute(ClvmLockTransferCommand cmd, LibvirtComputingResource serverResource) {
+ String lvPath = cmd.getLvPath();
+ ClvmLockTransferCommand.Operation operation = cmd.getOperation();
+ String volumeUuid = cmd.getVolumeUuid();
+
+ logger.info("Executing CLVM lock transfer: operation={}, lv={}, volume={}",
+ operation, lvPath, volumeUuid);
+
+ try {
+ String lvchangeOpt;
+ String operationDesc;
+ switch (operation) {
+ case DEACTIVATE:
+ lvchangeOpt = "-an";
+ operationDesc = "deactivated";
+ break;
+ case ACTIVATE_EXCLUSIVE:
+ lvchangeOpt = "-aey";
+ operationDesc = "activated exclusively";
+ break;
+ case ACTIVATE_SHARED:
+ lvchangeOpt = "-asy";
+ operationDesc = "activated in shared mode";
+ break;
+ default:
+ return new Answer(cmd, false, "Unknown operation: " + operation);
+ }
+
+ Script script = new Script("/usr/sbin/lvchange", 30000, logger);
+ script.add(lvchangeOpt);
+ script.add(lvPath);
+
+ String result = script.execute();
+
+ if (result != null) {
+ logger.error("CLVM lock transfer failed for volume {}: {}",
+ volumeUuid, result);
+ return new Answer(cmd, false,
+ String.format("lvchange %s %s failed: %s", lvchangeOpt, lvPath, result));
+ }
+
+ logger.info("Successfully executed CLVM lock transfer: {} {} for volume {}",
+ lvchangeOpt, lvPath, volumeUuid);
+
+ return new Answer(cmd, true,
+ String.format("Successfully %s CLVM volume %s", operationDesc, volumeUuid));
+
+ } catch (Exception e) {
+ logger.error("Exception during CLVM lock transfer for volume {}: {}",
+ volumeUuid, e.getMessage(), e);
+ return new Answer(cmd, false, "Exception: " + e.getMessage());
+ }
+ }
+}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
index 43607edc53a5..d4b57c8f9055 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
@@ -1,5 +1,3 @@
-//
-// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
@@ -42,9 +40,16 @@
import javax.xml.transform.TransformerException;
import com.cloud.agent.api.VgpuTypesInfo;
+import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.GPUDeviceTO;
import com.cloud.hypervisor.kvm.resource.LibvirtGpuDef;
import com.cloud.hypervisor.kvm.resource.LibvirtXMLParser;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+import com.cloud.storage.Storage;
+import com.cloud.utils.Ternary;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.vm.VirtualMachine;
import org.apache.cloudstack.utils.security.ParserUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.collections4.CollectionUtils;
@@ -69,7 +74,6 @@
import com.cloud.agent.api.MigrateAnswer;
import com.cloud.agent.api.MigrateCommand;
import com.cloud.agent.api.MigrateCommand.MigrateDiskInfo;
-import com.cloud.agent.api.to.DataTO;
import com.cloud.agent.api.to.DiskTO;
import com.cloud.agent.api.to.DpdkTO;
import com.cloud.agent.api.to.VirtualMachineTO;
@@ -82,11 +86,6 @@
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.InterfaceDef;
import com.cloud.hypervisor.kvm.resource.MigrateKVMAsync;
import com.cloud.hypervisor.kvm.resource.VifDriver;
-import com.cloud.resource.CommandWrapper;
-import com.cloud.resource.ResourceWrapper;
-import com.cloud.utils.Ternary;
-import com.cloud.utils.exception.CloudRuntimeException;
-import com.cloud.vm.VirtualMachine;
@ResourceWrapper(handles = MigrateCommand.class)
public final class LibvirtMigrateCommandWrapper extends CommandWrapper {
@@ -117,7 +116,8 @@ public Answer execute(final MigrateCommand command, final LibvirtComputingResour
Command.State commandState = null;
List ifaces = null;
- List disks;
+ List disks = new ArrayList<>();
+ VirtualMachineTO to = null;
Domain dm = null;
Connect dconn = null;
@@ -136,7 +136,7 @@ public Answer execute(final MigrateCommand command, final LibvirtComputingResour
if (logger.isDebugEnabled()) {
logger.debug(String.format("Found domain with name [%s]. Starting VM migration to host [%s].", vmName, destinationUri));
}
- VirtualMachineTO to = command.getVirtualMachine();
+ to = command.getVirtualMachine();
dm = conn.domainLookupByName(vmName);
/*
@@ -336,6 +336,12 @@ Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0.
logger.debug(String.format("Cleaning the disks of VM [%s] in the source pool after VM migration finished.", vmName));
}
resumeDomainIfPaused(destDomain, vmName);
+
+ // Deactivate CLVM volumes on source host after successful migration
+ if (to != null) {
+ LibvirtComputingResource.modifyClvmVolumesStateForMigration(disks, libvirtComputingResource, to, LibvirtComputingResource.ClvmVolumeState.DEACTIVATE);
+ }
+
deleteOrDisconnectDisksOnSourcePool(libvirtComputingResource, migrateDiskInfoList, disks);
libvirtComputingResource.cleanOldSecretsByDiskDef(conn, disks);
}
@@ -382,6 +388,10 @@ Use VIR_DOMAIN_XML_SECURE (value = 1) prior to v1.0.0.
if (destDomain != null) {
destDomain.free();
}
+ // Revert CLVM volumes to exclusive mode on failure
+ if (to != null) {
+ LibvirtComputingResource.modifyClvmVolumesStateForMigration(disks, libvirtComputingResource, to, LibvirtComputingResource.ClvmVolumeState.EXCLUSIVE);
+ }
} catch (final LibvirtException e) {
logger.trace("Ignoring libvirt error.", e);
}
@@ -681,7 +691,7 @@ protected String replaceDpdkInterfaces(String xmlDesc, Map dpdkP
protected void deleteOrDisconnectDisksOnSourcePool(final LibvirtComputingResource libvirtComputingResource, final List migrateDiskInfoList,
List disks) {
for (DiskDef disk : disks) {
- MigrateDiskInfo migrateDiskInfo = searchDiskDefOnMigrateDiskInfoList(migrateDiskInfoList, disk);
+ MigrateCommand.MigrateDiskInfo migrateDiskInfo = searchDiskDefOnMigrateDiskInfoList(migrateDiskInfoList, disk);
if (migrateDiskInfo != null && migrateDiskInfo.isSourceDiskOnStorageFileSystem()) {
deleteLocalVolume(disk.getDiskPath());
} else {
@@ -798,7 +808,10 @@ protected String replaceStorage(String xmlDesc, Map]*type=['\"]vnc['\"][^>]*passwd=['\"])([^'\"]*)(['\"])",
"$1*****$3");
}
+
+ /**
+ * Checks if any of the destination disks in the migration target a CLVM or CLVM_NG storage pool.
+ * This is used to determine if incremental migration should be disabled to avoid libvirt
+ * precreate errors with QCOW2-on-LVM setups.
+ *
+ * @param mapMigrateStorage the map containing migration disk information with destination pool types
+ * @return true if any destination disk targets CLVM or CLVM_NG, false otherwise
+ */
+ protected boolean hasClvmDestinationDisks(Map mapMigrateStorage) {
+ if (MapUtils.isEmpty(mapMigrateStorage)) {
+ return false;
+ }
+
+ try {
+ for (Map.Entry entry : mapMigrateStorage.entrySet()) {
+ MigrateCommand.MigrateDiskInfo diskInfo = entry.getValue();
+ if (isClvmBlockDevice(diskInfo)) {
+ logger.debug("Found disk targeting CLVM/CLVM_NG destination pool");
+ return true;
+ }
+ }
+ } catch (final Exception e) {
+ logger.debug("Failed to check for CLVM destination disks: {}. Assuming no CLVM disks.", e.getMessage());
+ }
+
+ return false;
+ }
+
+ /**
+ * Filters out disk labels that target CLVM or CLVM_NG destination pools from the migration disk labels set.
+ * CLVM/CLVM_NG disks are pre-created/activated on the destination before VM migration,
+ * so they should not be migrated by libvirt.
+ *
+ * @param migrateDiskLabels the original set of disk labels to migrate
+ * @param diskDefinitions the list of disk definitions to map labels to paths
+ * @param mapMigrateStorage the map containing migration disk information with destination pool types
+ * @return a new set with CLVM/CLVM_NG disk labels removed
+ */
+ protected Set filterOutClvmDisks(Set migrateDiskLabels,
+ List diskDefinitions,
+ Map mapMigrateStorage) {
+ if (migrateDiskLabels == null || migrateDiskLabels.isEmpty()) {
+ return migrateDiskLabels;
+ }
+
+ Set filteredLabels = new HashSet<>(migrateDiskLabels);
+
+ try {
+ Set clvmDiskPaths = new HashSet<>();
+ for (Map.Entry entry : mapMigrateStorage.entrySet()) {
+ MigrateCommand.MigrateDiskInfo diskInfo = entry.getValue();
+ if (isClvmBlockDevice(diskInfo)) {
+ clvmDiskPaths.add(entry.getKey());
+ logger.debug("Identified CLVM/CLVM_NG destination disk: {}", entry.getKey());
+ }
+ }
+
+ // Map disk paths to labels and remove CLVM disk labels from the migration set
+ if (!clvmDiskPaths.isEmpty()) {
+ for (String clvmDiskPath : clvmDiskPaths) {
+ for (DiskDef diskDef : diskDefinitions) {
+ String diskPath = diskDef.getDiskPath();
+ if (diskPath != null && diskPath.contains(clvmDiskPath)) {
+ String label = diskDef.getDiskLabel();
+ if (filteredLabels.remove(label)) {
+ logger.info("Excluded disk label {} (path: {}) from libvirt migration - CLVM/CLVM_NG destination",
+ label, clvmDiskPath);
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ } catch (final Exception e) {
+ logger.warn("Failed to filter CLVM disks: {}. Proceeding with original disk list.", e.getMessage());
+ return migrateDiskLabels;
+ }
+
+ return filteredLabels;
+ }
+
+ private boolean isClvmBlockDevice(MigrateCommand.MigrateDiskInfo diskInfo) {
+ if (diskInfo == null ||diskInfo.getDestPoolType() == null) {
+ return false;
+ }
+ return (Storage.StoragePoolType.CLVM.equals(diskInfo.getDestPoolType()) || Storage.StoragePoolType.CLVM_NG.equals(diskInfo.getDestPoolType()));
+ }
+
+ /**
+ * Determines if the driver type should be updated during migration based on CLVM involvement.
+ * The driver type needs to be updated when:
+ * - Managed storage is being migrated, OR
+ * - Source pool is CLVM or CLVM_NG, OR
+ * - Destination pool is CLVM or CLVM_NG
+ *
+ * This ensures the libvirt XML driver type matches the destination format (raw/qcow2/etc).
+ *
+ * @param migrateStorageManaged true if migrating managed storage
+ * @param migrateDiskInfo the migration disk information containing source and destination pool types
+ * @return true if driver type should be updated, false otherwise
+ */
+ private boolean shouldUpdateDriverTypeForMigration(boolean migrateStorageManaged,
+ MigrateCommand.MigrateDiskInfo migrateDiskInfo) {
+ boolean sourceIsClvm = Storage.StoragePoolType.CLVM == migrateDiskInfo.getSourcePoolType() ||
+ Storage.StoragePoolType.CLVM_NG == migrateDiskInfo.getSourcePoolType();
+
+ boolean destIsClvm = Storage.StoragePoolType.CLVM == migrateDiskInfo.getDestPoolType() ||
+ Storage.StoragePoolType.CLVM_NG == migrateDiskInfo.getDestPoolType();
+
+ boolean isClvmRelatedMigration = sourceIsClvm || destIsClvm;
+ return migrateStorageManaged || isClvmRelatedMigration;
+ }
}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtModifyStoragePoolCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtModifyStoragePoolCommandWrapper.java
index 990cefda8f33..bc22d7bfd70a 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtModifyStoragePoolCommandWrapper.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtModifyStoragePoolCommandWrapper.java
@@ -52,9 +52,19 @@ public Answer execute(final ModifyStoragePoolCommand command, final LibvirtCompu
final KVMStoragePool storagepool;
try {
+ Map poolDetails = command.getDetails();
+ if (poolDetails == null) {
+ poolDetails = new HashMap<>();
+ }
+
+ // Ensure CLVM secure zero-fill setting has a default value if not provided by MS
+ if (!poolDetails.containsKey(KVMStoragePool.CLVM_SECURE_ZERO_FILL)) {
+ poolDetails.put(KVMStoragePool.CLVM_SECURE_ZERO_FILL, "false");
+ }
+
storagepool =
storagePoolMgr.createStoragePool(command.getPool().getUuid(), command.getPool().getHost(), command.getPool().getPort(), command.getPool().getPath(), command.getPool()
- .getUserInfo(), command.getPool().getType(), command.getDetails());
+ .getUserInfo(), command.getPool().getType(), poolDetails);
if (storagepool == null) {
return new Answer(command, false, " Failed to create storage pool");
}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPostMigrationCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPostMigrationCommandWrapper.java
new file mode 100644
index 000000000000..8e5cb83e89e9
--- /dev/null
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPostMigrationCommandWrapper.java
@@ -0,0 +1,83 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import java.util.List;
+
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
+import org.libvirt.Connect;
+import org.libvirt.LibvirtException;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.PostMigrationAnswer;
+import com.cloud.agent.api.PostMigrationCommand;
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.hypervisor.kvm.resource.LibvirtConnection;
+import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+
+/**
+ * Wrapper for PostMigrationCommand on KVM hypervisor.
+ * Handles post-migration tasks on the destination host after a VM has been successfully migrated.
+ * Primary responsibility: Convert CLVM volumes from shared mode to exclusive mode on destination.
+ */
+@ResourceWrapper(handles = PostMigrationCommand.class)
+public final class LibvirtPostMigrationCommandWrapper extends CommandWrapper {
+
+ protected Logger logger = LogManager.getLogger(getClass());
+
+ @Override
+ public Answer execute(final PostMigrationCommand command, final LibvirtComputingResource libvirtComputingResource) {
+ final VirtualMachineTO vm = command.getVirtualMachine();
+ final String vmName = command.getVmName();
+
+ if (vm == null || vmName == null) {
+ return new PostMigrationAnswer(command, "VM or VM name is null");
+ }
+
+ logger.debug("Executing post-migration tasks for VM {} on destination host", vmName);
+
+ try {
+ final Connect conn = LibvirtConnection.getConnectionByVmName(vmName);
+
+ List disks = libvirtComputingResource.getDisks(conn, vmName);
+ logger.debug("[CLVM Post-Migration] Processing volumes for VM {} to claim exclusive locks on any CLVM volumes", vmName);
+ LibvirtComputingResource.modifyClvmVolumesStateForMigration(
+ disks,
+ libvirtComputingResource,
+ vm,
+ LibvirtComputingResource.ClvmVolumeState.EXCLUSIVE
+ );
+
+ logger.debug("Successfully completed post-migration tasks for VM {}", vmName);
+ return new PostMigrationAnswer(command);
+
+ } catch (final LibvirtException e) {
+ logger.error("Libvirt error during post-migration for VM {}: {}", vmName, e.getMessage(), e);
+ return new PostMigrationAnswer(command, e);
+ } catch (final Exception e) {
+ logger.error("Error during post-migration for VM {}: {}", vmName, e.getMessage(), e);
+ return new PostMigrationAnswer(command, e);
+ }
+ }
+}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPreMigrationCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPreMigrationCommandWrapper.java
new file mode 100644
index 000000000000..71fdb3df2316
--- /dev/null
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPreMigrationCommandWrapper.java
@@ -0,0 +1,85 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+//
+
+package com.cloud.hypervisor.kvm.resource.wrapper;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.PreMigrationCommand;
+import com.cloud.agent.api.to.VirtualMachineTO;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
+import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef;
+import com.cloud.resource.CommandWrapper;
+import com.cloud.resource.ResourceWrapper;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
+import org.libvirt.Connect;
+import org.libvirt.Domain;
+import org.libvirt.LibvirtException;
+
+import java.util.List;
+
+/**
+ * Handles PreMigrationCommand on the source host before live migration.
+ * Converts CLVM volume locks from exclusive to shared mode so the destination host can access them.
+ */
+@ResourceWrapper(handles = PreMigrationCommand.class)
+public class LibvirtPreMigrationCommandWrapper extends CommandWrapper {
+ protected Logger logger = LogManager.getLogger(getClass());
+
+ @Override
+ public Answer execute(PreMigrationCommand command, LibvirtComputingResource libvirtComputingResource) {
+ String vmName = command.getVmName();
+ VirtualMachineTO vmSpec = command.getVirtualMachine();
+
+ logger.info("Preparing source host for migration of VM: {}", vmName);
+
+ Connect conn = null;
+ Domain dm = null;
+
+ try {
+ LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
+ conn = libvirtUtilitiesHelper.getConnectionByVmName(vmName);
+ dm = conn.domainLookupByName(vmName);
+
+ List disks = libvirtComputingResource.getDisks(conn, vmName);
+ logger.info("Converting CLVM volumes to shared mode for VM: {}", vmName);
+ LibvirtComputingResource.modifyClvmVolumesStateForMigration(
+ disks,
+ libvirtComputingResource,
+ vmSpec,
+ LibvirtComputingResource.ClvmVolumeState.SHARED
+ );
+
+ logger.info("Successfully prepared source host for migration of VM: {}", vmName);
+ return new Answer(command, true, "Source host prepared for migration");
+
+ } catch (LibvirtException e) {
+ logger.error("Failed to prepare source host for migration of VM: {}", vmName, e);
+ return new Answer(command, false, "Failed to prepare source host: " + e.getMessage());
+ } finally {
+ if (dm != null) {
+ try {
+ dm.free();
+ } catch (LibvirtException e) {
+ logger.warn("Failed to free domain {}: {}", vmName, e.getMessage());
+ }
+ }
+ }
+ }
+}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java
index d9323df4477d..03840cd7fdfa 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtPrepareForMigrationCommandWrapper.java
@@ -21,6 +21,7 @@
import java.net.URISyntaxException;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import org.apache.cloudstack.storage.configdrive.ConfigDrive;
@@ -124,6 +125,20 @@ public Answer execute(final PrepareForMigrationCommand command, final LibvirtCom
return new PrepareForMigrationAnswer(command, "failed to connect physical disks to host");
}
+ // Activate CLVM volumes in shared mode on destination host for live migration
+ try {
+ List disks = libvirtComputingResource.getDisks(conn, vm.getName());
+ LibvirtComputingResource.modifyClvmVolumesStateForMigration(
+ disks,
+ libvirtComputingResource,
+ vm,
+ LibvirtComputingResource.ClvmVolumeState.SHARED
+ );
+ } catch (Exception e) {
+ logger.warn("Failed to activate CLVM volumes in shared mode on destination for VM {}: {}",
+ vm.getName(), e.getMessage(), e);
+ }
+
logger.info("Successfully prepared destination host for migration of VM {}", vm.getName());
return createPrepareForMigrationAnswer(command, dpdkInterfaceMapping, libvirtComputingResource, vm);
} catch (final LibvirtException | CloudRuntimeException | InternalErrorException | URISyntaxException e) {
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java
index f2af46d4cc8a..a43b584dd6d6 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtResizeVolumeCommandWrapper.java
@@ -113,7 +113,8 @@ public Answer execute(final ResizeVolumeCommand command, final LibvirtComputingR
logger.debug("Resizing volume: " + path + ", from: " + toHumanReadableSize(currentSize) + ", to: " + toHumanReadableSize(newSize) + ", type: " + type + ", name: " + vmInstanceName + ", shrinkOk: " + shrinkOk);
/* libvirt doesn't support resizing (C)LVM devices, and corrupts QCOW2 in some scenarios, so we have to do these via qemu-img */
- if (pool.getType() != StoragePoolType.CLVM && pool.getType() != StoragePoolType.Linstor && pool.getType() != StoragePoolType.PowerFlex
+ if (pool.getType() != StoragePoolType.CLVM && pool.getType() != StoragePoolType.CLVM_NG
+ && pool.getType() != StoragePoolType.Linstor && pool.getType() != StoragePoolType.PowerFlex
&& vol.getFormat() != PhysicalDiskFormat.QCOW2) {
logger.debug("Volume " + path + " can be resized by libvirt. Asking libvirt to resize the volume.");
try {
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertSnapshotCommandWrapper.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertSnapshotCommandWrapper.java
index 5d76d140f229..16c1a5a2fac1 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertSnapshotCommandWrapper.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtRevertSnapshotCommandWrapper.java
@@ -117,7 +117,7 @@ public Answer execute(final RevertSnapshotCommand command, final LibvirtComputin
secondaryStoragePool = storagePoolMgr.getStoragePoolByURI(snapshotImageStore.getUrl());
}
- if (primaryPool.getType() == StoragePoolType.CLVM) {
+ if (primaryPool.getType() == StoragePoolType.CLVM || primaryPool.getType() == StoragePoolType.CLVM_NG) {
Script cmd = new Script(libvirtComputingResource.manageSnapshotPath(), libvirtComputingResource.getCmdsTimeout(), logger);
cmd.add("-v", getFullPathAccordingToStorage(secondaryStoragePool, snapshotRelPath));
cmd.add("-n", snapshotDisk.getName());
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java
index 8dd2116e1235..ea346a335850 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java
@@ -38,6 +38,8 @@ public interface KVMStoragePool {
public static final long HeartBeatUpdateMaxTries = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KVM_HEARTBEAT_UPDATE_MAX_TRIES);
public static final long HeartBeatUpdateRetrySleep = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KVM_HEARTBEAT_UPDATE_RETRY_SLEEP);
public static final long HeartBeatCheckerTimeout = AgentPropertiesFileHandler.getPropertyValue(AgentProperties.KVM_HEARTBEAT_CHECKER_TIMEOUT);
+ public static final String CLVM_SECURE_ZERO_FILL = "clvmsecurezerofill";
+
public default KVMPhysicalDisk createPhysicalDisk(String volumeUuid, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, Long usableSize, byte[] passphrase) {
return createPhysicalDisk(volumeUuid, format, provisioningType, size, passphrase);
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java
index 6665cf625e2f..6e1c4f49e1af 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java
@@ -288,12 +288,34 @@ public KVMStoragePool getStoragePool(StoragePoolType type, String uuid, boolean
}
if (pool instanceof LibvirtStoragePool) {
- addPoolDetails(uuid, (LibvirtStoragePool) pool);
+ LibvirtStoragePool libvirtPool = (LibvirtStoragePool) pool;
+ addPoolDetails(uuid, libvirtPool);
+
+ updatePoolTypeIfApplicable(libvirtPool, pool, type, uuid);
}
return pool;
}
+ private void updatePoolTypeIfApplicable(LibvirtStoragePool libvirtPool, KVMStoragePool pool,
+ StoragePoolType type, String uuid) {
+ StoragePoolType correctType = type;
+ if (correctType == null || correctType == StoragePoolType.CLVM) {
+ StoragePoolInformation info = _storagePools.get(uuid);
+ if (info != null && info.getPoolType() != null) {
+ correctType = info.getPoolType();
+ }
+ }
+
+ if (correctType != null && correctType != pool.getType() &&
+ (correctType == StoragePoolType.CLVM || correctType == StoragePoolType.CLVM_NG) &&
+ (pool.getType() == StoragePoolType.CLVM || pool.getType() == StoragePoolType.CLVM_NG)) {
+ logger.debug("Correcting pool type from {} to {} for pool {} based on caller/cached information",
+ pool.getType(), correctType, uuid);
+ libvirtPool.setType(correctType);
+ }
+ }
+
/**
* As the class {@link LibvirtStoragePool} is constrained to the {@link org.libvirt.StoragePool} class, there is no way of saving a generic parameter such as the details, hence,
* this method was created to always make available the details of libvirt primary storages for when they are needed.
@@ -450,6 +472,10 @@ public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String n
return adaptor.createDiskFromTemplate(template, name,
PhysicalDiskFormat.RAW, provisioningType,
size, destPool, timeout, passphrase);
+ } else if (destPool.getType() == StoragePoolType.CLVM_NG) {
+ return adaptor.createDiskFromTemplate(template, name,
+ PhysicalDiskFormat.QCOW2, provisioningType,
+ size, destPool, timeout, passphrase);
} else if (template.getFormat() == PhysicalDiskFormat.DIR) {
return adaptor.createDiskFromTemplate(template, name,
PhysicalDiskFormat.DIR, provisioningType,
@@ -491,6 +517,11 @@ public KVMPhysicalDisk createPhysicalDiskFromDirectDownloadTemplate(String templ
return adaptor.createTemplateFromDirectDownloadFile(templateFilePath, destTemplatePath, destPool, format, timeout);
}
+ public void createTemplateOnClvmNg(String templatePath, String templateUuid, int timeout, KVMStoragePool pool) {
+ LibvirtStorageAdaptor adaptor = (LibvirtStorageAdaptor) getStorageAdaptor(pool.getType());
+ adaptor.createTemplateOnClvmNg(templatePath, templateUuid, timeout, pool);
+ }
+
public Ternary, String> prepareStorageClient(StoragePoolType type, String uuid, Map details) {
StorageAdaptor adaptor = getStorageAdaptor(type);
return adaptor.prepareStorageClient(uuid, details);
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
index 030d9747d6cd..a4d55dc6a7d7 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
@@ -223,6 +223,26 @@ public class KVMStorageProcessor implements StorageProcessor {
" \n" +
"";
+ private static final String DUMMY_VM_XML_BLOCK = "\n" +
+ " %s\n" +
+ " 256\n" +
+ " 256\n" +
+ " 1\n" +
+ " \n" +
+ " hvm\n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " %s\n" +
+ " \n" +
+ " \n"+
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ " \n" +
+ "";
+
public KVMStorageProcessor(final KVMStoragePoolManager storagePoolMgr, final LibvirtComputingResource resource) {
this.storagePoolMgr = storagePoolMgr;
@@ -344,15 +364,28 @@ public Answer copyTemplateToPrimaryStorage(final CopyCommand cmd) {
path = destTempl.getUuid();
}
- if (path != null && !storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) {
- logger.warn("Failed to connect physical disk at path: {}, in storage pool [id: {}, name: {}]", path, primaryStore.getUuid(), primaryStore.getName());
- return new PrimaryStorageDownloadAnswer("Failed to spool template disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid());
- }
+ if (primaryPool.getType() == StoragePoolType.CLVM_NG) {
+ logger.info("Copying template {} to CLVM_NG pool {}",
+ destTempl.getUuid(), primaryPool.getUuid());
+
+ try {
+ storagePoolMgr.createTemplateOnClvmNg(tmplVol.getPath(), path, cmd.getWaitInMillSeconds(), primaryPool);
+ primaryVol = primaryPool.getPhysicalDisk("template-" + path);
+ } catch (Exception e) {
+ logger.error("Failed to create CLVM_NG template: {}", e.getMessage(), e);
+ return new PrimaryStorageDownloadAnswer("Failed to create CLVM_NG template: " + e.getMessage());
+ }
+ } else {
+ if (path != null && !storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path, details)) {
+ logger.warn("Failed to connect physical disk at path: {}, in storage pool [id: {}, name: {}]", path, primaryStore.getUuid(), primaryStore.getName());
+ return new PrimaryStorageDownloadAnswer("Failed to spool template disk at path: " + path + ", in storage pool id: " + primaryStore.getUuid());
+ }
- primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds());
+ primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, path != null ? path : destTempl.getUuid(), primaryPool, cmd.getWaitInMillSeconds());
- if (!storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path)) {
- logger.warn("Failed to disconnect physical disk at path: {}, in storage pool [id: {}, name: {}]", path, primaryStore.getUuid(), primaryStore.getName());
+ if (!storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), path)) {
+ logger.warn("Failed to disconnect physical disk at path: {}, in storage pool [id: {}, name: {}]", path, primaryStore.getUuid(), primaryStore.getName());
+ }
}
} else {
primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, UUID.randomUUID().toString(), primaryPool, cmd.getWaitInMillSeconds());
@@ -1115,7 +1148,14 @@ public Answer backupSnapshot(final CopyCommand cmd) {
}
} else {
final Script command = new Script(_manageSnapshotPath, cmd.getWaitInMillSeconds(), logger);
- command.add("-b", isCreatedFromVmSnapshot ? snapshotDisk.getPath() : snapshot.getPath());
+ String backupPath;
+ if (primaryPool.getType() == StoragePoolType.CLVM || primaryPool.getType() == StoragePoolType.CLVM_NG) {
+ backupPath = snapshotDisk.getPath();
+ logger.debug("Using snapshotDisk path for CLVM/CLVM_NG backup: " + backupPath);
+ } else {
+ backupPath = isCreatedFromVmSnapshot ? snapshotDisk.getPath() : snapshot.getPath();
+ }
+ command.add("-b", backupPath);
command.add(NAME_OPTION, snapshotName);
command.add("-p", snapshotDestPath);
@@ -1160,6 +1200,90 @@ public Answer backupSnapshot(final CopyCommand cmd) {
}
}
+ /**
+ * Parse CLVM/CLVM_NG snapshot path and compute MD5 hash.
+ * Snapshot path format: /dev/vgname/volumeuuid/snapshotuuid
+ *
+ * @param snapshotPath The snapshot path from database
+ * @param poolType Storage pool type (for logging)
+ * @return Array of [vgName, volumeUuid, snapshotUuid, md5Hash] or null if invalid
+ */
+ private String[] parseClvmSnapshotPath(String snapshotPath, StoragePoolType poolType) {
+ String[] pathParts = snapshotPath.split("/");
+ if (pathParts.length < 5) {
+ logger.warn("Invalid {} snapshot path format: {}, expected format: /dev/vgname/volume-uuid/snapshot-uuid",
+ poolType, snapshotPath);
+ return null;
+ }
+
+ String vgName = pathParts[2];
+ String volumeUuid = pathParts[3];
+ String snapshotUuid = pathParts[4];
+
+ logger.info("Parsed {} snapshot path - VG: {}, Volume: {}, Snapshot: {}",
+ poolType, vgName, volumeUuid, snapshotUuid);
+
+ String md5Hash = computeMd5Hash(snapshotUuid);
+ logger.debug("Computed MD5 hash for snapshot UUID {}: {}", snapshotUuid, md5Hash);
+
+ return new String[]{vgName, volumeUuid, snapshotUuid, md5Hash};
+ }
+
+ /**
+ * Delete a CLVM or CLVM_NG snapshot using managesnapshot.sh script.
+ * For both CLVM and CLVM_NG, the snapshot path stored in DB is: /dev/vgname/volumeuuid/snapshotuuid
+ * The script handles MD5 transformation and pool-specific deletion commands internally:
+ * - CLVM: Uses lvremove to delete LVM snapshot
+ * - CLVM_NG: Uses qemu-img snapshot -d to delete QCOW2 internal snapshot
+ * This approach is consistent with snapshot creation and backup which also use the script.
+ *
+ * @param snapshotPath The snapshot path from database
+ * @param poolType Storage pool type (CLVM or CLVM_NG)
+ * @param checkExistence If true, checks if snapshot exists before cleanup (for explicit deletion)
+ * If false, always performs cleanup (for post-backup cleanup)
+ * @return true if cleanup was performed, false if snapshot didn't exist (when checkExistence=true)
+ */
+ private boolean deleteClvmSnapshot(String snapshotPath, StoragePoolType poolType, boolean checkExistence) {
+ logger.info("Starting {} snapshot deletion for path: {}, checkExistence: {}", poolType, snapshotPath, checkExistence);
+
+ try {
+ String[] parsed = parseClvmSnapshotPath(snapshotPath, poolType);
+ if (parsed == null) {
+ return false;
+ }
+
+ String vgName = parsed[0];
+ String volumeUuid = parsed[1];
+ String snapshotUuid = parsed[2];
+ String volumePath = "/dev/" + vgName + "/" + volumeUuid;
+
+ // Use managesnapshot.sh script for deletion (consistent with create/backup)
+ // Script handles MD5 transformation and pool-specific commands internally
+ Script deleteCommand = new Script(_manageSnapshotPath, 10000, logger);
+ deleteCommand.add("-d", volumePath);
+ deleteCommand.add("-n", snapshotUuid);
+
+ logger.info("Executing: managesnapshot.sh -d {} -n {}", volumePath, snapshotUuid);
+ String result = deleteCommand.execute();
+
+ if (result == null) {
+ logger.info("Successfully deleted {} snapshot: {}", poolType, snapshotPath);
+ return true;
+ } else {
+ if (checkExistence && result.contains("does not exist")) {
+ logger.info("{} snapshot {} already deleted, no cleanup needed", poolType, snapshotPath);
+ return true;
+ }
+ logger.warn("Failed to delete {} snapshot {}: {}", poolType, snapshotPath, result);
+ return false;
+ }
+
+ } catch (Exception ex) {
+ logger.error("Exception while deleting {} snapshot {}", poolType, snapshotPath, ex);
+ return false;
+ }
+ }
+
private void deleteSnapshotOnPrimary(final CopyCommand cmd, final SnapshotObjectTO snapshot,
KVMStoragePool primaryPool) {
String snapshotPath = snapshot.getPath();
@@ -1172,7 +1296,15 @@ private void deleteSnapshotOnPrimary(final CopyCommand cmd, final SnapshotObject
if ((backupSnapshotAfterTakingSnapshot == null || BooleanUtils.toBoolean(backupSnapshotAfterTakingSnapshot)) && deleteSnapshotOnPrimary) {
try {
- Files.deleteIfExists(Paths.get(snapshotPath));
+ if (primaryPool.getType() == StoragePoolType.CLVM || primaryPool.getType() == StoragePoolType.CLVM_NG) {
+ // Both CLVM and CLVM_NG use the same deletion method via managesnapshot.sh script
+ boolean cleanedUp = deleteClvmSnapshot(snapshotPath, primaryPool.getType(), false);
+ if (!cleanedUp) {
+ logger.info("No need to delete {} snapshot on primary as it doesn't exist: {}", primaryPool.getType(), snapshotPath);
+ }
+ } else {
+ Files.deleteIfExists(Paths.get(snapshotPath));
+ }
} catch (IOException ex) {
logger.error("Failed to delete snapshot [{}] on primary storage [{}].", snapshot.getId(), snapshot.getName(), ex);
}
@@ -1181,6 +1313,26 @@ private void deleteSnapshotOnPrimary(final CopyCommand cmd, final SnapshotObject
}
}
+
+ /**
+ * Compute MD5 hash of a string, matching what managesnapshot.sh does:
+ * echo "${snapshot}" | md5sum -t | awk '{ print $1 }'
+ */
+ private String computeMd5Hash(String input) {
+ try {
+ java.security.MessageDigest md = java.security.MessageDigest.getInstance("MD5");
+ byte[] array = md.digest((input + "\n").getBytes("UTF-8"));
+ StringBuilder sb = new StringBuilder();
+ for (byte b : array) {
+ sb.append(String.format("%02x", b));
+ }
+ return sb.toString();
+ } catch (Exception e) {
+ logger.error("Failed to compute MD5 hash for: {}", input, e);
+ return input;
+ }
+ }
+
protected synchronized void attachOrDetachISO(final Connect conn, final String vmName, String isoPath, final boolean isAttach, Map params, DataStoreTO store) throws
LibvirtException, InternalErrorException {
DiskDef iso = new DiskDef();
@@ -1520,6 +1672,10 @@ protected synchronized void attachOrDetachDisk(final Connect conn, final boolean
if (attachingDisk.getFormat() == PhysicalDiskFormat.QCOW2) {
diskdef.setDiskFormatType(DiskDef.DiskFmtType.QCOW2);
}
+ } else if (attachingPool.getType() == StoragePoolType.CLVM_NG) {
+ // CLVM_NG uses QCOW2 format on block devices
+ diskdef.defBlockBasedDisk(attachingDisk.getPath(), devId, busT);
+ diskdef.setDiskFormatType(DiskDef.DiskFmtType.QCOW2);
} else if (attachingDisk.getFormat() == PhysicalDiskFormat.QCOW2) {
diskdef.defFileBasedDisk(attachingDisk.getPath(), devId, busT, DiskDef.DiskFmtType.QCOW2);
} else if (attachingDisk.getFormat() == PhysicalDiskFormat.RAW) {
@@ -1735,13 +1891,22 @@ public Answer createVolume(final CreateObjectCommand cmd) {
primaryPool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid());
disksize = volume.getSize();
PhysicalDiskFormat format;
- if (volume.getFormat() == null || StoragePoolType.RBD.equals(primaryStore.getPoolType())) {
+
+ MigrationOptions migrationOptions = volume.getMigrationOptions();
+ boolean useDstPoolFormat = useDestPoolFormat(migrationOptions, primaryStore);
+
+ if (volume.getFormat() == null || StoragePoolType.RBD.equals(primaryStore.getPoolType()) || useDstPoolFormat) {
format = primaryPool.getDefaultFormat();
+ if (useDstPoolFormat) {
+ logger.debug("Using destination pool default format {} for volume {} due to CLVM migration (src: {}, dst: {})",
+ format, volume.getUuid(),
+ migrationOptions != null ? migrationOptions.getSrcPoolType() : "unknown",
+ primaryStore.getPoolType());
+ }
} else {
format = PhysicalDiskFormat.valueOf(volume.getFormat().toString().toUpperCase());
}
- MigrationOptions migrationOptions = volume.getMigrationOptions();
if (migrationOptions != null) {
int timeout = migrationOptions.getTimeout();
@@ -1778,6 +1943,29 @@ public Answer createVolume(final CreateObjectCommand cmd) {
}
}
+ /**
+ * For migration involving CLVM (RAW format), use destination pool's default format
+ * CLVM uses RAW format which may not match destination pool's format (e.g., NFS uses QCOW2)
+ * This specifically handles:
+ * - CLVM (RAW) -> NFS/Local/CLVM_NG (QCOW2)
+ * - NFS/Local/CLVM_NG (QCOW2) -> CLVM (RAW)
+ * @param migrationOptions
+ * @param primaryStore
+ * @return
+ */
+ private boolean useDestPoolFormat(MigrationOptions migrationOptions, PrimaryDataStoreTO primaryStore) {
+ boolean useDstPoolFormat = false;
+ if (migrationOptions != null && migrationOptions.getSrcPoolType() != null) {
+ StoragePoolType srcPoolType = migrationOptions.getSrcPoolType();
+ StoragePoolType dstPoolType = primaryStore.getPoolType();
+
+ if (srcPoolType != dstPoolType) {
+ useDstPoolFormat = (srcPoolType == StoragePoolType.CLVM || dstPoolType == StoragePoolType.CLVM);
+ }
+ }
+ return useDstPoolFormat;
+ }
+
/**
* XML to take disk-only snapshot of the VM.
* 1st parameter: snapshot's name;
@@ -1842,8 +2030,10 @@ public Answer createSnapshot(final CreateObjectCommand cmd) {
}
}
- if (DomainInfo.DomainState.VIR_DOMAIN_RUNNING.equals(state) && volume.requiresEncryption()) {
- throw new CloudRuntimeException("VM is running, encrypted volume snapshots aren't supported");
+ if (DomainInfo.DomainState.VIR_DOMAIN_RUNNING.equals(state)) {
+ if (volume.requiresEncryption()) {
+ throw new CloudRuntimeException("VM is running, encrypted volume snapshots aren't supported");
+ }
}
KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid());
@@ -1867,10 +2057,22 @@ public Answer createSnapshot(final CreateObjectCommand cmd) {
if (snapshotSize != null) {
newSnapshot.setPhysicalSize(snapshotSize);
}
- } else if (primaryPool.getType() == StoragePoolType.CLVM) {
- CreateObjectAnswer result = takeClvmVolumeSnapshotOfStoppedVm(disk, snapshotName);
- if (result != null) return result;
- newSnapshot.setPath(snapshotPath);
+ } else if (primaryPool.getType() == StoragePoolType.CLVM || primaryPool.getType() == StoragePoolType.CLVM_NG) {
+ if (primaryPool.getType() == StoragePoolType.CLVM_NG && snapshotTO.isKvmIncrementalSnapshot()) {
+ if (secondaryPool == null) {
+ String errorMsg = String.format("Incremental snapshots for CLVM_NG require secondary storage. " +
+ "Please configure secondary storage or disable incremental snapshots for volume [%s].", volume.getName());
+ logger.error(errorMsg);
+ return new CreateObjectAnswer(errorMsg);
+ }
+ logger.info("Taking incremental snapshot of CLVM_NG volume [{}] using QCOW2 backup to secondary storage.", volume.getName());
+ newSnapshot = takeIncrementalVolumeSnapshotOfStoppedVm(snapshotTO, primaryPool, secondaryPool,
+ imageStoreTo.getUrl(), snapshotName, volume, conn, cmd.getWait());
+ } else {
+ CreateObjectAnswer result = takeClvmVolumeSnapshotOfStoppedVm(disk, snapshotName);
+ if (result != null) return result;
+ newSnapshot.setPath(snapshotPath);
+ }
} else {
if (snapshotTO.isKvmIncrementalSnapshot()) {
newSnapshot = takeIncrementalVolumeSnapshotOfStoppedVm(snapshotTO, primaryPool, secondaryPool, imageStoreTo != null ? imageStoreTo.getUrl() : null, snapshotName, volume, conn, cmd.getWait());
@@ -1943,7 +2145,11 @@ private String getVmXml(KVMStoragePool primaryPool, VolumeObjectTO volumeObjectT
String machine = resource.isGuestAarch64() ? LibvirtComputingResource.VIRT : LibvirtComputingResource.PC;
String cpuArch = resource.getGuestCpuArch() != null ? resource.getGuestCpuArch() : "x86_64";
- return String.format(DUMMY_VM_XML, vmName, cpuArch, machine, resource.getHypervisorPath(), primaryPool.getLocalPathFor(volumeObjectTo.getPath()));
+ String volumePath = primaryPool.getLocalPathFor(volumeObjectTo.getPath());
+ boolean isClvmNg = StoragePoolType.CLVM_NG == primaryPool.getType();
+
+ String xmlTemplate = isClvmNg ? DUMMY_VM_XML_BLOCK : DUMMY_VM_XML;
+ return String.format(xmlTemplate, vmName, cpuArch, machine, resource.getHypervisorPath(), volumePath);
}
private SnapshotObjectTO takeIncrementalVolumeSnapshotOfRunningVm(SnapshotObjectTO snapshotObjectTO, KVMStoragePool primaryPool, KVMStoragePool secondaryPool,
@@ -2647,11 +2853,13 @@ public Answer deleteVolume(final DeleteCommand cmd) {
final PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)vol.getDataStore();
try {
final KVMStoragePool pool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid());
- try {
- pool.getPhysicalDisk(vol.getPath());
- } catch (final Exception e) {
- logger.debug(String.format("can't find volume: %s, return true", vol));
- return new Answer(null);
+ if (pool.getType() != StoragePoolType.CLVM && pool.getType() != StoragePoolType.CLVM_NG) {
+ try {
+ pool.getPhysicalDisk(vol.getPath());
+ } catch (final Exception e) {
+ logger.debug(String.format("can't find volume: %s, return true", vol));
+ return new Answer(null);
+ }
}
pool.deletePhysicalDisk(vol.getPath(), vol.getFormat());
return new Answer(null);
@@ -2880,6 +3088,25 @@ public Answer deleteSnapshot(final DeleteCommand cmd) {
if (snapshotTO.isKvmIncrementalSnapshot()) {
deleteCheckpoint(snapshotTO);
}
+ } else if (primaryPool.getType() == StoragePoolType.CLVM || primaryPool.getType() == StoragePoolType.CLVM_NG) {
+ // For CLVM/CLVM_NG, snapshots are typically already deleted from primary storage during backup
+ // via deleteSnapshotOnPrimary in the backupSnapshot finally block.
+ // This is called when the user explicitly deletes the snapshot via UI/API.
+ // We check if the snapshot still exists and clean it up if needed.
+ logger.info("Processing CLVM/CLVM_NG snapshot deletion (id={}, name={}, path={}) on primary storage",
+ snapshotTO.getId(), snapshotTO.getName(), snapshotTO.getPath());
+
+ String snapshotPath = snapshotTO.getPath();
+ if (snapshotPath != null && !snapshotPath.isEmpty()) {
+ boolean wasDeleted = deleteClvmSnapshot(snapshotPath, primaryPool.getType(), true);
+ if (wasDeleted) {
+ logger.info("Successfully cleaned up {} snapshot {} from primary storage", primaryPool.getType(), snapshotName);
+ } else {
+ logger.info("{} snapshot {} was already deleted from primary storage during backup, no cleanup needed", primaryPool.getType(), snapshotName);
+ }
+ } else {
+ logger.debug("{} snapshot path is null or empty, assuming already cleaned up", primaryPool.getType());
+ }
} else {
logger.warn("Operation not implemented for storage pool type of " + primaryPool.getType().toString());
throw new InternalErrorException("Operation not implemented for storage pool type of " + primaryPool.getType().toString());
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java
index a03daeb197bf..b6dfa70d7606 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java
@@ -34,6 +34,9 @@
import com.cloud.agent.properties.AgentProperties;
import com.cloud.agent.properties.AgentPropertiesFileHandler;
+import com.cloud.utils.script.OutputInterpreter;
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParser;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.utils.cryptsetup.KeyFile;
import org.apache.cloudstack.utils.qemu.QemuImageOptions;
@@ -47,6 +50,7 @@
import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
+import org.joda.time.Duration;
import org.libvirt.Connect;
import org.libvirt.LibvirtException;
import org.libvirt.Secret;
@@ -64,6 +68,7 @@
import com.ceph.rbd.jna.RbdImageInfo;
import com.ceph.rbd.jna.RbdSnapInfo;
import com.cloud.exception.InternalErrorException;
+import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.resource.LibvirtConnection;
import com.cloud.hypervisor.kvm.resource.LibvirtSecretDef;
import com.cloud.hypervisor.kvm.resource.LibvirtSecretDef.Usage;
@@ -231,6 +236,11 @@ private void createTemplateOnRBDFromDirectDownloadFile(String srcTemplateFilePat
}
public StorageVol getVolume(StoragePool pool, String volName) {
+ if (pool == null) {
+ logger.debug("LibVirt StoragePool is null (likely CLVM/CLVM_NG virtual pool), cannot lookup volume {} via libvirt", volName);
+ return null;
+ }
+
StorageVol vol = null;
try {
@@ -254,9 +264,12 @@ public StorageVol getVolume(StoragePool pool, String volName) {
try {
vol = pool.storageVolLookupByName(volName);
- logger.debug("Found volume " + volName + " in storage pool " + pool.getName() + " after refreshing the pool");
+ if (vol != null) {
+ logger.debug("Found volume " + volName + " in storage pool " + pool.getName() + " after refreshing the pool");
+ }
} catch (LibvirtException e) {
- throw new CloudRuntimeException("Could not find volume " + volName + ": " + e.getMessage());
+ logger.debug("Volume " + volName + " still not found after pool refresh: " + e.getMessage());
+ return null;
}
}
@@ -350,37 +363,144 @@ private StoragePool createSharedStoragePool(Connect conn, String uuid, String ho
}
private StoragePool createCLVMStoragePool(Connect conn, String uuid, String host, String path) {
-
String volgroupPath = "/dev/" + path;
String volgroupName = path;
- volgroupName = volgroupName.replaceFirst("/", "");
+ volgroupName = volgroupName.replaceFirst("^/", "");
+
+ Script checkVgExists = new Script("vgs", 5000, logger);
+ checkVgExists.add("--noheadings");
+ checkVgExists.add("-o", "vg_name");
+ checkVgExists.add(volgroupName);
+ String vgCheckResult = checkVgExists.execute();
+
+ if (vgCheckResult != null) {
+ logger.error("Volume group {} does not exist or is not accessible", volgroupName);
+ return null;
+ }
+
+ logger.info("Volume group {} verified, creating libvirt pool definition for CLVM/CLVM_NG", volgroupName);
+ LibvirtStoragePoolDef poolDef = new LibvirtStoragePoolDef(
+ LibvirtStoragePoolDef.PoolType.LOGICAL,
+ volgroupName,
+ uuid,
+ null,
+ volgroupName,
+ volgroupPath
+ );
- LibvirtStoragePoolDef spd = new LibvirtStoragePoolDef(PoolType.LOGICAL, volgroupName, uuid, host, volgroupPath, volgroupPath);
- StoragePool sp = null;
try {
- logger.debug(spd.toString());
- sp = conn.storagePoolCreateXML(spd.toString(), 0);
- return sp;
+ StoragePool pool = conn.storagePoolDefineXML(poolDef.toString(), 0);
+ logger.info("Created libvirt pool definition for CLVM/CLVM_NG VG: {} (pool will remain inactive, we use direct LVM access)", volgroupName);
+ pool.setAutostart(1);
+ return pool;
} catch (LibvirtException e) {
- logger.error(e.toString());
- if (sp != null) {
- try {
- if (sp.isPersistent() == 1) {
- sp.destroy();
- sp.undefine();
- } else {
- sp.destroy();
- }
- sp.free();
- } catch (LibvirtException l) {
- logger.debug("Failed to define clvm storage pool with: " + l.toString());
- }
- }
+ logger.warn("Failed to define CLVM/CLVM_NG pool in libvirt: {}", e.getMessage());
return null;
}
+ }
+
+ /**
+ * Set pool capacity statistics from VG stats array.
+ * Extracts capacity and available from VG stats and calculates used space.
+ *
+ * @param pool The storage pool to update
+ * @param vgStats Array containing [capacity, available] in bytes from getVgStats()
+ * @param vgName The VG name for logging purposes
+ */
+ private void setPoolCapacityFromVgStats(LibvirtStoragePool pool, long[] vgStats, String vgName) {
+ long capacity = vgStats[0];
+ long available = vgStats[1];
+ long used = capacity - available;
+
+ pool.setCapacity(capacity);
+ pool.setUsed(used);
+ pool.setAvailable(available);
+
+ logger.debug("CLVM/CLVM_NG pool {} - Capacity: {}, Used: {}, Available: {}",
+ vgName, toHumanReadableSize(capacity), toHumanReadableSize(used), toHumanReadableSize(available));
+ }
+
+ /**
+ * Get VG statistics (capacity and available) using direct LVM commands.
+ *
+ * @param vgName The volume group name
+ * @return long array [capacity, available] in bytes
+ * @throws CloudRuntimeException if VG statistics cannot be retrieved or parsed
+ */
+ private long[] getVgStats(String vgName) {
+ Script getVgStats = new Script("vgs", 5000, logger);
+ getVgStats.add("--noheadings");
+ getVgStats.add("--units", "b");
+ getVgStats.add("--nosuffix");
+ getVgStats.add("-o", "vg_size,vg_free");
+ getVgStats.add(vgName);
+
+ OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
+ String result = getVgStats.execute(parser);
+
+ if (result != null) {
+ String errorMsg = "Failed to get statistics for volume group " + vgName + ": " + result;
+ logger.error(errorMsg);
+ throw new CloudRuntimeException(errorMsg);
+ }
+
+ String output = parser.getLines().trim();
+ String[] lines = output.split("\\n");
+ String dataLine = null;
+
+ for (String line : lines) {
+ line = line.trim();
+ if (!line.isEmpty() && Character.isDigit(line.charAt(0))) {
+ dataLine = line;
+ break;
+ }
+ }
+
+ if (dataLine == null) {
+ String errorMsg = "No valid data line found in vgs output for " + vgName + ": " + output;
+ logger.error(errorMsg);
+ throw new CloudRuntimeException(errorMsg);
+ }
+
+ String[] stats = dataLine.split("\\s+");
+
+ if (stats.length < 2) {
+ String errorMsg = "Unexpected output from vgs command for " + vgName + ": " + dataLine;
+ logger.error(errorMsg);
+ throw new CloudRuntimeException(errorMsg);
+ }
+
+ try {
+ long capacity = Long.parseLong(stats[0].trim());
+ long available = Long.parseLong(stats[1].trim());
+ return new long[]{capacity, available};
+ } catch (NumberFormatException e) {
+ String errorMsg = "Failed to parse VG statistics for " + vgName + ": " + e.getMessage();
+ logger.error(errorMsg);
+ throw new CloudRuntimeException(errorMsg, e);
+ }
+ }
+
+ private KVMStoragePool createVirtualClvmPool(String uuid, String host, String path, StoragePoolType type, Map details) {
+ String volgroupName = path.replaceFirst("^/", "");
+ String volgroupPath = "/dev/" + volgroupName;
+
+ logger.info("Creating virtual CLVM/CLVM_NG pool {} without libvirt using direct LVM access", volgroupName);
+
+ long[] vgStats = getVgStats(volgroupName);
+
+ LibvirtStoragePool pool = new LibvirtStoragePool(uuid, volgroupName, type, this, null);
+ pool.setLocalPath(volgroupPath);
+ setPoolCapacityFromVgStats(pool, vgStats, volgroupName);
+
+ if (details != null) {
+ pool.setDetails(details);
+ }
+ return pool;
}
+
private List getNFSMountOptsFromDetails(StoragePoolType type, Map details) {
List nfsMountOpts = null;
if (!type.equals(StoragePoolType.NetworkFilesystem) || details == null) {
@@ -580,14 +700,11 @@ public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) {
Connect conn = LibvirtConnection.getConnection();
storage = conn.storagePoolLookupByUUIDString(uuid);
- if (storage.getInfo().state != StoragePoolState.VIR_STORAGE_POOL_RUNNING) {
- logger.warn("Storage pool " + uuid + " is not in running state. Attempting to start it.");
- storage.create(0);
- }
LibvirtStoragePoolDef spd = getStoragePoolDef(conn, storage);
if (spd == null) {
throw new CloudRuntimeException("Unable to parse the storage pool definition for storage pool " + uuid);
}
+
StoragePoolType type = null;
if (spd.getPoolType() == LibvirtStoragePoolDef.PoolType.NETFS) {
type = StoragePoolType.NetworkFilesystem;
@@ -603,6 +720,12 @@ public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) {
type = StoragePoolType.PowerFlex;
}
+ // Skip pool activation for CLVM/CLVM_NG - we keep them inactive and use direct LVM commands
+ if (storage.getInfo().state != StoragePoolState.VIR_STORAGE_POOL_RUNNING && type != StoragePoolType.CLVM && type != StoragePoolType.CLVM_NG) {
+ logger.warn("Storage pool " + uuid + " is not in running state. Attempting to start it.");
+ storage.create(0);
+ }
+
LibvirtStoragePool pool = new LibvirtStoragePool(uuid, storage.getName(), type, this, storage);
if (pool.getType() != StoragePoolType.RBD && pool.getType() != StoragePoolType.PowerFlex)
@@ -640,15 +763,31 @@ public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) {
logger.info("Asking libvirt to refresh storage pool " + uuid);
pool.refresh();
}
- pool.setCapacity(storage.getInfo().capacity);
- pool.setUsed(storage.getInfo().allocation);
- updateLocalPoolIops(pool);
- pool.setAvailable(storage.getInfo().available);
- logger.debug("Successfully refreshed pool " + uuid +
- " Capacity: " + toHumanReadableSize(storage.getInfo().capacity) +
- " Used: " + toHumanReadableSize(storage.getInfo().allocation) +
- " Available: " + toHumanReadableSize(storage.getInfo().available));
+ if (type == StoragePoolType.CLVM || type == StoragePoolType.CLVM_NG) {
+ logger.debug("Getting capacity for CLVM/CLVM_NG pool {} using direct LVM commands", uuid);
+ String vgName = storage.getName();
+ try {
+ long[] vgStats = getVgStats(vgName);
+ setPoolCapacityFromVgStats(pool, vgStats, vgName);
+ } catch (CloudRuntimeException e) {
+ logger.warn("Failed to get VG stats for CLVM/CLVM_NG pool {}: {}. Using libvirt values (may be 0)", vgName, e.getMessage());
+ pool.setCapacity(storage.getInfo().capacity);
+ pool.setUsed(storage.getInfo().allocation);
+ pool.setAvailable(storage.getInfo().available);
+ }
+ } else {
+ pool.setCapacity(storage.getInfo().capacity);
+ pool.setUsed(storage.getInfo().allocation);
+ pool.setAvailable(storage.getInfo().available);
+
+ logger.debug("Successfully refreshed pool {} Capacity: {} Used: {} Available: {}",
+ uuid, toHumanReadableSize(storage.getInfo().capacity),
+ toHumanReadableSize(storage.getInfo().allocation),
+ toHumanReadableSize(storage.getInfo().available));
+ }
+
+ updateLocalPoolIops(pool);
return pool;
} catch (LibvirtException e) {
@@ -660,9 +799,25 @@ public KVMStoragePool getStoragePool(String uuid, boolean refreshInfo) {
@Override
public KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool) {
LibvirtStoragePool libvirtPool = (LibvirtStoragePool)pool;
+ boolean isClvmPool = (pool.getType() == StoragePoolType.CLVM || pool.getType() == StoragePoolType.CLVM_NG);
+
+ // For CLVM pools without libvirt backing, use direct block device access immediately
+ if (isClvmPool && libvirtPool.getPool() == null) {
+ logger.debug("CLVM/CLVM_NG pool has no libvirt backing, using direct block device access for volume: {}", volumeUuid);
+ return getPhysicalDiskViaDirectBlockDevice(volumeUuid, pool);
+ }
try {
StorageVol vol = getVolume(libvirtPool.getPool(), volumeUuid);
+ if (vol == null) {
+ logger.debug("Volume " + volumeUuid + " not found in libvirt, will check for CLVM/CLVM_NG fallback");
+ if (isClvmPool) {
+ return getPhysicalDiskWithClvmFallback(volumeUuid, pool, libvirtPool);
+ }
+
+ throw new CloudRuntimeException("Volume " + volumeUuid + " not found in libvirt pool");
+ }
+
KVMPhysicalDisk disk;
LibvirtStorageVolumeDef voldef = getStorageVolumeDef(libvirtPool.getPool().getConnect(), vol);
disk = new KVMPhysicalDisk(vol.getPath(), vol.getName(), pool);
@@ -693,11 +848,248 @@ public KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool) {
}
return disk;
} catch (LibvirtException e) {
- logger.debug("Failed to get physical disk:", e);
+ logger.debug("Failed to get volume from libvirt: " + e.getMessage());
+ if (isClvmPool) {
+ return getPhysicalDiskWithClvmFallback(volumeUuid, pool, libvirtPool);
+ }
+
throw new CloudRuntimeException(e.toString());
}
}
+ /**
+ * CLVM fallback: First tries to refresh libvirt pool to make volume visible,
+ * if that fails, accesses volume directly via block device path.
+ */
+ private KVMPhysicalDisk getPhysicalDiskWithClvmFallback(String volumeUuid, KVMStoragePool pool, LibvirtStoragePool libvirtPool) {
+ logger.info("CLVM volume not visible to libvirt, attempting pool refresh for volume: {}", volumeUuid);
+
+ try {
+ logger.debug("Refreshing libvirt storage pool: {}", pool.getUuid());
+ libvirtPool.getPool().refresh(0);
+
+ StorageVol vol = getVolume(libvirtPool.getPool(), volumeUuid);
+ if (vol != null) {
+ logger.info("Volume found after pool refresh: {}", volumeUuid);
+ KVMPhysicalDisk disk;
+ LibvirtStorageVolumeDef voldef = getStorageVolumeDef(libvirtPool.getPool().getConnect(), vol);
+ disk = new KVMPhysicalDisk(vol.getPath(), vol.getName(), pool);
+ disk.setSize(vol.getInfo().allocation);
+ disk.setVirtualSize(vol.getInfo().capacity);
+ disk.setFormat(voldef.getFormat() == LibvirtStorageVolumeDef.VolumeFormat.QCOW2 ?
+ PhysicalDiskFormat.QCOW2 : PhysicalDiskFormat.RAW);
+ return disk;
+ }
+ } catch (LibvirtException refreshEx) {
+ logger.debug("Pool refresh failed or volume still not found: {}", refreshEx.getMessage());
+ }
+
+ logger.info("Falling back to direct block device access for volume: {}", volumeUuid);
+ return getPhysicalDiskViaDirectBlockDevice(volumeUuid, pool);
+ }
+
+ private String getVgName(String sourceDir) {
+ String vgName = sourceDir;
+ if (vgName.startsWith("/")) {
+ String[] parts = vgName.split("/");
+ List tokens = Arrays.stream(parts)
+ .filter(s -> !s.isEmpty()).collect(Collectors.toList());
+
+ vgName = tokens.size() > 1 ? tokens.get(1)
+ : tokens.size() == 1 ? tokens.get(0)
+ : "";
+ }
+ return vgName;
+ }
+
+ /**
+ * For CLVM volumes that exist in LVM but are not visible to libvirt,
+ * access them directly via block device path.
+ */
+ private KVMPhysicalDisk getPhysicalDiskViaDirectBlockDevice(String volumeUuid, KVMStoragePool pool) {
+ try {
+ String vgName = extractVgNameFromPool(pool);
+ verifyLvExistsInVg(volumeUuid, vgName);
+
+ logger.info("Volume {} exists in LVM but not visible to libvirt, accessing directly", volumeUuid);
+
+ String lvPath = findAccessibleDeviceNode(volumeUuid, vgName, pool);
+ long size = getClvmVolumeSize(lvPath);
+
+ KVMPhysicalDisk disk = createPhysicalDiskFromClvmLv(lvPath, volumeUuid, pool, size);
+
+ ensureTemplateAccessibility(volumeUuid, lvPath, pool);
+
+ return disk;
+
+ } catch (CloudRuntimeException ex) {
+ throw ex;
+ } catch (Exception ex) {
+ logger.error("Failed to access CLVM volume via direct block device: {}", volumeUuid, ex);
+ throw new CloudRuntimeException(String.format("Could not find volume %s: %s ", volumeUuid, ex.getMessage()));
+ }
+ }
+
+ private String extractVgNameFromPool(KVMStoragePool pool) {
+ String sourceDir = pool.getLocalPath();
+ if (sourceDir == null || sourceDir.isEmpty()) {
+ throw new CloudRuntimeException("CLVM pool sourceDir is not set, cannot determine VG name");
+ }
+ String vgName = getVgName(sourceDir);
+ logger.debug("Using VG name: {} (from sourceDir: {})", vgName, sourceDir);
+ return vgName;
+ }
+
+ private void verifyLvExistsInVg(String volumeUuid, String vgName) {
+ logger.debug("Checking if volume {} exists in VG {}", volumeUuid, vgName);
+ Script checkLvCmd = new Script("/usr/sbin/lvs", 5000, logger);
+ checkLvCmd.add("--noheadings");
+ checkLvCmd.add("--unbuffered");
+ checkLvCmd.add(vgName + "/" + volumeUuid);
+
+ String checkResult = checkLvCmd.execute();
+ if (checkResult != null) {
+ logger.debug("Volume {} does not exist in VG {}: {}", volumeUuid, vgName, checkResult);
+ throw new CloudRuntimeException(String.format("Storage volume not found: no storage vol with matching name '%s'", volumeUuid));
+ }
+ }
+
+ private String findAccessibleDeviceNode(String volumeUuid, String vgName, KVMStoragePool pool) {
+ String lvPath = "/dev/" + vgName + "/" + volumeUuid;
+ File lvDevice = new File(lvPath);
+
+ if (!lvDevice.exists()) {
+ lvPath = tryDeviceMapperPath(volumeUuid, vgName, lvDevice);
+
+ if (!lvDevice.exists()) {
+ lvPath = handleMissingDeviceNode(volumeUuid, vgName, pool);
+ }
+ }
+
+ return lvPath;
+ }
+
+ private String tryDeviceMapperPath(String volumeUuid, String vgName, File lvDevice) {
+ String vgNameEscaped = vgName.replace("-", "--");
+ String volumeUuidEscaped = volumeUuid.replace("-", "--");
+ String mapperPath = "/dev/mapper/" + vgNameEscaped + "-" + volumeUuidEscaped;
+ File mapperDevice = new File(mapperPath);
+
+ if (!mapperDevice.exists()) {
+ lvDevice = mapperDevice;
+ }
+
+ return mapperPath;
+ }
+
+ private String handleMissingDeviceNode(String volumeUuid, String vgName, KVMStoragePool pool) {
+ if (pool.getType() == StoragePoolType.CLVM_NG && volumeUuid.startsWith("template-")) {
+ return activateTemplateAndGetPath(volumeUuid, vgName);
+ } else {
+ logger.warn("Volume exists in LVM but device node not found: {}", volumeUuid);
+ throw new CloudRuntimeException(String.format("Could not find volume %s " +
+ "in VG %s - volume exists in LVM but device node not accessible", volumeUuid, vgName));
+ }
+ }
+
+ private String activateTemplateAndGetPath(String volumeUuid, String vgName) {
+ logger.info("Template volume {} device node not found. Attempting to activate in shared mode.", volumeUuid);
+ String templateLvPath = "/dev/" + vgName + "/" + volumeUuid;
+
+ try {
+ ensureTemplateLvInSharedMode(templateLvPath, false);
+
+ String lvPath = findDeviceNodeAfterActivation(templateLvPath, volumeUuid, vgName);
+
+ logger.info("Successfully activated template volume {} at {}", volumeUuid, lvPath);
+ return lvPath;
+
+ } catch (CloudRuntimeException e) {
+ throw new CloudRuntimeException(String.format("Failed to activate template volume %s " +
+ "in VG %s: %s", volumeUuid, vgName, e.getMessage()), e);
+ }
+ }
+
+ private String findDeviceNodeAfterActivation(String templateLvPath, String volumeUuid, String vgName) {
+ File lvDevice = new File(templateLvPath);
+ String lvPath = templateLvPath;
+
+ if (!lvDevice.exists()) {
+ String vgNameEscaped = vgName.replace("-", "--");
+ String volumeUuidEscaped = volumeUuid.replace("-", "--");
+ lvPath = "/dev/mapper/" + vgNameEscaped + "-" + volumeUuidEscaped;
+ lvDevice = new File(lvPath);
+ }
+
+ if (!lvDevice.exists()) {
+ logger.error("Template volume {} still not accessible after activation attempt", volumeUuid);
+ throw new CloudRuntimeException(String.format("Could not activate template volume %s " +
+ "in VG %s - device node not accessible even after activation", volumeUuid, vgName));
+ }
+
+ return lvPath;
+ }
+
+ private long getClvmVolumeSize(String lvPath) {
+ try {
+ Script lvsCmd = new Script("/usr/sbin/lvs", 5000, logger);
+ lvsCmd.add("--noheadings");
+ lvsCmd.add("--units");
+ lvsCmd.add("b");
+ lvsCmd.add("-o");
+ lvsCmd.add("lv_size");
+ lvsCmd.add(lvPath);
+
+ OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
+ String result = lvsCmd.execute(parser);
+
+ String output = (result == null) ? parser.getLines() : result;
+
+ if (output != null && !output.isEmpty()) {
+ String sizeStr = output.trim().replaceAll("[^0-9]", "");
+ if (!sizeStr.isEmpty()) {
+ return Long.parseLong(sizeStr);
+ }
+ }
+ } catch (Exception sizeEx) {
+ logger.warn("Failed to get size for CLVM volume via lvs: {}", sizeEx.getMessage());
+ File lvDevice = new File(lvPath);
+ if (lvDevice.isFile()) {
+ return lvDevice.length();
+ }
+ }
+ return 0;
+ }
+
+ private KVMPhysicalDisk createPhysicalDiskFromClvmLv(String lvPath, String volumeUuid,
+ KVMStoragePool pool, long size) {
+ KVMPhysicalDisk disk = new KVMPhysicalDisk(lvPath, volumeUuid, pool);
+
+ PhysicalDiskFormat diskFormat = (pool.getType() == StoragePoolType.CLVM_NG)
+ ? PhysicalDiskFormat.QCOW2
+ : PhysicalDiskFormat.RAW;
+
+ logger.debug("{} pool detected, setting disk format to {} for volume {}",
+ pool.getType(), diskFormat, volumeUuid);
+
+ disk.setFormat(diskFormat);
+ disk.setSize(size);
+ disk.setVirtualSize(size);
+
+ logger.info("Successfully accessed CLVM/CLVM_NG volume via direct block device: {} " +
+ "with format: {} and size: {} bytes", lvPath, diskFormat, size);
+
+ return disk;
+ }
+
+ private void ensureTemplateAccessibility(String volumeUuid, String lvPath, KVMStoragePool pool) {
+ if (pool.getType() == StoragePoolType.CLVM_NG && volumeUuid.startsWith("template-")) {
+ logger.info("Detected template volume {}. Ensuring it's activated in shared mode for backing file access.",
+ volumeUuid);
+ ensureTemplateLvInSharedMode(lvPath, false);
+ }
+ }
+
/**
* adjust refcount
*/
@@ -814,7 +1206,7 @@ public KVMStoragePool createStoragePool(String name, String host, int port, Stri
try {
sp = createNetfsStoragePool(PoolType.NETFS, conn, name, host, path, nfsMountOpts);
} catch (LibvirtException e) {
- logger.error("Failed to create netfs mount: " + host + ":" + path , e);
+ logger.error("Failed to create netfs mount: " + host + ":" + path, e);
logger.error(e.getStackTrace());
throw new CloudRuntimeException(e.toString());
}
@@ -822,7 +1214,7 @@ public KVMStoragePool createStoragePool(String name, String host, int port, Stri
try {
sp = createNetfsStoragePool(PoolType.GLUSTERFS, conn, name, host, path, null);
} catch (LibvirtException e) {
- logger.error("Failed to create glusterfs mount: " + host + ":" + path , e);
+ logger.error("Failed to create glusterlvm_fs mount: " + host + ":" + path, e);
logger.error(e.getStackTrace());
throw new CloudRuntimeException(e.toString());
}
@@ -830,8 +1222,12 @@ public KVMStoragePool createStoragePool(String name, String host, int port, Stri
sp = createSharedStoragePool(conn, name, host, path);
} else if (type == StoragePoolType.RBD) {
sp = createRBDStoragePool(conn, name, host, port, userInfo, path);
- } else if (type == StoragePoolType.CLVM) {
+ } else if (type == StoragePoolType.CLVM || type == StoragePoolType.CLVM_NG) {
sp = createCLVMStoragePool(conn, name, host, path);
+ if (sp == null) {
+ logger.info("Falling back to virtual CLVM/CLVM_NG pool without libvirt for: {}", name);
+ return createVirtualClvmPool(name, host, path, type, details);
+ }
}
}
@@ -845,8 +1241,7 @@ public KVMStoragePool createStoragePool(String name, String host, int port, Stri
// to be always mounted, as long the primary storage isn't fully deleted.
incStoragePoolRefCount(name);
}
-
- if (sp.isActive() == 0) {
+ if (sp.isActive() == 0 && type != StoragePoolType.CLVM && type != StoragePoolType.CLVM_NG) {
logger.debug("Attempting to activate pool " + name);
sp.create(0);
}
@@ -1010,6 +1405,10 @@ public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool,
return (dataPool == null) ? createPhysicalDiskByLibVirt(name, pool, PhysicalDiskFormat.RAW, provisioningType, size) :
createPhysicalDiskByQemuImg(name, pool, PhysicalDiskFormat.RAW, provisioningType, size, passphrase);
+ } else if (StoragePoolType.CLVM_NG.equals(poolType)) {
+ return createClvmNgDiskWithBacking(name, 0, size, null, pool, provisioningType);
+ } else if (StoragePoolType.CLVM.equals(poolType)) {
+ return createClvmVolume(name, size, pool);
} else if (StoragePoolType.NetworkFilesystem.equals(poolType) || StoragePoolType.Filesystem.equals(poolType)) {
switch (format) {
case QCOW2:
@@ -1116,9 +1515,84 @@ private KVMPhysicalDisk createPhysicalDiskByQemuImg(String name, KVMStoragePool
@Override
public boolean connectPhysicalDisk(String name, KVMStoragePool pool, Map details, boolean isVMMigrate) {
// this is for managed storage that needs to prep disks prior to use
+ if ((pool.getType() == StoragePoolType.CLVM || pool.getType() == StoragePoolType.CLVM_NG) && isVMMigrate) {
+ logger.info("Activating CLVM/CLVM_NG volume {} at location: {} in shared mode for VM migration", name, pool.getLocalPath() + File.separator + name);
+ Script activateVolInSharedMode = new Script("lvchange", 5000, logger);
+ activateVolInSharedMode.add("-asy");
+ activateVolInSharedMode.add(pool.getLocalPath() + File.separator + name);
+ String result = activateVolInSharedMode.execute();
+ if (result != null) {
+ logger.error("Failed to activate CLVM/CLVM_NG volume {} in shared mode for VM migration. Command output: {}", name, result);
+ return false;
+ }
+ }
+
+ if (pool.getType() == StoragePoolType.CLVM_NG) {
+ ensureClvmNgBackingFileAccessible(name, pool);
+ }
+
return true;
}
+ /**
+ * Checks if a CLVM_NG QCOW2 volume has a backing file (template) and ensures it's activated in shared mode.
+ * This is critical for multi-host deployments where VMs on different hosts need to access the same template.
+ * Called during VM deployment to ensure template backing files are accessible on the current host.
+ *
+ * @param volumeName The name of the volume (e.g., volume-uuid)
+ * @param pool The CLVM_NG storage pool
+ */
+ private void ensureClvmNgBackingFileAccessible(String volumeName, KVMStoragePool pool) {
+ try {
+ String vgName = getVgName(pool.getLocalPath());
+ String volumePath = "/dev/" + vgName + "/" + volumeName;
+
+ logger.debug("Checking if CLVM_NG volume {} has a backing file that needs activation", volumePath);
+
+ Script qemuImgInfo = new Script("qemu-img", Duration.millis(10000), logger);
+ qemuImgInfo.add("info");
+ qemuImgInfo.add("--output=json");
+ qemuImgInfo.add(volumePath);
+
+ OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
+ String result = qemuImgInfo.execute(parser);
+
+ if (result == null && parser.getLines() != null && !parser.getLines().isEmpty()) {
+ String jsonOutput = parser.getLines();
+
+ if (jsonOutput.contains("\"backing-filename\"")) {
+ int backingStart = jsonOutput.indexOf("\"backing-filename\"");
+ if (backingStart > 0) {
+ int valueStart = jsonOutput.indexOf(":", backingStart);
+ if (valueStart > 0) {
+ valueStart = jsonOutput.indexOf("\"", valueStart) + 1;
+ int valueEnd = jsonOutput.indexOf("\"", valueStart);
+
+ if (valueEnd > valueStart) {
+ String backingFile = jsonOutput.substring(valueStart, valueEnd).trim();
+
+ if (!backingFile.isEmpty() && backingFile.startsWith("/dev/")) {
+ logger.info("Volume {} has backing file: {}. Ensuring backing file is in shared mode on this host.",
+ volumePath, backingFile);
+ ensureTemplateLvInSharedMode(backingFile, false);
+ } else {
+ logger.debug("Volume {} has backing file but not a block device path: {}", volumePath, backingFile);
+ }
+ }
+ }
+ } else {
+ logger.debug("Volume {} does not have a backing file (full clone)", volumePath);
+ }
+ } else {
+ logger.debug("Volume {} does not have a backing file (full clone)", volumePath);
+ }
+ }
+ } catch (Exception e) {
+ logger.warn("Failed to check/activate backing file for volume {}: {}. VM deployment may fail if template is not accessible.",
+ volumeName, e.getMessage());
+ }
+ }
+
@Override
public boolean disconnectPhysicalDisk(String uuid, KVMStoragePool pool) {
// this is for managed storage that needs to cleanup disks after use
@@ -1224,10 +1698,21 @@ public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.Imag
}
}
+ // For CLVM/CLVM_NG pools, always use direct LVM cleanup to ensure secure zero-fill
+ if (pool.getType() == StoragePoolType.CLVM || pool.getType() == StoragePoolType.CLVM_NG) {
+ logger.info("CLVM/CLVM_NG pool detected - using direct LVM cleanup with secure zero-fill for volume {}", uuid);
+ return cleanupCLVMVolume(uuid, pool);
+ }
+
+ // For non-CLVM pools, use libvirt deletion
LibvirtStoragePool libvirtPool = (LibvirtStoragePool)pool;
try {
StorageVol vol = getVolume(libvirtPool.getPool(), uuid);
- logger.debug("Instructing libvirt to remove volume " + uuid + " from pool " + pool.getUuid());
+ if (vol == null) {
+ logger.warn("Volume {} not found in libvirt pool {}, it may have been already deleted", uuid, pool.getUuid());
+ return true;
+ }
+ logger.debug("Instructing libvirt to remove volume {} from pool {}", uuid, pool.getUuid());
if(Storage.ImageFormat.DIR.equals(format)){
deleteDirVol(libvirtPool, vol);
} else {
@@ -1240,6 +1725,86 @@ public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool, Storage.Imag
}
}
+ private boolean shouldSecureZeroFill(KVMStoragePool pool) {
+ Map details = pool.getDetails();
+ String secureZeroFillStr = (details != null) ? details.get(KVMStoragePool.CLVM_SECURE_ZERO_FILL) : null;
+ return Boolean.parseBoolean(secureZeroFillStr);
+ }
+
+ /**
+ * Clean up CLVM volume and its snapshots directly using LVM commands.
+ * This is used as a fallback when libvirt cannot find or delete the volume.
+ */
+ private boolean cleanupCLVMVolume(String uuid, KVMStoragePool pool) {
+ logger.info("Starting direct LVM cleanup for CLVM volume: {} in pool: {}", uuid, pool.getUuid());
+
+ try {
+ String sourceDir = pool.getLocalPath();
+ if (sourceDir == null || sourceDir.isEmpty()) {
+ logger.debug("Source directory is null or empty, cannot determine VG name for CLVM pool {}, skipping direct cleanup", pool.getUuid());
+ return true;
+ }
+ String vgName = getVgName(sourceDir);
+ logger.info("Determined VG name: {} for pool: {}", vgName, pool.getUuid());
+
+ if (vgName == null || vgName.isEmpty()) {
+ logger.warn("Cannot determine VG name for CLVM pool {}, skipping direct cleanup", pool.getUuid());
+ return true;
+ }
+
+ String lvPath = "/dev/" + vgName + "/" + uuid;
+ logger.debug("Volume path: {}", lvPath);
+
+ // Check if the LV exists
+ Script checkLvs = new Script("lvs", 5000, logger);
+ checkLvs.add("--noheadings");
+ checkLvs.add("--unbuffered");
+ checkLvs.add(lvPath);
+
+ logger.info("Checking if volume exists: lvs --noheadings --unbuffered {}", lvPath);
+ String checkResult = checkLvs.execute();
+
+ if (checkResult != null) {
+ logger.info("CLVM volume {} does not exist in LVM (check returned: {}), considering it as already deleted", uuid, checkResult);
+ return true;
+ }
+
+ logger.info("Volume {} exists, proceeding with cleanup", uuid);
+
+ boolean secureZeroFillEnabled = shouldSecureZeroFill(pool);
+
+ if (secureZeroFillEnabled) {
+ logger.info("Step 1: Zero-filling volume {} for security", uuid);
+ secureZeroFillVolume(lvPath, uuid);
+ } else {
+ logger.info("Secure zero-fill is disabled, skipping zero-filling for volume {}", uuid);
+ }
+
+ logger.info("Step 2: Removing volume {}", uuid);
+ Script removeLv = new Script("lvremove", 10000, logger);
+ removeLv.add("-f");
+ removeLv.add(lvPath);
+
+ logger.info("Executing command: lvremove -f {}", lvPath);
+ String removeResult = removeLv.execute();
+
+ if (removeResult == null) {
+ logger.info("Successfully removed CLVM volume {} using direct LVM cleanup", uuid);
+ return true;
+ } else {
+ logger.warn("Command 'lvremove -f {}' returned error: {}", lvPath, removeResult);
+ if (removeResult.contains("not found") || removeResult.contains("Failed to find")) {
+ logger.info("CLVM volume {} not found during cleanup, considering it as already deleted", uuid);
+ return true;
+ }
+ return false;
+ }
+ } catch (Exception ex) {
+ logger.error("Exception during CLVM volume cleanup for {}: {}", uuid, ex.getMessage(), ex);
+ return true;
+ }
+ }
+
/**
* This function copies a physical disk from Secondary Storage to Primary Storage
* or from Primary to Primary Storage
@@ -1263,6 +1828,13 @@ public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template,
} else {
try (KeyFile keyFile = new KeyFile(passphrase)){
String newUuid = name;
+ if (destPool.getType() == StoragePoolType.CLVM_NG && format == PhysicalDiskFormat.QCOW2) {
+ logger.info("Creating CLVM_NG volume {} with backing file from template {}", newUuid, template.getName());
+ String backingFile = getClvmBackingFile(template, destPool);
+
+ disk = createClvmNgDiskWithBacking(newUuid, timeout, size, backingFile, destPool, provisioningType);
+ return disk;
+ }
List passphraseObjects = new ArrayList<>();
disk = destPool.createPhysicalDisk(newUuid, format, provisioningType, template.getVirtualSize(), passphrase);
if (disk == null) {
@@ -1338,6 +1910,78 @@ public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template,
return disk;
}
+ private String getClvmBackingFile(KVMPhysicalDisk template, KVMStoragePool destPool) {
+ String templateLvName = template.getName();
+ KVMPhysicalDisk templateOnPrimary = null;
+
+ try {
+ templateOnPrimary = destPool.getPhysicalDisk(templateLvName);
+ } catch (CloudRuntimeException e) {
+ logger.warn("Template {} not found on CLVM_NG pool {}.", templateLvName, destPool.getUuid());
+ }
+
+ String backingFile;
+ if (templateOnPrimary != null) {
+ backingFile = templateOnPrimary.getPath();
+ logger.info("Using template on primary storage as backing file: {}", backingFile);
+
+ ensureTemplateLvInSharedMode(backingFile);
+ } else {
+ logger.error("Template {} should be on primary storage before creating volumes from it", templateLvName);
+ throw new CloudRuntimeException(String.format("Template not found on CLVM_NG primary storage: {}." +
+ "Template must be copied to primary storage first.", templateLvName));
+ }
+ return backingFile;
+ }
+
+ /**
+ * Ensures a template LV is activated in shared mode so multiple VMs can use it as a backing file.
+ *
+ * @param templatePath The full path to the template LV (e.g., /dev/vgname/template-uuid)
+ * @param throwOnFailure If true, throws CloudRuntimeException on failure; if false, logs warning and continues
+ */
+ private void ensureTemplateLvInSharedMode(String templatePath, boolean throwOnFailure) {
+ try {
+ Script checkLvs = new Script("lvs", Duration.millis(5000), logger);
+ checkLvs.add("--noheadings");
+ checkLvs.add("-o", "lv_attr");
+ checkLvs.add(templatePath);
+
+ OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
+ String result = checkLvs.execute(parser);
+
+ if (result == null && parser.getLines() != null && !parser.getLines().isEmpty()) {
+ String lvAttr = parser.getLines().trim();
+ if (lvAttr.length() >= 6) {
+ boolean isActive = (lvAttr.indexOf('a') >= 0);
+ boolean isShared = (lvAttr.indexOf('s') >= 0);
+
+ if (!isShared || !isActive) {
+ logger.info("Template LV {} is not in shared mode (attr: {}).",
+ templatePath, lvAttr);
+ logger.info("Activating template LV {} in shared mode", templatePath);
+ LibvirtComputingResource.setClvmVolumeToSharedMode(templatePath);
+ } else {
+ logger.debug("Template LV {} is already in shared mode (attr: {})", templatePath, lvAttr);
+ }
+ }
+ }
+ } catch (CloudRuntimeException e) {
+ throw e;
+ } catch (Exception e) {
+ String errorMsg = "Failed to check/ensure template LV shared mode for " + templatePath + ": " + e.getMessage();
+ if (throwOnFailure) {
+ throw new CloudRuntimeException(errorMsg, e);
+ } else {
+ logger.warn(errorMsg, e);
+ }
+ }
+ }
+
+ private void ensureTemplateLvInSharedMode(String templatePath) {
+ ensureTemplateLvInSharedMode(templatePath, false);
+ }
+
private KVMPhysicalDisk createDiskFromTemplateOnRBD(KVMPhysicalDisk template,
String name, PhysicalDiskFormat format, Storage.ProvisioningType provisioningType, long size, KVMStoragePool destPool, int timeout){
@@ -1420,9 +2064,7 @@ private KVMPhysicalDisk createDiskFromTemplateOnRBD(KVMPhysicalDisk template,
rbd.close(destImage);
} else {
logger.debug("The source image " + srcPool.getSourceDir() + "/" + template.getName()
- + " is RBD format 2. We will perform a RBD clone using snapshot "
- + rbdTemplateSnapName);
- /* The source image is format 2, we can do a RBD snapshot+clone (layering) */
+ + " is RBD format 2. We will perform a RBD snapshot+clone (layering)");
logger.debug("Checking if RBD snapshot " + srcPool.getSourceDir() + "/" + template.getName()
@@ -1684,8 +2326,8 @@ to support snapshots(backuped) as qcow2 files. */
}
} else {
/**
- We let Qemu-Img do the work here. Although we could work with librbd and have that do the cloning
- it doesn't benefit us. It's better to keep the current code in place which works
+ We let Qemu-Img do the work here. Although we could work with librbd and have that do the cloning
+ it doesn't benefit us. It's better to keep the current code in place which works
*/
srcFile = new QemuImgFile(KVMPhysicalDisk.RBDStringBuilder(srcPool, sourcePath));
srcFile.setFormat(sourceFormat);
@@ -1737,7 +2379,402 @@ private void deleteVol(LibvirtStoragePool pool, StorageVol vol) throws LibvirtEx
vol.delete(0);
}
+ /**
+ * Securely zero-fill a volume before deletion to prevent data leakage.
+ * Uses blkdiscard (fast TRIM) as primary method, with dd zero-fill as fallback.
+ *
+ * @param lvPath The full path to the logical volume (e.g., /dev/vgname/lvname)
+ * @param volumeUuid The UUID of the volume for logging purposes
+ */
+ private void secureZeroFillVolume(String lvPath, String volumeUuid) {
+ logger.info("Starting secure zero-fill for CLVM volume: {} at path: {}", volumeUuid, lvPath);
+
+ boolean blkdiscardSuccess = false;
+
+ // Try blkdiscard first (fast - sends TRIM commands)
+ try {
+ Script blkdiscard = new Script("blkdiscard", 300000, logger); // 5 minute timeout
+ blkdiscard.add("-f"); // Force flag to suppress confirmation prompts
+ blkdiscard.add(lvPath);
+
+ String result = blkdiscard.execute();
+ if (result == null) {
+ logger.info("Successfully zero-filled CLVM volume {} using blkdiscard (TRIM)", volumeUuid);
+ blkdiscardSuccess = true;
+ } else {
+ if (result.contains("Operation not supported") || result.contains("BLKDISCARD ioctl failed")) {
+ logger.info("blkdiscard not supported for volume {} (device doesn't support TRIM/DISCARD), using dd fallback", volumeUuid);
+ } else {
+ logger.warn("blkdiscard failed for volume {}: {}, will try dd fallback", volumeUuid, result);
+ }
+ }
+ } catch (Exception e) {
+ logger.warn("Exception during blkdiscard for volume {}: {}, will try dd fallback", volumeUuid, e.getMessage());
+ }
+
+ // Fallback to dd zero-fill (slow)
+ if (!blkdiscardSuccess) {
+ logger.info("Attempting zero-fill using dd for CLVM volume: {}", volumeUuid);
+ try {
+ // nice -n 19: lowest CPU priority
+ // ionice -c 2 -n 7: best-effort I/O scheduling with lowest priority
+ // oflag=direct: bypass cache for more predictable performance
+ String command = String.format(
+ "nice -n 19 ionice -c 2 -n 7 dd if=/dev/zero of=%s bs=1M oflag=direct 2>&1 || true",
+ lvPath
+ );
+
+ Script ddZeroFill = new Script("/bin/bash", 3600000, logger); // 60 minute timeout for large volumes
+ ddZeroFill.add("-c");
+ ddZeroFill.add(command);
+
+ OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
+ String ddResult = ddZeroFill.execute(parser);
+ String output = parser.getLines();
+
+ if (output != null && (output.contains("copied") || output.contains("records in") ||
+ output.contains("No space left on device"))) {
+ logger.info("Successfully zero-filled CLVM volume {} using dd", volumeUuid);
+ } else if (ddResult == null) {
+ logger.info("Zero-fill completed for CLVM volume {}", volumeUuid);
+ } else {
+ logger.warn("dd zero-fill for volume {} completed with output: {}", volumeUuid,
+ output != null ? output : ddResult);
+ }
+ } catch (Exception e) {
+ logger.warn("Failed to zero-fill CLVM volume {} before deletion: {}. Proceeding with deletion anyway.",
+ volumeUuid, e.getMessage());
+ }
+ }
+ }
+
private void deleteDirVol(LibvirtStoragePool pool, StorageVol vol) throws LibvirtException {
Script.runSimpleBashScript("rm -r --interactive=never " + vol.getPath());
}
+
+ /**
+ * Get Physical Extent (PE) from the volume group
+ * @param vgName Volume group name
+ * @return PE size in bytes, defaults to 4MiB if it cannot be determined
+ */
+ private long getVgPhysicalExtentSize(String vgName) {
+ final long DEFAULT_PE_SIZE = 4 * 1024 * 1024L;
+ String warningMessage = String.format("Failed to get PE size for VG %s, defaulting to 4MiB", vgName);
+
+ try {
+ Script vgDisplay = new Script("vgdisplay", 300000, logger);
+ vgDisplay.add("--units", "b");
+ vgDisplay.add("-C");
+ vgDisplay.add("--noheadings");
+ vgDisplay.add("-o", "vg_extent_size");
+ vgDisplay.add(vgName);
+
+ OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
+ String result = vgDisplay.execute(parser);
+
+ if (result != null) {
+ logger.warn("{}: {}", warningMessage, result);
+ return DEFAULT_PE_SIZE;
+ }
+
+ String output = parser.getLines();
+ if (output == null || output.trim().isEmpty()) {
+ logger.warn("{}: empty output", warningMessage);
+ return DEFAULT_PE_SIZE;
+ }
+
+ output = output.trim();
+ if (output.endsWith("B")) {
+ output = output.substring(0, output.length() - 1).trim();
+ }
+
+ long peSize = Long.parseLong(output);
+ logger.debug("Physical Extent size for VG {} is {} bytes", vgName, peSize);
+ return peSize;
+
+ } catch (NumberFormatException e) {
+ logger.warn("{}: failed to parse PE size", warningMessage, e);
+ } catch (Exception e) {
+ logger.warn("{}: {}", warningMessage, e.getMessage());
+ }
+
+ logger.info("Using default PE size for VG {}: {} bytes (4 MiB)", vgName, DEFAULT_PE_SIZE);
+ return DEFAULT_PE_SIZE;
+ }
+
+ /**
+ * Calculate LVM LV size for CLVM_NG volume allocation.
+ * Volumes use QCOW2-on-LVM with extended_l2=on and need:
+ * - Base size (virtual size)
+ * - QCOW2 metadata overhead (L1/L2 tables, refcount tables, headers)
+ *
+ * For QCOW2 with 64k clusters and extended L2 tables (extended_l2=on):
+ * - Each 64KB cluster contains data
+ * - Each L2 table entry is 16 bytes (extended L2, double the standard 8 bytes)
+ * - Each 64KB L2 cluster can hold 4096 entries (64KB / 16 bytes)
+ * Formula: Total overhead (MiB) = ((virtualSize_GiB × 1024 × 1024) / (64 × 4096)) × 2 + 2 MiB headers
+ *
+ * Quick reference (64k clusters, extended_l2=on):
+ * 10 GiB virtual → ~7 MiB overhead → 2 PEs (8 MiB)
+ * 100 GiB virtual → ~52 MiB overhead → 13 PEs (52 MiB)
+ * 1 TiB virtual → ~514 MiB overhead → 129 PEs (516 MiB)
+ * 2 TiB virtual → ~1026 MiB overhead → 257 PEs (1028 MiB)
+ *
+ * @param virtualSize Virtual disk size in bytes (for overhead calculation)
+ * @param vgName Volume group name to query PE size
+ * @return Size in bytes to allocate for LV
+ */
+ private long calculateClvmNgLvSize(long virtualSize, String vgName) {
+ long peSize = getVgPhysicalExtentSize(vgName);
+
+ long clusterSize = 64 * 1024L;
+ // Each L2 entry is 16 bytes (extended_l2=on), and each 64KB cluster holds 4096 entries (64KB / 16 bytes)
+ long l2Multiplier = 4096L;
+
+ long numDataClusters = (virtualSize + clusterSize - 1) / clusterSize;
+ long numL2Clusters = (numDataClusters + l2Multiplier - 1) / l2Multiplier;
+ long l2TableSize = numL2Clusters * clusterSize;
+ long refcountTableSize = l2TableSize;
+
+ // Headers and other metadata (L1 table, QCOW2 header, etc.)
+ long headerOverhead = 2 * 1024 * 1024L; // 2 MiB for headers
+ long metadataOverhead = l2TableSize + refcountTableSize + headerOverhead;
+ long targetSize = virtualSize + metadataOverhead;
+ long roundedSize = ((targetSize + peSize - 1) / peSize) * peSize;
+ long virtualSizeGiB = virtualSize / (1024 * 1024 * 1024L);
+ long overheadMiB = metadataOverhead / (1024 * 1024L);
+
+ logger.info("Calculated volume LV size: {} bytes (virtual: {} GiB, " +
+ "QCOW2 metadata overhead: {} MiB (64k clusters, extended_l2=on), rounded to {} PEs, PE size = {} bytes)",
+ roundedSize, virtualSizeGiB, overheadMiB, roundedSize / peSize, peSize);
+
+ return roundedSize;
+ }
+
+
+ private long getQcow2VirtualSize(String imagePath) {
+ Script qemuImg = new Script("qemu-img", 300000, logger);
+ qemuImg.add("info");
+ qemuImg.add("--output=json");
+ qemuImg.add(imagePath);
+
+ OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
+ String result = qemuImg.execute(parser);
+
+ if (result != null) {
+ throw new CloudRuntimeException("Failed to get QCOW2 virtual size for " + imagePath + ": " + result);
+ }
+
+ String output = parser.getLines();
+ if (output == null || output.trim().isEmpty()) {
+ throw new CloudRuntimeException("qemu-img info returned empty output for " + imagePath);
+ }
+
+ JsonObject info = JsonParser.parseString(output).getAsJsonObject();
+ return info.get("virtual-size").getAsLong();
+ }
+
+
+ private long getQcow2PhysicalSize(String imagePath) {
+ Script qemuImg = new Script("qemu-img", Duration.millis(300000), logger);
+ qemuImg.add("info");
+ qemuImg.add("--output=json");
+ qemuImg.add(imagePath);
+
+ OutputInterpreter.AllLinesParser parser = new OutputInterpreter.AllLinesParser();
+ String result = qemuImg.execute(parser);
+
+ if (result != null) {
+ throw new CloudRuntimeException("Failed to get QCOW2 physical size for " + imagePath + ": " + result);
+ }
+
+ String output = parser.getLines();
+ if (output == null || output.trim().isEmpty()) {
+ throw new CloudRuntimeException("qemu-img info returned empty output for " + imagePath);
+ }
+
+ JsonObject info = JsonParser.parseString(output).getAsJsonObject();
+ return info.get("actual-size").getAsLong();
+ }
+
+ private KVMPhysicalDisk createClvmNgDiskWithBacking(String volumeUuid, int timeout, long virtualSize, String backingFile,
+ KVMStoragePool pool, Storage.ProvisioningType provisioningType) {
+ String vgName = getVgName(pool.getLocalPath());
+ long lvSize = calculateClvmNgLvSize(virtualSize, vgName);
+ String volumePath = "/dev/" + vgName + "/" + volumeUuid;
+
+ logger.debug("Creating CLVM_NG volume {} with LV size {} bytes (virtual size: {} bytes, provisioning: {})",
+ volumeUuid, lvSize, virtualSize, provisioningType);
+
+ Script lvcreate = new Script("lvcreate", Duration.millis(timeout), logger);
+ lvcreate.add("-n", volumeUuid);
+ lvcreate.add("-L", lvSize + "B");
+ lvcreate.add("--yes");
+ lvcreate.add(vgName);
+
+ String result = lvcreate.execute();
+ if (result != null) {
+ throw new CloudRuntimeException("Failed to create LV for CLVM_NG volume: " + result);
+ }
+
+ Script qemuImg = new Script("qemu-img", Duration.millis(timeout), logger);
+ qemuImg.add("create");
+ qemuImg.add("-f", "qcow2");
+
+ StringBuilder qcow2Options = new StringBuilder();
+
+ // Set preallocation based on provisioning type
+ // THIN: preallocation=off (sparse file, allocate on write)
+ // SPARSE / FAT: preallocation=metadata (allocate metadata only)
+ String preallocation;
+ if (provisioningType == Storage.ProvisioningType.THIN) {
+ preallocation = "off";
+ } else {
+ preallocation = "metadata";
+ }
+
+ qcow2Options.append("preallocation=").append(preallocation);
+ qcow2Options.append(",extended_l2=on");
+ qcow2Options.append(",cluster_size=64k");
+
+ if (backingFile != null && !backingFile.isEmpty()) {
+ qcow2Options.append(",backing_file=").append(backingFile);
+ qcow2Options.append(",backing_fmt=qcow2");
+ logger.debug("Creating CLVM_NG volume with backing file: {}", backingFile);
+ }
+
+ qemuImg.add("-o", qcow2Options.toString());
+ qemuImg.add(volumePath);
+ qemuImg.add(virtualSize + "");
+
+ result = qemuImg.execute();
+ if (result != null) {
+ removeLvOnFailure(volumePath, timeout);
+ throw new CloudRuntimeException("Failed to create QCOW2 on CLVM_NG volume: " + result);
+ }
+
+ long actualSize = getClvmVolumeSize(volumePath);
+ KVMPhysicalDisk disk = new KVMPhysicalDisk(volumePath, volumeUuid, pool);
+ disk.setFormat(PhysicalDiskFormat.QCOW2);
+ disk.setSize(actualSize);
+ disk.setVirtualSize(actualSize);
+
+ logger.info("Successfully created CLVM_NG volume {} with backing file (LV size: {}, virtual size: {}, provisioning: {}, preallocation: {})",
+ volumeUuid, lvSize, virtualSize, provisioningType, preallocation);
+
+ return disk;
+ }
+
+ public void createTemplateOnClvmNg(String templatePath, String templateUuid, int timeout, KVMStoragePool pool) {
+ String vgName = getVgName(pool.getLocalPath());
+ String lvName = "template-" + templateUuid;
+ String lvPath = "/dev/" + vgName + "/" + lvName;
+
+ if (lvExists(lvPath)) {
+ logger.info("Template LV {} already exists in VG {}. Skipping creation.", lvName, vgName);
+ return;
+ }
+
+ logger.info("Creating new template LV {} in VG {} for template {}", lvName, vgName, templateUuid);
+
+ long virtualSize = getQcow2VirtualSize(templatePath);
+ long physicalSize = getQcow2PhysicalSize(templatePath);
+ long lvSize = virtualSize; // as extended_l2=off and preallocation=off
+
+ logger.info("Template source - Physical: {} bytes, Virtual: {} bytes, LV will be: {} bytes",
+ physicalSize, virtualSize, lvSize);
+
+ Script lvcreate = new Script("lvcreate", Duration.millis(timeout), logger);
+ lvcreate.add("-n", lvName);
+ lvcreate.add("-L", lvSize + "B");
+ lvcreate.add("--yes");
+ lvcreate.add(vgName);
+ String result = lvcreate.execute();
+ if (result != null) {
+ throw new CloudRuntimeException("Failed to create LV for CLVM_NG template: " + result);
+ }
+ Script qemuImgConvert = new Script("qemu-img", Duration.millis(timeout), logger);
+ qemuImgConvert.add("convert");
+ qemuImgConvert.add(templatePath);
+ qemuImgConvert.add("-O", "qcow2");
+ qemuImgConvert.add("-o", "cluster_size=64k,extended_l2=off,preallocation=off");
+ qemuImgConvert.add(lvPath);
+ result = qemuImgConvert.execute();
+
+ if (result != null) {
+ removeLvOnFailure(lvPath, timeout);
+ throw new CloudRuntimeException("Failed to convert template to CLVM_NG volume: " + result);
+ }
+
+ long actualVirtualSize = getQcow2VirtualSize(lvPath);
+ logger.info("Created template LV {} with size {} bytes (source physical: {}, actual virtual: {})", lvName, lvSize, physicalSize, actualVirtualSize);
+
+ try {
+ ensureTemplateLvInSharedMode(lvPath, true);
+ } catch (CloudRuntimeException e) {
+ logger.error("Failed to activate template LV {} in shared mode. Cleaning up.", lvPath);
+ removeLvOnFailure(lvPath, timeout);
+ throw e;
+ }
+
+ KVMPhysicalDisk templateDisk = new KVMPhysicalDisk(lvPath, lvName, pool);
+ templateDisk.setFormat(PhysicalDiskFormat.QCOW2);
+ templateDisk.setVirtualSize(actualVirtualSize);
+ templateDisk.setSize(lvSize);
+
+ }
+
+ private boolean lvExists(String lvPath) {
+ Script checkLv = new Script("lvs", Duration.millis(5000), logger);
+ checkLv.add("--noheadings");
+ checkLv.add("--unbuffered");
+ checkLv.add(lvPath);
+ String checkResult = checkLv.execute();
+ return checkResult == null;
+ }
+
+ private void removeLvOnFailure(String lvPath, int timeout) {
+ Script lvremove = new Script("lvremove", Duration.millis(timeout), logger);
+ lvremove.add("-f");
+ lvremove.add(lvPath);
+ lvremove.execute();
+ }
+
+ /**
+ * Creates a raw LV volume for CLVM pools using direct LVM commands.
+ * This bypasses libvirt since CLVM pools are kept inactive in libvirt.
+ *
+ * @param volumeName the name of the volume to create
+ * @param size the size of the volume in bytes
+ * @param pool the CLVM storage pool
+ * @return the created KVMPhysicalDisk
+ */
+ private KVMPhysicalDisk createClvmVolume(String volumeName, long size, KVMStoragePool pool) {
+ String vgName = getVgName(pool.getLocalPath());
+ String volumePath = "/dev/" + vgName + "/" + volumeName;
+ int timeout = 30000; // 30 seconds timeout for LV creation
+
+ logger.info("Creating CLVM volume {} in VG {} with size {} bytes", volumeName, vgName, size);
+
+ Script lvcreate = new Script("lvcreate", Duration.millis(timeout), logger);
+ lvcreate.add("-n", volumeName);
+ lvcreate.add("-L", size + "B");
+ lvcreate.add("--yes");
+ lvcreate.add(vgName);
+
+ String result = lvcreate.execute();
+ if (result != null) {
+ throw new CloudRuntimeException("Failed to create CLVM volume: " + result);
+ }
+
+ logger.info("Successfully created CLVM volume {} at {} with size {}", volumeName, volumePath, toHumanReadableSize(size));
+
+ long actualSize = getClvmVolumeSize(volumePath);
+ KVMPhysicalDisk disk = new KVMPhysicalDisk(volumePath, volumeName, pool);
+ disk.setFormat(PhysicalDiskFormat.RAW);
+ disk.setSize(actualSize);
+ disk.setVirtualSize(actualSize);
+
+ return disk;
+ }
}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java
index ab39f7bc6ffd..83898ed35c3d 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java
@@ -212,7 +212,7 @@ public boolean refresh() {
@Override
public boolean isExternalSnapshot() {
- if (this.type == StoragePoolType.CLVM || type == StoragePoolType.RBD) {
+ if (this.type == StoragePoolType.CLVM || this.type == StoragePoolType.CLVM_NG || type == StoragePoolType.RBD) {
return true;
}
return false;
@@ -277,6 +277,10 @@ public StoragePoolType getType() {
return this.type;
}
+ public void setType(StoragePoolType type) {
+ this.type = type;
+ }
+
public StoragePool getPool() {
return this._pool;
}
diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java
index cde87fd93842..8d343d4c0e45 100644
--- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java
+++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java
@@ -2728,8 +2728,11 @@ public void testCreateStoragePoolCommand() {
@Test
public void testModifyStoragePoolCommand() {
- final StoragePool pool = Mockito.mock(StoragePool.class);;
+ final StoragePool pool = Mockito.mock(StoragePool.class);
final ModifyStoragePoolCommand command = new ModifyStoragePoolCommand(true, pool);
+ Map details = new HashMap<>();
+ details.put(KVMStoragePool.CLVM_SECURE_ZERO_FILL, "false");
+ command.setDetails(details);
final KVMStoragePoolManager storagePoolMgr = Mockito.mock(KVMStoragePoolManager.class);
final KVMStoragePool kvmStoragePool = Mockito.mock(KVMStoragePool.class);
@@ -2753,8 +2756,11 @@ public void testModifyStoragePoolCommand() {
@Test
public void testModifyStoragePoolCommandFailure() {
- final StoragePool pool = Mockito.mock(StoragePool.class);;
+ final StoragePool pool = Mockito.mock(StoragePool.class);
final ModifyStoragePoolCommand command = new ModifyStoragePoolCommand(true, pool);
+ Map details = new HashMap<>();
+ details.put(KVMStoragePool.CLVM_SECURE_ZERO_FILL, "false");
+ command.setDetails(details);
final KVMStoragePoolManager storagePoolMgr = Mockito.mock(KVMStoragePoolManager.class);
diff --git a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptorTest.java b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptorTest.java
index 88346abd0176..c6e5ef866e16 100644
--- a/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptorTest.java
+++ b/plugins/hypervisors/kvm/src/test/java/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptorTest.java
@@ -17,11 +17,17 @@
package com.cloud.hypervisor.kvm.storage;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.anyList;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.when;
+import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
@@ -33,6 +39,7 @@
import org.libvirt.Connect;
import org.libvirt.StoragePool;
import org.mockito.Mock;
+import org.mockito.MockedConstruction;
import org.mockito.MockedStatic;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
@@ -45,6 +52,7 @@
import com.cloud.utils.Pair;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
+import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
@RunWith(MockitoJUnitRunner.class)
public class LibvirtStorageAdaptorTest {
@@ -58,6 +66,9 @@ public class LibvirtStorageAdaptorTest {
MockedStatic