diff --git a/client/pom.xml b/client/pom.xml
index b8dffe65d4fb..7118f455ab5f 100644
--- a/client/pom.xml
+++ b/client/pom.xml
@@ -121,6 +121,11 @@
cloud-plugin-storage-volume-adaptive
${project.version}
+
+ org.apache.cloudstack
+ cloud-plugin-storage-volume-ontap
+ ${project.version}
+
org.apache.cloudstack
cloud-plugin-storage-volume-solidfire
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java
index ba689d5107f7..433e173fbbf9 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/IscsiAdmStorageAdaptor.java
@@ -19,6 +19,8 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.nio.file.Files;
+import java.nio.file.Paths;
import org.apache.cloudstack.utils.qemu.QemuImg;
import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat;
@@ -96,10 +98,15 @@ public boolean connectPhysicalDisk(String volumeUuid, KVMStoragePool pool, Map 0) {
@@ -238,6 +267,15 @@ public KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool) {
}
private long getDeviceSize(String deviceByPath) {
+ try {
+ if (!Files.exists(Paths.get(deviceByPath))) {
+ logger.debug("Device by-path does not exist yet: " + deviceByPath);
+ return 0L;
+ }
+ } catch (Exception ignore) {
+ // If FS check fails for any reason, fall back to blockdev call
+ }
+
Script iScsiAdmCmd = new Script(true, "blockdev", 0, logger);
iScsiAdmCmd.add("--getsize64", deviceByPath);
diff --git a/plugins/pom.xml b/plugins/pom.xml
index e7d13871285e..e4904ccdf40b 100755
--- a/plugins/pom.xml
+++ b/plugins/pom.xml
@@ -129,6 +129,7 @@
storage/volume/default
storage/volume/nexenta
storage/volume/sample
+ storage/volume/ontap
storage/volume/solidfire
storage/volume/scaleio
storage/volume/linstor
diff --git a/plugins/storage/volume/ontap/README.md b/plugins/storage/volume/ontap/README.md
new file mode 100644
index 000000000000..e7e066aafb55
--- /dev/null
+++ b/plugins/storage/volume/ontap/README.md
@@ -0,0 +1,123 @@
+
+
+# Apache CloudStack - NetApp ONTAP Storage Plugin
+
+## Overview
+
+The NetApp ONTAP Storage Plugin provides integration between Apache CloudStack and NetApp ONTAP storage systems. This plugin enables CloudStack to provision and manage primary storage on ONTAP clusters, supporting both NAS (NFS) and SAN (iSCSI) protocols.
+
+## Features
+
+- **Primary Storage Support**: Provision and manage primary storage pools on NetApp ONTAP
+- **Multiple Protocols**: Support for NFS 3.0 and iSCSI protocols
+- **Unified Storage**: Integration with traditional ONTAP unified storage architecture
+- **KVM Hypervisor Support**: Supports KVM hypervisor environments
+- **Managed Storage**: Operates as managed storage with full lifecycle management
+- **Flexible Scoping**: Support for Zone-wide and Cluster-scoped storage pools
+
+## Architecture
+
+### Component Structure
+
+| Package | Description |
+|---------|-------------------------------------------------------|
+| `driver` | Primary datastore driver implementation |
+| `feign` | REST API clients and data models for ONTAP operations |
+| `lifecycle` | Storage pool lifecycle management |
+| `listener` | Host connection event handlers |
+| `provider` | Main provider and strategy factory |
+| `service` | ONTAP Storage strategy implementations (NAS/SAN) |
+| `utils` | Constants and helper utilities |
+
+## Requirements
+
+### ONTAP Requirements
+
+- NetApp ONTAP 9.15.1 or higher
+- Storage Virtual Machine (SVM) configured with appropriate protocols enabled
+- Management LIF accessible from CloudStack management server
+- Data LIF(s) must be accessible from hypervisor hosts and must be IPv4 addresses
+- Aggregates assigned to the SVM with sufficient capacity
+
+### CloudStack Requirements
+
+- Apache CloudStack 4.23.0 or later (the version this plugin is built against)
+- KVM hypervisor hosts
+- For iSCSI: Hosts must have iSCSI initiator configured with valid IQN
+- For NFS: Hosts must have NFS client packages installed
+
+### Minimum Volume Size
+
+ONTAP requires a minimum volume size of **1.56 GB** (1,677,721,600 bytes). The plugin will automatically adjust requested sizes below this threshold.
+
+## Configuration
+
+### Storage Pool Creation Parameters
+
+When creating an ONTAP primary storage pool, provide the following details in the URL field (semicolon-separated key=value pairs):
+
+| Parameter | Required | Description |
+|-----------|----------|-------------|
+| `username` | Yes | ONTAP cluster admin username |
+| `password` | Yes | ONTAP cluster admin password |
+| `svmName` | Yes | Storage Virtual Machine name |
+| `protocol` | Yes | Storage protocol (`NFS3` or `ISCSI`) |
+| `managementLIF` | Yes | ONTAP cluster management LIF IP address |
+
+### Example URL Format
+
+```
+username=admin;password=secretpass;svmName=svm1;protocol=ISCSI;managementLIF=192.168.1.100
+```
+
+## Port Configuration
+
+| Protocol | Default Port |
+|----------|--------------|
+| NFS | 2049 |
+| iSCSI | 3260 |
+| ONTAP Management API | 443 (HTTPS) |
+
+## Limitations
+
+- Supports only **KVM** hypervisor
+- Supports only **Unified ONTAP** storage (disaggregated not supported)
+- Supports only **NFS3** and **iSCSI** protocols
+- IPv6 type and FQDN LIFs are not supported
+
+## Troubleshooting
+
+### Common Issues
+
+1. **Connection Failures**
+ - Verify management LIF is reachable from CloudStack management server
+ - Check firewall rules for port 443
+
+2. **Protocol Errors**
+ - Ensure the protocol (NFS/iSCSI) is enabled on the SVM
+ - Verify Data LIFs are configured for the protocol
+
+3. **Capacity Errors**
+ - Check aggregate space availability
+ - Ensure requested volume size meets minimum requirements (1.56 GB)
+
+4. **Host Connection Issues**
+ - For iSCSI: Verify host IQN is properly configured in host's storage URL
+ - For NFS: Ensure NFS client is installed and running
diff --git a/plugins/storage/volume/ontap/pom.xml b/plugins/storage/volume/ontap/pom.xml
new file mode 100644
index 000000000000..0a7f43bde6c9
--- /dev/null
+++ b/plugins/storage/volume/ontap/pom.xml
@@ -0,0 +1,166 @@
+
+
+ 4.0.0
+ cloud-plugin-storage-volume-ontap
+ Apache CloudStack Plugin - Storage Volume ONTAP Provider
+
+ org.apache.cloudstack
+ cloudstack-plugins
+ 4.23.0.0-SNAPSHOT
+ ../../../pom.xml
+
+
+ 2021.0.7
+ 11.0
+ 4.5.14
+ 1.6.2
+ 3.8.1
+ 2.22.2
+ 2.13.4
+ 3.24.2
+
+
+
+
+ org.springframework.cloud
+ spring-cloud-dependencies
+ ${spring-cloud.version}
+ pom
+ import
+
+
+
+
+
+ org.apache.cloudstack
+ cloud-plugin-storage-volume-default
+ ${project.version}
+
+
+ io.github.openfeign
+ feign-core
+ ${openfeign.version}
+
+
+ io.github.openfeign
+ feign-httpclient
+ ${openfeign.version}
+
+
+ io.github.openfeign
+ feign-jackson
+ ${openfeign.version}
+
+
+ com.fasterxml.jackson.core
+ jackson-databind
+ ${jackson-databind.version}
+
+
+ org.apache.httpcomponents
+ httpclient
+ ${httpclient.version}
+
+
+ org.apache.cloudstack
+ cloud-engine-storage-volume
+ ${project.version}
+
+
+ io.swagger
+ swagger-annotations
+ ${swagger-annotations.version}
+
+
+
+ org.junit.jupiter
+ junit-jupiter-engine
+ 5.8.1
+ test
+
+
+
+
+ org.mockito
+ mockito-core
+ 3.12.4
+ test
+
+
+ org.mockito
+ mockito-junit-jupiter
+ 5.2.0
+ test
+
+
+
+
+ org.mockito
+ mockito-inline
+ 3.12.4
+ test
+
+
+ org.assertj
+ assertj-core
+ ${assertj.version}
+ test
+
+
+
+
+ central
+ Maven Central
+ https://repo.maven.apache.org/maven2
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+ ${maven-compiler-plugin.version}
+
+ 11
+ 11
+
+
+
+ maven-surefire-plugin
+ ${maven-surefire-plugin.version}
+
+ false
+
+ **/*Test.java
+
+
+
+
+ integration-test
+
+ test
+
+
+
+
+
+
+
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java
new file mode 100755
index 000000000000..9ab57dc60a62
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriver.java
@@ -0,0 +1,625 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.driver;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.to.DataObjectType;
+import com.cloud.agent.api.to.DataStoreTO;
+import com.cloud.agent.api.to.DataTO;
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.host.Host;
+import com.cloud.storage.Storage;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.Volume;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.ScopeType;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.storage.dao.VolumeDetailsDao;
+import com.cloud.utils.Pair;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.dao.VMInstanceDao;
+import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult;
+import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreCapabilities;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
+import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
+import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
+import org.apache.cloudstack.storage.command.CommandResult;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.feign.model.Lun;
+import org.apache.cloudstack.storage.service.SANStrategy;
+import org.apache.cloudstack.storage.service.StorageStrategy;
+import org.apache.cloudstack.storage.service.UnifiedSANStrategy;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.CloudStackVolume;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.cloudstack.storage.utils.Utility;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.inject.Inject;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Primary datastore driver for NetApp ONTAP storage systems.
+ * Handles volume lifecycle operations for iSCSI and NFS protocols.
+ */
+public class OntapPrimaryDatastoreDriver implements PrimaryDataStoreDriver {
+
+ private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreDriver.class);
+
+ @Inject private StoragePoolDetailsDao storagePoolDetailsDao;
+ @Inject private PrimaryDataStoreDao storagePoolDao;
+ @Inject private VMInstanceDao vmDao;
+ @Inject private VolumeDao volumeDao;
+ @Inject private VolumeDetailsDao volumeDetailsDao;
+    /**
+     * Advertises driver capabilities to the storage framework.
+     *
+     * @return map of DataStoreCapabilities names to "true"/"false" strings
+     */
+    @Override
+    public Map<String, String> getCapabilities() {
+        s_logger.trace("OntapPrimaryDatastoreDriver: getCapabilities: Called");
+        Map<String, String> mapCapabilities = new HashMap<>();
+        // RAW managed initial implementation: snapshot features not yet supported.
+        // TODO: flip these to true once the snapshot feature is implemented.
+        mapCapabilities.put(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString(), Boolean.FALSE.toString());
+        mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.FALSE.toString());
+        return mapCapabilities;
+    }
+
+    @Override
+    public DataTO getTO(DataObject data) {
+        // NOTE(review): returning null presumably defers to the framework's default DataTO — confirm.
+        return null;
+    }
+
+    @Override
+    public DataStoreTO getStoreTO(DataStore store) {
+        // NOTE(review): returning null presumably defers to the framework's default DataStoreTO — confirm.
+        return null;
+    }
+
+    /**
+     * Creates the backend storage object for a CloudStack volume and records the
+     * protocol-specific attach information on the volume row.
+     *
+     * iSCSI: creates a LUN, persists its name/uuid as volume details, maps it into the
+     * pool's igroup and stores the resulting iSCSI path on the volume.
+     * NFS3: no separate backend object is created; the volume is associated with the pool.
+     *
+     * @param dataStore  target primary data store (must not be null)
+     * @param dataObject object to create; only DataObjectType.VOLUME is supported
+     * @param callback   completion callback; always invoked, with a failure result on error
+     */
+    @Override
+    public void createAsync(DataStore dataStore, DataObject dataObject, AsyncCompletionCallback<CreateCmdResult> callback) {
+        CreateCmdResult createCmdResult = null;
+        String errMsg;
+
+        if (dataObject == null) {
+            throw new InvalidParameterValueException("createAsync: dataObject should not be null");
+        }
+        if (dataStore == null) {
+            throw new InvalidParameterValueException("createAsync: dataStore should not be null");
+        }
+        if (callback == null) {
+            throw new InvalidParameterValueException("createAsync: callback should not be null");
+        }
+
+        try {
+            s_logger.info("createAsync: Started for data store name [{}] and data object name [{}] of type [{}]",
+                    dataStore.getName(), dataObject.getName(), dataObject.getType());
+
+            StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
+            if (storagePool == null) {
+                s_logger.error("createAsync: Storage Pool not found for id: " + dataStore.getId());
+                throw new CloudRuntimeException("createAsync: Storage Pool not found for id: " + dataStore.getId());
+            }
+            String storagePoolUuid = dataStore.getUuid();
+            Map<String, String> details = storagePoolDetailsDao.listDetailsKeyPairs(dataStore.getId());
+
+            if (dataObject.getType() != DataObjectType.VOLUME) {
+                errMsg = "Invalid DataObjectType (" + dataObject.getType() + ") passed to createAsync";
+                s_logger.error(errMsg);
+                throw new CloudRuntimeException(errMsg);
+            }
+
+            VolumeInfo volInfo = (VolumeInfo) dataObject;
+
+            // Create the backend storage object (LUN for iSCSI, no-op for NFS).
+            CloudStackVolume created = createCloudStackVolume(dataStore, volInfo, details);
+
+            // Update the CloudStack volume record with pool association and protocol-specific details.
+            VolumeVO volumeVO = volumeDao.findById(volInfo.getId());
+            if (volumeVO == null) {
+                // Previously a null record fell through and completed the callback with a null result.
+                throw new CloudRuntimeException("createAsync: CloudStack volume record not found for id: " + volInfo.getId());
+            }
+            volumeVO.setPoolType(storagePool.getPoolType());
+            volumeVO.setPoolId(storagePool.getId());
+
+            String protocol = details.get(Constants.PROTOCOL);
+            if (ProtocolType.ISCSI.name().equalsIgnoreCase(protocol)) {
+                String svmName = details.get(Constants.SVM_NAME);
+                String lunName = created != null && created.getLun() != null ? created.getLun().getName() : null;
+                if (lunName == null) {
+                    throw new CloudRuntimeException("createAsync: Missing LUN name for volume " + volInfo.getId());
+                }
+
+                // Persist LUN details for future operations (delete, grant/revoke access).
+                volumeDetailsDao.addDetail(volInfo.getId(), Constants.LUN_DOT_UUID, created.getLun().getUuid(), false);
+                volumeDetailsDao.addDetail(volInfo.getId(), Constants.LUN_DOT_NAME, lunName, false);
+                if (created.getLun().getUuid() != null) {
+                    volumeVO.setFolder(created.getLun().getUuid());
+                }
+
+                // Create LUN-to-igroup mapping and retrieve the assigned LUN number.
+                UnifiedSANStrategy sanStrategy = (UnifiedSANStrategy) Utility.getStrategyByStoragePoolDetails(details);
+                String accessGroupName = Utility.getIgroupName(svmName, storagePoolUuid);
+                String lunNumber = sanStrategy.ensureLunMapped(svmName, lunName, accessGroupName);
+
+                // iSCSI path (/<pool path>/<lun number>) used for KVM/libvirt attachment.
+                String iscsiPath = Constants.SLASH + storagePool.getPath() + Constants.SLASH + lunNumber;
+                volumeVO.set_iScsiName(iscsiPath);
+                volumeVO.setPath(iscsiPath);
+                s_logger.info("createAsync: Volume [{}] iSCSI path set to {}", volumeVO.getId(), iscsiPath);
+                createCmdResult = new CreateCmdResult(null, new Answer(null, true, null));
+
+            } else if (ProtocolType.NFS3.name().equalsIgnoreCase(protocol)) {
+                createCmdResult = new CreateCmdResult(volInfo.getUuid(), new Answer(null, true, null));
+                s_logger.info("createAsync: Managed NFS volume [{}] associated with pool {}",
+                        volumeVO.getId(), storagePool.getId());
+            } else {
+                // Previously an unrecognized protocol completed the callback with a null result; fail loudly instead.
+                throw new CloudRuntimeException("createAsync: Unsupported protocol [" + protocol + "] for volume " + volInfo.getId());
+            }
+            volumeDao.update(volumeVO.getId(), volumeVO);
+        } catch (Exception e) {
+            errMsg = e.getMessage();
+            s_logger.error("createAsync: Failed for dataObject name [{}]: {}", dataObject.getName(), errMsg);
+            createCmdResult = new CreateCmdResult(null, new Answer(null, false, errMsg));
+            createCmdResult.setResult(e.toString());
+        } finally {
+            if (createCmdResult != null && createCmdResult.isSuccess()) {
+                s_logger.info("createAsync: Operation completed successfully for {}", dataObject.getType());
+            }
+            callback.complete(createCmdResult);
+        }
+    }
+
+    /**
+     * Creates a volume on the ONTAP backend via the protocol-appropriate strategy.
+     *
+     * @param dataStore  primary data store the volume belongs to
+     * @param dataObject object to create; only DataObjectType.VOLUME is supported
+     * @param details    storage pool detail key/value pairs (protocol, SVM name, credentials, ...)
+     * @return the created backend volume descriptor as returned by the strategy
+     */
+    private CloudStackVolume createCloudStackVolume(DataStore dataStore, DataObject dataObject, Map details) {
+        StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
+        if (storagePool == null) {
+            s_logger.error("createCloudStackVolume: Storage Pool not found for id: {}", dataStore.getId());
+            throw new CloudRuntimeException("createCloudStackVolume: Storage Pool not found for id: " + dataStore.getId());
+        }
+
+        // Strategy selection (NAS vs SAN) is driven by the pool's protocol detail.
+        StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details);
+
+        if (dataObject.getType() == DataObjectType.VOLUME) {
+            VolumeInfo volumeObject = (VolumeInfo) dataObject;
+            CloudStackVolume cloudStackVolumeRequest = Utility.createCloudStackVolumeRequestByProtocol(storagePool, details, volumeObject);
+            return storageStrategy.createCloudStackVolume(cloudStackVolumeRequest);
+        } else {
+            throw new CloudRuntimeException("createCloudStackVolume: Unsupported DataObjectType: " + dataObject.getType());
+        }
+    }
+
+ /**
+ * Deletes a volume from the ONTAP storage system.
+ */
+ @Override
+ public void deleteAsync(DataStore store, DataObject data, AsyncCompletionCallback callback) {
+ CommandResult commandResult = new CommandResult();
+ try {
+ if (store == null || data == null) {
+ throw new CloudRuntimeException("deleteAsync: store or data is null");
+ }
+
+ if (data.getType() == DataObjectType.VOLUME) {
+ StoragePoolVO storagePool = storagePoolDao.findById(store.getId());
+ if (storagePool == null) {
+ s_logger.error("deleteAsync: Storage Pool not found for id: " + store.getId());
+ throw new CloudRuntimeException("deleteAsync: Storage Pool not found for id: " + store.getId());
+ }
+ Map details = storagePoolDetailsDao.listDetailsKeyPairs(store.getId());
+ StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details);
+ s_logger.info("createCloudStackVolumeForTypeVolume: Connection to Ontap SVM [{}] successful, preparing CloudStackVolumeRequest", details.get(Constants.SVM_NAME));
+ VolumeInfo volumeInfo = (VolumeInfo) data;
+ CloudStackVolume cloudStackVolumeRequest = createDeleteCloudStackVolumeRequest(storagePool,details,volumeInfo);
+ storageStrategy.deleteCloudStackVolume(cloudStackVolumeRequest);
+ s_logger.error("deleteAsync : Volume deleted: " + volumeInfo.getId());
+ commandResult.setResult(null);
+ commandResult.setSuccess(true);
+ }
+ } catch (Exception e) {
+ s_logger.error("deleteAsync: Failed for data object [{}]: {}", data, e.getMessage());
+ commandResult.setSuccess(false);
+ commandResult.setResult(e.getMessage());
+ } finally {
+ callback.complete(commandResult);
+ }
+ }
+
+    @Override
+    public void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback callback) {
+        // Not implemented: canCopy() returns false, so the framework should not route copies here.
+    }
+
+    @Override
+    public void copyAsync(DataObject srcData, DataObject destData, Host destHost, AsyncCompletionCallback callback) {
+        // Not implemented: canCopy() returns false, so the framework should not route copies here.
+    }
+
+    @Override
+    public boolean canCopy(DataObject srcData, DataObject destData) {
+        // Driver-side copy is not supported in this initial implementation.
+        return false;
+    }
+
+    @Override
+    public void resize(DataObject data, AsyncCompletionCallback callback) {
+        // Volume resize is not implemented in this initial implementation.
+    }
+
+    @Override
+    public ChapInfo getChapInfo(DataObject dataObject) {
+        // No CHAP authentication support; igroup membership controls iSCSI access instead.
+        return null;
+    }
+
+    /**
+     * Grants a host access to a volume.
+     *
+     * iSCSI: verifies the host's initiator is registered in the pool's igroup, ensures the
+     * LUN is mapped, and refreshes the volume's iSCSI path if the LUN number changed.
+     * NFS: access is implicit via the mount; nothing to do.
+     *
+     * @return true when access is granted (or already in place)
+     */
+    @Override
+    public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore) {
+        try {
+            if (dataStore == null) {
+                throw new InvalidParameterValueException("grantAccess: dataStore should not be null");
+            }
+            if (dataObject == null) {
+                throw new InvalidParameterValueException("grantAccess: dataObject should not be null");
+            }
+            if (host == null) {
+                throw new InvalidParameterValueException("grantAccess: host should not be null");
+            }
+
+            StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
+            if (storagePool == null) {
+                s_logger.error("grantAccess: Storage Pool not found for id: " + dataStore.getId());
+                throw new CloudRuntimeException("grantAccess: Storage Pool not found for id: " + dataStore.getId());
+            }
+            String storagePoolUuid = dataStore.getUuid();
+
+            // ONTAP managed storage only supports cluster and zone scoped pools.
+            if (storagePool.getScope() != ScopeType.CLUSTER && storagePool.getScope() != ScopeType.ZONE) {
+                s_logger.error("grantAccess: Only Cluster and Zone scoped primary storage is supported for storage Pool: " + storagePool.getName());
+                throw new CloudRuntimeException("grantAccess: Only Cluster and Zone scoped primary storage is supported for Storage Pool: " + storagePool.getName());
+            }
+
+            if (dataObject.getType() != DataObjectType.VOLUME) {
+                s_logger.error("Invalid DataObjectType (" + dataObject.getType() + ") passed to grantAccess");
+                throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to grantAccess");
+            }
+
+            VolumeVO volumeVO = volumeDao.findById(dataObject.getId());
+            if (volumeVO == null) {
+                s_logger.error("grantAccess: CloudStack Volume not found for id: " + dataObject.getId());
+                throw new CloudRuntimeException("grantAccess: CloudStack Volume not found for id: " + dataObject.getId());
+            }
+
+            Map<String, String> details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId());
+            String svmName = details.get(Constants.SVM_NAME);
+
+            if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) {
+                // Guard against a missing detail row (e.g. partially-created volume) instead of NPE-ing
+                // on findDetail(...).getValue(); mirrors the null-safe pattern in revokeAccessForVolume.
+                String cloudStackVolumeName = volumeDetailsDao.findDetail(volumeVO.getId(), Constants.LUN_DOT_NAME) != null
+                        ? volumeDetailsDao.findDetail(volumeVO.getId(), Constants.LUN_DOT_NAME).getValue() : null;
+                if (cloudStackVolumeName == null) {
+                    throw new CloudRuntimeException("grantAccess: Missing LUN name detail for volume " + volumeVO.getId());
+                }
+                UnifiedSANStrategy sanStrategy = (UnifiedSANStrategy) Utility.getStrategyByStoragePoolDetails(details);
+                String accessGroupName = Utility.getIgroupName(svmName, storagePoolUuid);
+
+                // Verify host initiator is registered in the igroup before allowing access.
+                if (!sanStrategy.validateInitiatorInAccessGroup(host.getStorageUrl(), svmName, accessGroupName)) {
+                    throw new CloudRuntimeException("grantAccess: Host initiator [" + host.getStorageUrl() +
+                            "] is not present in iGroup [" + accessGroupName + "]");
+                }
+
+                // Create or retrieve the existing LUN mapping.
+                String lunNumber = sanStrategy.ensureLunMapped(svmName, cloudStackVolumeName, accessGroupName);
+
+                // Update volume path if changed (e.g., after migration or re-mapping).
+                String iscsiPath = Constants.SLASH + storagePool.getPath() + Constants.SLASH + lunNumber;
+                if (volumeVO.getPath() == null || !volumeVO.getPath().equals(iscsiPath)) {
+                    volumeVO.set_iScsiName(iscsiPath);
+                    volumeVO.setPath(iscsiPath);
+                }
+            } else if (ProtocolType.NFS3.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) {
+                // For NFS, no access grant needed - file is accessible via mount.
+                s_logger.debug("grantAccess: NFS volume [{}], no igroup mapping required", volumeVO.getUuid());
+                return true;
+            }
+            volumeVO.setPoolType(storagePool.getPoolType());
+            volumeVO.setPoolId(storagePool.getId());
+            volumeDao.update(volumeVO.getId(), volumeVO);
+            return true;
+        } catch (Exception e) {
+            s_logger.error("grantAccess: Failed for dataObject [{}]: {}", dataObject, e.getMessage());
+            throw new CloudRuntimeException("grantAccess: Failed with error: " + e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Revokes a host's access to a volume.
+     *
+     * Skips the revoke when the volume is still attached to a VM that is not
+     * Destroyed/Expunging/Error, to avoid pulling storage out from under a live guest.
+     *
+     * @param dataObject volume whose access is being revoked; only DataObjectType.VOLUME is handled
+     * @param host       host losing access
+     * @param dataStore  primary data store holding the volume
+     */
+    @Override
+    public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore) {
+        try {
+            if (dataStore == null) {
+                throw new InvalidParameterValueException("revokeAccess: dataStore should not be null");
+            }
+            if (dataObject == null) {
+                throw new InvalidParameterValueException("revokeAccess: dataObject should not be null");
+            }
+            if (host == null) {
+                throw new InvalidParameterValueException("revokeAccess: host should not be null");
+            }
+
+            // Safety check: don't revoke access if volume is still attached to an active VM
+            if (dataObject.getType() == DataObjectType.VOLUME) {
+                Volume volume = volumeDao.findById(dataObject.getId());
+                if (volume.getInstanceId() != null) {
+                    VirtualMachine vm = vmDao.findById(volume.getInstanceId());
+                    if (vm != null && !Arrays.asList(
+                            VirtualMachine.State.Destroyed,
+                            VirtualMachine.State.Expunging,
+                            VirtualMachine.State.Error).contains(vm.getState())) {
+                        s_logger.warn("revokeAccess: Volume [{}] is still attached to VM [{}] in state [{}], skipping revokeAccess",
+                                dataObject.getId(), vm.getInstanceName(), vm.getState());
+                        return;
+                    }
+                }
+            }
+
+            StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
+            if (storagePool == null) {
+                s_logger.error("revokeAccess: Storage Pool not found for id: " + dataStore.getId());
+                throw new CloudRuntimeException("revokeAccess: Storage Pool not found for id: " + dataStore.getId());
+            }
+
+            // Same scope restriction as grantAccess: cluster- or zone-scoped pools only.
+            if (storagePool.getScope() != ScopeType.CLUSTER && storagePool.getScope() != ScopeType.ZONE) {
+                s_logger.error("revokeAccess: Only Cluster and Zone scoped primary storage is supported for storage Pool: " + storagePool.getName());
+                throw new CloudRuntimeException("revokeAccess: Only Cluster and Zone scoped primary storage is supported for Storage Pool: " + storagePool.getName());
+            }
+
+            if (dataObject.getType() == DataObjectType.VOLUME) {
+                VolumeVO volumeVO = volumeDao.findById(dataObject.getId());
+                if (volumeVO == null) {
+                    s_logger.error("revokeAccess: CloudStack Volume not found for id: " + dataObject.getId());
+                    throw new CloudRuntimeException("revokeAccess: CloudStack Volume not found for id: " + dataObject.getId());
+                }
+                revokeAccessForVolume(storagePool, volumeVO, host);
+            } else {
+                s_logger.error("revokeAccess: Invalid DataObjectType (" + dataObject.getType() + ") passed to revokeAccess");
+                throw new CloudRuntimeException("Invalid DataObjectType (" + dataObject.getType() + ") passed to revokeAccess");
+            }
+        } catch (Exception e) {
+            s_logger.error("revokeAccess: Failed for dataObject [{}]: {}", dataObject, e.getMessage());
+            throw new CloudRuntimeException("revokeAccess: Failed with error: " + e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Revokes volume access for the specified host (iSCSI only; NFS needs no unmap).
+     *
+     * Deliberately best-effort: each precondition failure (missing LUN detail, LUN or
+     * igroup gone on ONTAP, initiator not in igroup) logs a warning and returns rather
+     * than failing the caller, since there is nothing left to unmap in those cases.
+     *
+     * @param storagePool pool the volume belongs to
+     * @param volumeVO    CloudStack volume record
+     * @param host        host losing access
+     */
+    private void revokeAccessForVolume(StoragePoolVO storagePool, VolumeVO volumeVO, Host host) {
+        s_logger.info("revokeAccessForVolume: Revoking access to volume [{}] for host [{}]", volumeVO.getName(), host.getName());
+
+        // NOTE(review): generic type parameters appear stripped throughout this patch (raw Map);
+        // confirm against the original source.
+        Map details = storagePoolDetailsDao.listDetailsKeyPairs(storagePool.getId());
+        StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details);
+        String svmName = details.get(Constants.SVM_NAME);
+        String storagePoolUuid = storagePool.getUuid();
+
+        if (ProtocolType.ISCSI.name().equalsIgnoreCase(details.get(Constants.PROTOCOL))) {
+            String accessGroupName = Utility.getIgroupName(svmName, storagePoolUuid);
+
+            // Retrieve LUN name from volume details; if missing, volume may not have been fully created
+            String lunName = volumeDetailsDao.findDetail(volumeVO.getId(), Constants.LUN_DOT_NAME) != null ?
+                    volumeDetailsDao.findDetail(volumeVO.getId(), Constants.LUN_DOT_NAME).getValue() : null;
+            if (lunName == null) {
+                s_logger.warn("revokeAccessForVolume: No LUN name found for volume [{}]; skipping revoke", volumeVO.getId());
+                return;
+            }
+
+            // Verify LUN still exists on ONTAP (may have been manually deleted)
+            CloudStackVolume cloudStackVolume = getCloudStackVolumeByName(storageStrategy, svmName, lunName);
+            if (cloudStackVolume == null || cloudStackVolume.getLun() == null || cloudStackVolume.getLun().getUuid() == null) {
+                s_logger.warn("revokeAccessForVolume: LUN for volume [{}] not found on ONTAP, skipping revoke", volumeVO.getId());
+                return;
+            }
+
+            // Verify igroup still exists on ONTAP
+            AccessGroup accessGroup = getAccessGroupByName(storageStrategy, svmName, accessGroupName);
+            if (accessGroup == null || accessGroup.getIgroup() == null || accessGroup.getIgroup().getUuid() == null) {
+                s_logger.warn("revokeAccessForVolume: iGroup [{}] not found on ONTAP, skipping revoke", accessGroupName);
+                return;
+            }
+
+            // Verify host initiator is in the igroup before attempting to remove mapping
+            SANStrategy sanStrategy = (UnifiedSANStrategy) storageStrategy;
+            if (!sanStrategy.validateInitiatorInAccessGroup(host.getStorageUrl(), svmName, accessGroup.getIgroup().getName())) {
+                s_logger.warn("revokeAccessForVolume: Initiator [{}] is not in iGroup [{}], skipping revoke",
+                        host.getStorageUrl(), accessGroupName);
+                return;
+            }
+
+            // Remove the LUN mapping from the igroup
+            Map disableLogicalAccessMap = new HashMap<>();
+            disableLogicalAccessMap.put(Constants.LUN_DOT_UUID, cloudStackVolume.getLun().getUuid());
+            disableLogicalAccessMap.put(Constants.IGROUP_DOT_UUID, accessGroup.getIgroup().getUuid());
+            storageStrategy.disableLogicalAccess(disableLogicalAccessMap);
+
+            s_logger.info("revokeAccessForVolume: Successfully revoked access to LUN [{}] for host [{}]",
+                    lunName, host.getName());
+        }
+    }
+
+    /**
+     * Retrieves a volume (LUN) from ONTAP by name.
+     *
+     * @param storageStrategy strategy used to query ONTAP
+     * @param svmName         storage virtual machine scoping the lookup
+     * @param cloudStackVolumeName LUN name to look up
+     * @return the matching volume, or null when not found / LUN record incomplete
+     */
+    private CloudStackVolume getCloudStackVolumeByName(StorageStrategy storageStrategy, String svmName, String cloudStackVolumeName) {
+        Map getCloudStackVolumeMap = new HashMap<>();
+        getCloudStackVolumeMap.put(Constants.NAME, cloudStackVolumeName);
+        getCloudStackVolumeMap.put(Constants.SVM_DOT_NAME, svmName);
+
+        CloudStackVolume cloudStackVolume = storageStrategy.getCloudStackVolume(getCloudStackVolumeMap);
+        // Treat a result without a named LUN the same as "not found" so callers need one null check.
+        if (cloudStackVolume == null || cloudStackVolume.getLun() == null || cloudStackVolume.getLun().getName() == null) {
+            s_logger.warn("getCloudStackVolumeByName: LUN [{}] not found on ONTAP", cloudStackVolumeName);
+            return null;
+        }
+        return cloudStackVolume;
+    }
+
+    /**
+     * Retrieves an access group (igroup) from ONTAP by name.
+     *
+     * @param storageStrategy strategy used to query ONTAP
+     * @param svmName         storage virtual machine scoping the lookup
+     * @param accessGroupName igroup name to look up
+     * @return the matching access group, or null when not found / igroup record incomplete
+     */
+    private AccessGroup getAccessGroupByName(StorageStrategy storageStrategy, String svmName, String accessGroupName) {
+        Map getAccessGroupMap = new HashMap<>();
+        getAccessGroupMap.put(Constants.NAME, accessGroupName);
+        getAccessGroupMap.put(Constants.SVM_DOT_NAME, svmName);
+
+        AccessGroup accessGroup = storageStrategy.getAccessGroup(getAccessGroupMap);
+        // Treat a result without a named igroup the same as "not found" so callers need one null check.
+        if (accessGroup == null || accessGroup.getIgroup() == null || accessGroup.getIgroup().getName() == null) {
+            s_logger.warn("getAccessGroupByName: iGroup [{}] not found on ONTAP", accessGroupName);
+            return null;
+        }
+        return accessGroup;
+    }
+
+    // --- Capacity / stats / snapshot / VM-info overrides: mostly defaults for this initial version ---
+
+    @Override
+    public long getDataObjectSizeIncludingHypervisorSnapshotReserve(DataObject dataObject, StoragePool storagePool) {
+        // No hypervisor snapshot reserve accounting yet.
+        return 0;
+    }
+
+    @Override
+    public long getBytesRequiredForTemplate(TemplateInfo templateInfo, StoragePool storagePool) {
+        // Template space estimation not implemented.
+        return 0;
+    }
+
+    @Override
+    public long getUsedBytes(StoragePool storagePool) {
+        // Driver-side usage reporting not implemented (see canProvideStorageStats()).
+        return 0;
+    }
+
+    @Override
+    public long getUsedIops(StoragePool storagePool) {
+        // IOPS accounting not implemented.
+        return 0;
+    }
+
+    @Override
+    public void takeSnapshot(SnapshotInfo snapshot, AsyncCompletionCallback callback) {
+        // Snapshots unsupported; getCapabilities() advertises STORAGE_SYSTEM_SNAPSHOT=false.
+    }
+
+    @Override
+    public void revertSnapshot(SnapshotInfo snapshotOnImageStore, SnapshotInfo snapshotOnPrimaryStore, AsyncCompletionCallback callback) {
+        // Snapshots unsupported; getCapabilities() advertises STORAGE_SYSTEM_SNAPSHOT=false.
+    }
+
+    @Override
+    public void handleQualityOfServiceForVolumeMigration(VolumeInfo volumeInfo, QualityOfServiceState qualityOfServiceState) {
+        // No QoS handling during migration.
+    }
+
+    @Override
+    public boolean canProvideStorageStats() {
+        // Pool stats come from elsewhere; this driver does not report them.
+        return false;
+    }
+
+    @Override
+    public Pair getStorageStats(StoragePool storagePool) {
+        // Unused while canProvideStorageStats() is false.
+        // NOTE(review): raw Pair — generic parameters appear stripped in this patch; confirm.
+        return null;
+    }
+
+    @Override
+    public boolean canProvideVolumeStats() {
+        return false; // Not yet implemented for RAW managed NFS
+    }
+
+    @Override
+    public Pair getVolumeStats(StoragePool storagePool, String volumeId) {
+        // Unused while canProvideVolumeStats() is false.
+        return null;
+    }
+
+    @Override
+    public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
+        // Optimistic: access is actually gated per-volume in grantAccess().
+        return true;
+    }
+
+    @Override
+    public boolean isVmInfoNeeded() {
+        return true;
+    }
+
+    @Override
+    public void provideVmInfo(long vmId, long volumeId) {
+        // No-op despite isVmInfoNeeded() returning true — NOTE(review): confirm this is intentional.
+    }
+
+    @Override
+    public boolean isVmTagsNeeded(String tagKey) {
+        return true;
+    }
+
+    @Override
+    public void provideVmTags(long vmId, long volumeId, String tagValue) {
+        // No-op despite isVmTagsNeeded() returning true — NOTE(review): confirm this is intentional.
+    }
+
+    @Override
+    public boolean isStorageSupportHA(Storage.StoragePoolType type) {
+        return true;
+    }
+
+    @Override
+    public void detachVolumeFromAllStorageNodes(Volume volume) {
+        // No-op: per-host detach is handled via revokeAccess().
+    }
+
+    /**
+     * Builds the protocol-specific delete request for a volume.
+     *
+     * NFS3: identifies the volume by datastore id + VolumeInfo.
+     * ISCSI: identifies the LUN by the name/uuid details persisted at creation time.
+     *
+     * @throws CloudRuntimeException when the protocol is unsupported or the LUN name detail is missing
+     */
+    private CloudStackVolume createDeleteCloudStackVolumeRequest(StoragePool storagePool, Map<String, String> details, VolumeInfo volumeInfo) {
+        CloudStackVolume cloudStackVolumeDeleteRequest;
+
+        String protocol = details.get(Constants.PROTOCOL);
+        // NOTE(review): valueOf throws IllegalArgumentException (caught by deleteAsync) on null/unknown protocol.
+        ProtocolType protocolType = ProtocolType.valueOf(protocol);
+        switch (protocolType) {
+            case NFS3:
+                cloudStackVolumeDeleteRequest = new CloudStackVolume();
+                cloudStackVolumeDeleteRequest.setDatastoreId(String.valueOf(storagePool.getId()));
+                cloudStackVolumeDeleteRequest.setVolumeInfo(volumeInfo);
+                break;
+            case ISCSI:
+                // Retrieve LUN identifiers stored during volume creation. findDetail may return null
+                // for a partially-created volume — the old code dereferenced it before the null check.
+                String lunName = volumeDetailsDao.findDetail(volumeInfo.getId(), Constants.LUN_DOT_NAME) != null
+                        ? volumeDetailsDao.findDetail(volumeInfo.getId(), Constants.LUN_DOT_NAME).getValue() : null;
+                String lunUUID = volumeDetailsDao.findDetail(volumeInfo.getId(), Constants.LUN_DOT_UUID) != null
+                        ? volumeDetailsDao.findDetail(volumeInfo.getId(), Constants.LUN_DOT_UUID).getValue() : null;
+                if (lunName == null) {
+                    throw new CloudRuntimeException("deleteAsync: Missing LUN name for volume " + volumeInfo.getId());
+                }
+                cloudStackVolumeDeleteRequest = new CloudStackVolume();
+                Lun lun = new Lun();
+                lun.setName(lunName);
+                lun.setUuid(lunUUID);
+                cloudStackVolumeDeleteRequest.setLun(lun);
+                break;
+            default:
+                throw new CloudRuntimeException("createDeleteCloudStackVolumeRequest: Unsupported protocol " + protocol);
+        }
+        return cloudStackVolumeDeleteRequest;
+    }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignClientFactory.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignClientFactory.java
new file mode 100644
index 000000000000..3bbf3aaaafc4
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignClientFactory.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign;
+
+import feign.Feign;
+
+public class FeignClientFactory {
+
+ private final FeignConfiguration feignConfiguration;
+
+ public FeignClientFactory() {
+ this.feignConfiguration = new FeignConfiguration();
+ }
+
+ public FeignClientFactory(FeignConfiguration feignConfiguration) {
+ this.feignConfiguration = feignConfiguration;
+ }
+
+    public <T> T createClient(Class<T> clientClass, String baseURL) {
+ return Feign.builder()
+ .client(feignConfiguration.createClient())
+ .encoder(feignConfiguration.createEncoder())
+ .decoder(feignConfiguration.createDecoder())
+ .retryer(feignConfiguration.createRetryer())
+ .requestInterceptor(feignConfiguration.createRequestInterceptor())
+ .target(clientClass, baseURL);
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java
new file mode 100644
index 000000000000..d722a857c007
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/FeignConfiguration.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import feign.RequestInterceptor;
+import feign.Retryer;
+import feign.Client;
+import feign.httpclient.ApacheHttpClient;
+import feign.codec.Decoder;
+import feign.codec.Encoder;
+import feign.Response;
+import feign.codec.DecodeException;
+import feign.codec.EncodeException;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import org.apache.http.conn.ConnectionKeepAliveStrategy;
+import org.apache.http.conn.ssl.NoopHostnameVerifier;
+import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
+import org.apache.http.conn.ssl.TrustAllStrategy;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.ssl.SSLContexts;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.net.ssl.SSLContext;
+import java.io.IOException;
+import java.io.InputStream;
+import java.lang.reflect.Type;
+import java.nio.charset.StandardCharsets;
+import java.util.concurrent.TimeUnit;
+
+public class FeignConfiguration {
+ private static final Logger logger = LogManager.getLogger(FeignConfiguration.class);
+
+ private final int retryMaxAttempt = 3;
+ private final int retryMaxInterval = 5;
+ private final String ontapFeignMaxConnection = "80";
+ private final String ontapFeignMaxConnectionPerRoute = "20";
+ private final ObjectMapper objectMapper;
+
+ public FeignConfiguration() {
+ this.objectMapper = new ObjectMapper();
+ this.objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+ }
+
+ public Client createClient() {
+ int maxConn;
+ int maxConnPerRoute;
+ try {
+ maxConn = Integer.parseInt(this.ontapFeignMaxConnection);
+ } catch (Exception e) {
+ logger.error("ontapFeignClient: parse max connection failed, using default");
+ maxConn = 20;
+ }
+ try {
+ maxConnPerRoute = Integer.parseInt(this.ontapFeignMaxConnectionPerRoute);
+ } catch (Exception e) {
+ logger.error("ontapFeignClient: parse max connection per route failed, using default");
+ maxConnPerRoute = 2;
+ }
+ logger.debug("ontapFeignClient: maxConn={}, maxConnPerRoute={}", maxConn, maxConnPerRoute);
+ ConnectionKeepAliveStrategy keepAliveStrategy = (response, context) -> 0;
+ CloseableHttpClient httpClient = HttpClientBuilder.create()
+ .setMaxConnTotal(maxConn)
+ .setMaxConnPerRoute(maxConnPerRoute)
+ .setKeepAliveStrategy(keepAliveStrategy)
+ .setSSLSocketFactory(getSSLSocketFactory())
+ .setConnectionTimeToLive(60, TimeUnit.SECONDS)
+ .build();
+ return new ApacheHttpClient(httpClient);
+ }
+
+ private SSLConnectionSocketFactory getSSLSocketFactory() {
+ try {
+ SSLContext sslContext = SSLContexts.custom().loadTrustMaterial(null, new TrustAllStrategy()).build();
+ return new SSLConnectionSocketFactory(sslContext, new NoopHostnameVerifier());
+ } catch (Exception ex) {
+ throw new RuntimeException(ex);
+ }
+ }
+
+ public RequestInterceptor createRequestInterceptor() {
+ return template -> {
+ logger.info("Feign Request URL: {}", template.url());
+ logger.info("HTTP Method: {}", template.method());
+ logger.info("Headers: {}", template.headers());
+ if (template.body() != null) {
+ logger.info("Body: {}", new String(template.body(), StandardCharsets.UTF_8));
+ }
+ };
+ }
+
+ public Retryer createRetryer() {
+ return new Retryer.Default(1000L, retryMaxInterval * 1000L, retryMaxAttempt);
+ }
+
+ public Encoder createEncoder() {
+ return new Encoder() {
+ @Override
+ public void encode(Object object, Type bodyType, feign.RequestTemplate template) throws EncodeException {
+ if (object == null) {
+ template.body(null, StandardCharsets.UTF_8);
+ return;
+ }
+ try {
+ byte[] jsonBytes = objectMapper.writeValueAsBytes(object);
+ template.body(jsonBytes, StandardCharsets.UTF_8);
+ template.header("Content-Type", "application/json");
+ } catch (JsonProcessingException e) {
+ throw new EncodeException("Error encoding object to JSON", e);
+ }
+ }
+ };
+ }
+
+ public Decoder createDecoder() {
+ return new Decoder() {
+ @Override
+ public Object decode(Response response, Type type) throws IOException, DecodeException {
+ if (response.body() == null) {
+ logger.debug("Response body is null, returning null");
+ return null;
+ }
+ String json = null;
+ try (InputStream bodyStream = response.body().asInputStream()) {
+ json = new String(bodyStream.readAllBytes(), StandardCharsets.UTF_8);
+ logger.debug("Decoding JSON response: {}", json);
+ return objectMapper.readValue(json, objectMapper.getTypeFactory().constructType(type));
+ } catch (IOException e) {
+ logger.error("IOException during decoding. Status: {}, Raw body: {}", response.status(), json, e);
+ throw new DecodeException(response.status(), "Error decoding JSON response", response.request(), e);
+ } catch (Exception e) {
+ logger.error("Unexpected error during decoding. Status: {}, Type: {}, Raw body: {}", response.status(), type, json, e);
+ throw new DecodeException(response.status(), "Unexpected error during decoding", response.request(), e);
+ }
+ }
+ };
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/AggregateFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/AggregateFeignClient.java
new file mode 100644
index 000000000000..f756c3d32f18
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/AggregateFeignClient.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.client;
+
+import org.apache.cloudstack.storage.feign.model.Aggregate;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+import feign.Headers;
+import feign.Param;
+import feign.RequestLine;
+
+public interface AggregateFeignClient {
+
+ @RequestLine("GET /api/storage/aggregates")
+ @Headers({"Authorization: {authHeader}"})
+    OntapResponse<Aggregate> getAggregateResponse(@Param("authHeader") String authHeader);
+
+ @RequestLine("GET /api/storage/aggregates/{uuid}")
+ @Headers({"Authorization: {authHeader}"})
+ Aggregate getAggregateByUUID(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/ClusterFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/ClusterFeignClient.java
new file mode 100644
index 000000000000..582fb58e6f3b
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/ClusterFeignClient.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.client;
+
+import org.apache.cloudstack.storage.feign.model.Cluster;
+import feign.Headers;
+import feign.Param;
+import feign.RequestLine;
+
+public interface ClusterFeignClient {
+
+    @RequestLine("GET /api/cluster?return_records={returnRecords}")
+    @Headers({"Authorization: {authHeader}"})
+ Cluster getCluster(@Param("authHeader") String authHeader, @Param("returnRecords") boolean returnRecords);
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/JobFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/JobFeignClient.java
new file mode 100644
index 000000000000..535e112d9eb3
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/JobFeignClient.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.feign.client;
+
+import org.apache.cloudstack.storage.feign.model.Job;
+import feign.Headers;
+import feign.Param;
+import feign.RequestLine;
+
+public interface JobFeignClient {
+
+ @RequestLine("GET /api/cluster/jobs/{uuid}")
+ @Headers({"Authorization: {authHeader}"})
+ Job getJobByUUID(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java
new file mode 100644
index 000000000000..f48f83dc28de
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NASFeignClient.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.client;
+
+import feign.QueryMap;
+import org.apache.cloudstack.storage.feign.model.ExportPolicy;
+import org.apache.cloudstack.storage.feign.model.FileInfo;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+import feign.Headers;
+import feign.Param;
+import feign.RequestLine;
+
+import java.util.Map;
+
+public interface NASFeignClient {
+
+ // File Operations
+ @RequestLine("GET /api/storage/volumes/{volumeUuid}/files/{path}")
+ @Headers({"Authorization: {authHeader}"})
+    OntapResponse<FileInfo> getFileResponse(@Param("authHeader") String authHeader,
+ @Param("volumeUuid") String volumeUUID,
+ @Param("path") String filePath);
+
+ @RequestLine("DELETE /api/storage/volumes/{volumeUuid}/files/{path}")
+ @Headers({"Authorization: {authHeader}"})
+ void deleteFile(@Param("authHeader") String authHeader,
+ @Param("volumeUuid") String volumeUUID,
+ @Param("path") String filePath);
+
+ @RequestLine("PATCH /api/storage/volumes/{volumeUuid}/files/{path}")
+ @Headers({"Authorization: {authHeader}"})
+ void updateFile(@Param("authHeader") String authHeader,
+ @Param("volumeUuid") String volumeUUID,
+ @Param("path") String filePath,
+ FileInfo fileInfo);
+
+ @RequestLine("POST /api/storage/volumes/{volumeUuid}/files/{path}")
+ @Headers({"Authorization: {authHeader}"})
+ void createFile(@Param("authHeader") String authHeader,
+ @Param("volumeUuid") String volumeUUID,
+ @Param("path") String filePath,
+ FileInfo file);
+
+ // Export Policy Operations
+ @RequestLine("POST /api/protocols/nfs/export-policies")
+ @Headers({"Authorization: {authHeader}"})
+ void createExportPolicy(@Param("authHeader") String authHeader,
+ ExportPolicy exportPolicy);
+
+ @RequestLine("GET /api/protocols/nfs/export-policies")
+ @Headers({"Authorization: {authHeader}"})
+    OntapResponse<ExportPolicy> getExportPolicyResponse(@Param("authHeader") String authHeader, @QueryMap Map<String, String> queryMap);
+
+ @RequestLine("GET /api/protocols/nfs/export-policies/{id}")
+ @Headers({"Authorization: {authHeader}"})
+ ExportPolicy getExportPolicyById(@Param("authHeader") String authHeader,
+ @Param("id") String id);
+
+ @RequestLine("DELETE /api/protocols/nfs/export-policies/{id}")
+ @Headers({"Authorization: {authHeader}"})
+ void deleteExportPolicyById(@Param("authHeader") String authHeader,
+ @Param("id") String id);
+
+ @RequestLine("PATCH /api/protocols/nfs/export-policies/{id}")
+ @Headers({"Authorization: {authHeader}"})
+ OntapResponse updateExportPolicy(@Param("authHeader") String authHeader,
+ @Param("id") String id,
+ ExportPolicy request);
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NetworkFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NetworkFeignClient.java
new file mode 100644
index 000000000000..4dc82a68238e
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/NetworkFeignClient.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.feign.client;
+
+import feign.Headers;
+import feign.Param;
+import feign.QueryMap;
+import feign.RequestLine;
+import org.apache.cloudstack.storage.feign.model.IpInterface;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+
+import java.util.Map;
+
+public interface NetworkFeignClient {
+ @RequestLine("GET /api/network/ip/interfaces")
+ @Headers({"Authorization: {authHeader}"})
+    OntapResponse<IpInterface> getNetworkIpInterfaces(@Param("authHeader") String authHeader, @QueryMap Map<String, String> queryParams);
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java
new file mode 100644
index 000000000000..45a20fe876fe
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SANFeignClient.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.feign.client;
+
+import feign.QueryMap;
+import org.apache.cloudstack.storage.feign.model.Igroup;
+import org.apache.cloudstack.storage.feign.model.IscsiService;
+import org.apache.cloudstack.storage.feign.model.Lun;
+import org.apache.cloudstack.storage.feign.model.LunMap;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+import feign.Headers;
+import feign.Param;
+import feign.RequestLine;
+import java.util.Map;
+
+//TODO: Proper URLs should be added in the RequestLine annotations below
+public interface SANFeignClient {
+ // iSCSI Service APIs
+ @RequestLine("GET /api/protocols/san/iscsi/services")
+ @Headers({"Authorization: {authHeader}"})
+    OntapResponse<IscsiService> getIscsiServices(@Param("authHeader") String authHeader, @QueryMap Map<String, String> queryMap);
+
+ // LUN Operation APIs
+ @RequestLine("POST /api/storage/luns?return_records={returnRecords}")
+ @Headers({"Authorization: {authHeader}"})
+    OntapResponse<Lun> createLun(@Param("authHeader") String authHeader, @Param("returnRecords") boolean returnRecords, Lun lun);
+
+ @RequestLine("GET /api/storage/luns")
+ @Headers({"Authorization: {authHeader}"})
+    OntapResponse<Lun> getLunResponse(@Param("authHeader") String authHeader, @QueryMap Map<String, String> queryMap);
+
+    @RequestLine("GET /api/storage/luns/{uuid}")
+ @Headers({"Authorization: {authHeader}"})
+ Lun getLunByUUID(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
+
+    @RequestLine("PATCH /api/storage/luns/{uuid}")
+ @Headers({"Authorization: {authHeader}"})
+ void updateLun(@Param("authHeader") String authHeader, @Param("uuid") String uuid, Lun lun);
+
+ @RequestLine("DELETE /api/storage/luns/{uuid}")
+ @Headers({"Authorization: {authHeader}"})
+    void deleteLun(@Param("authHeader") String authHeader, @Param("uuid") String uuid, @QueryMap Map<String, String> queryMap);
+
+ // iGroup Operation APIs
+ @RequestLine("POST /api/protocols/san/igroups?return_records={returnRecords}")
+ @Headers({"Authorization: {authHeader}"})
+    OntapResponse<Igroup> createIgroup(@Param("authHeader") String authHeader, @Param("returnRecords") boolean returnRecords, Igroup igroupRequest);
+
+ @RequestLine("GET /api/protocols/san/igroups")
+ @Headers({"Authorization: {authHeader}"})
+    OntapResponse<Igroup> getIgroupResponse(@Param("authHeader") String authHeader, @QueryMap Map<String, String> queryMap);
+
+    @RequestLine("GET /api/protocols/san/igroups/{uuid}")
+ @Headers({"Authorization: {authHeader}"})
+ Igroup getIgroupByUUID(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
+
+ @RequestLine("DELETE /api/protocols/san/igroups/{uuid}")
+ @Headers({"Authorization: {authHeader}"})
+ void deleteIgroup(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
+
+ // LUN Maps Operation APIs
+    @RequestLine("POST /api/protocols/san/lun-maps?return_records={returnRecords}")
+    @Headers({"Authorization: {authHeader}"})
+    OntapResponse<LunMap> createLunMap(@Param("authHeader") String authHeader, @Param("returnRecords") boolean returnRecords, LunMap lunMap);
+
+
+ @RequestLine("GET /api/protocols/san/lun-maps")
+ @Headers({"Authorization: {authHeader}"})
+    OntapResponse<LunMap> getLunMapResponse(@Param("authHeader") String authHeader, @QueryMap Map<String, String> queryMap);
+
+ @RequestLine("DELETE /api/protocols/san/lun-maps/{lunUuid}/{igroupUuid}")
+ @Headers({"Authorization: {authHeader}"})
+ void deleteLunMap(@Param("authHeader") String authHeader,
+ @Param("lunUuid") String lunUUID,
+ @Param("igroupUuid") String igroupUUID);
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SvmFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SvmFeignClient.java
new file mode 100644
index 000000000000..29ea3b5f694f
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/SvmFeignClient.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.client;
+
+import feign.QueryMap;
+import org.apache.cloudstack.storage.feign.model.Svm;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+import feign.Headers;
+import feign.Param;
+import feign.RequestLine;
+import java.util.Map;
+
+public interface SvmFeignClient {
+
+ // SVM Operation APIs
+ @RequestLine("GET /api/svm/svms")
+ @Headers({"Authorization: {authHeader}"})
+    OntapResponse<Svm> getSvmResponse(@QueryMap Map<String, String> queryMap, @Param("authHeader") String authHeader);
+
+ @RequestLine("GET /api/svm/svms/{uuid}")
+ @Headers({"Authorization: {authHeader}"})
+ Svm getSvmByUUID(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java
new file mode 100644
index 000000000000..6384566487d4
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/client/VolumeFeignClient.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.feign.client;
+
+import feign.QueryMap;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.feign.model.response.JobResponse;
+import feign.Headers;
+import feign.Param;
+import feign.RequestLine;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+
+import java.util.Map;
+
+public interface VolumeFeignClient {
+
+ @RequestLine("DELETE /api/storage/volumes/{uuid}")
+ @Headers({"Authorization: {authHeader}"})
+ JobResponse deleteVolume(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
+
+ @RequestLine("POST /api/storage/volumes")
+ @Headers({"Authorization: {authHeader}"})
+ JobResponse createVolumeWithJob(@Param("authHeader") String authHeader, Volume volumeRequest);
+
+ @RequestLine("GET /api/storage/volumes")
+ @Headers({"Authorization: {authHeader}"})
+    OntapResponse<Volume> getAllVolumes(@Param("authHeader") String authHeader, @QueryMap Map<String, String> queryParams);
+
+ @RequestLine("GET /api/storage/volumes/{uuid}")
+ @Headers({"Authorization: {authHeader}"})
+ Volume getVolumeByUUID(@Param("authHeader") String authHeader, @Param("uuid") String uuid);
+
+ @RequestLine("GET /api/storage/volumes")
+ @Headers({"Authorization: {authHeader}"})
+    OntapResponse<Volume> getVolume(@Param("authHeader") String authHeader, @QueryMap Map<String, String> queryMap);
+
+ @RequestLine("PATCH /api/storage/volumes/{uuid}")
+ @Headers({ "Authorization: {authHeader}"})
+ JobResponse updateVolumeRebalancing(@Param("authHeader") String authHeader, @Param("uuid") String uuid, Volume volumeRequest);
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Aggregate.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Aggregate.java
new file mode 100644
index 000000000000..8ac1717604a5
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Aggregate.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonValue;
+
+import java.util.Objects;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class Aggregate {
+    // State of the aggregate as reported by ONTAP; unrecognized values map to null in fromValue
+ public enum StateEnum {
+ ONLINE("online");
+ private final String value;
+
+ StateEnum(String value) {
+ this.value = value;
+ }
+
+ @JsonValue
+ public String getValue() {
+ return value;
+ }
+
+ @Override
+ public String toString() {
+ return String.valueOf(value);
+ }
+
+ @JsonCreator
+ public static StateEnum fromValue(String text) {
+ for (StateEnum b : StateEnum.values()) {
+                if (String.valueOf(b.value).equalsIgnoreCase(text)) {
+ return b;
+ }
+ }
+ return null;
+ }
+ }
+
+ @JsonProperty("name")
+ private String name = null;
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(getName(), getUuid());
+ }
+
+ @JsonProperty("uuid")
+ private String uuid = null;
+
+ @JsonProperty("state")
+ private StateEnum state = null;
+
+ @JsonProperty("space")
+ private AggregateSpace space = null;
+
+
+ public Aggregate name(String name) {
+ this.name = name;
+ return this;
+ }
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public Aggregate uuid(String uuid) {
+ this.uuid = uuid;
+ return this;
+ }
+
+ public String getUuid() {
+ return uuid;
+ }
+
+ public void setUuid(String uuid) {
+ this.uuid = uuid;
+ }
+
+ public StateEnum getState() {
+ return state;
+ }
+
+ public AggregateSpace getSpace() {
+ return space;
+ }
+
+ public Double getAvailableBlockStorageSpace() {
+ if (space != null && space.blockStorage != null) {
+ return space.blockStorage.available;
+ }
+ return null;
+ }
+
+
+ @Override
+ public boolean equals(java.lang.Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ Aggregate diskAggregates = (Aggregate) o;
+ return Objects.equals(this.name, diskAggregates.name) &&
+ Objects.equals(this.uuid, diskAggregates.uuid);
+ }
+
+ /**
+ * Convert the given object to string with each line indented by 4 spaces
+ * (except the first line).
+ */
+ private String toIndentedString(java.lang.Object o) {
+ if (o == null) {
+ return "null";
+ }
+ return o.toString().replace("\n", "\n ");
+ }
+
+ @Override
+ public String toString() {
+        return "Aggregate [name=" + name + ", uuid=" + uuid + "]";
+ }
+
+ public static class AggregateSpace {
+ @JsonProperty("block_storage")
+ private AggregateSpaceBlockStorage blockStorage = null;
+ }
+
+ public static class AggregateSpaceBlockStorage {
+ @JsonProperty("available")
+ private Double available = null;
+ @JsonProperty("size")
+ private Double size = null;
+ @JsonProperty("used")
+ private Double used = null;
+ }
+
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/AntiRansomware.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/AntiRansomware.java
new file mode 100644
index 000000000000..21748dcd53ec
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/AntiRansomware.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+/**
+ * Feign model for ONTAP's anti-ransomware section; only the "state"
+ * string is mapped.
+ */
+// ignoreUnknown keeps deserialization from failing when ONTAP returns fields this
+// model does not map, consistent with every other feign model in this package
+// (Cluster, ExportPolicy, ExportRule, ...). The annotation is fully qualified so
+// this file's import block does not need to change.
+@com.fasterxml.jackson.annotation.JsonIgnoreProperties(ignoreUnknown = true)
+public class AntiRansomware {
+ @JsonProperty("state")
+ private String state;
+
+ /** @return the anti-ransomware state as reported by ONTAP, or null if absent */
+ public String getState() {
+ return state;
+ }
+
+ public void setState(String state) {
+ this.state = state;
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Cluster.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Cluster.java
new file mode 100644
index 000000000000..061372756175
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Cluster.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.Objects;
+
+/**
+ * Feign model for an ONTAP cluster: name, uuid, software version, health,
+ * and the san_optimized / disaggregated capability flags.
+ */
+@SuppressWarnings("checkstyle:RegexpSingleline")
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class Cluster {
+
+ @JsonProperty("name")
+ private String name = null;
+ @JsonProperty("uuid")
+ private String uuid = null;
+ @JsonProperty("version")
+ private Version version = null;
+ @JsonProperty("health")
+ private String health = null;
+
+ @JsonProperty("san_optimized")
+ private Boolean sanOptimized = null;
+
+ @JsonProperty("disaggregated")
+ private Boolean disaggregated = null;
+
+
+ public String getHealth() {
+ return health;
+ }
+
+ public void setHealth(String health) {
+ this.health = health;
+ }
+
+ /** Fluent setter for the cluster name; returns this instance for chaining. */
+ public Cluster name(String name) {
+ this.name = name;
+ return this;
+ }
+
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+
+ // uuid is read-only: populated by Jackson from the payload; no setter on purpose.
+ public String getUuid() {
+ return uuid;
+ }
+
+ /** Fluent setter for the software version; returns this instance for chaining. */
+ public Cluster version(Version version) {
+ this.version = version;
+ return this;
+ }
+
+ public Version getVersion() {
+ return version;
+ }
+
+ public void setVersion(Version version) {
+ this.version = version;
+ }
+
+ public Boolean getSanOptimized() {
+ return sanOptimized;
+ }
+
+ public void setSanOptimized(Boolean sanOptimized) {
+ this.sanOptimized = sanOptimized;
+ }
+
+ public Boolean getDisaggregated() {
+ return disaggregated;
+ }
+ public void setDisaggregated(Boolean disaggregated) {
+ this.disaggregated = disaggregated;
+ }
+
+ // Identity is (name, uuid); equals below compares the same two fields.
+ @Override
+ public int hashCode() {
+ return Objects.hash(getName(), getUuid());
+ }
+
+ @Override
+ public boolean equals(java.lang.Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ Cluster cluster = (Cluster) o;
+ return Objects.equals(this.name, cluster.name) &&
+ Objects.equals(this.uuid, cluster.uuid);
+ }
+ @Override
+ public String toString() {
+ return "Cluster{" +
+ "name='" + name + '\'' +
+ ", uuid='" + uuid + '\'' +
+ ", version=" + version +
+ ", sanOptimized=" + sanOptimized +
+ ", disaggregated=" + disaggregated +
+ '}';
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportPolicy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportPolicy.java
new file mode 100644
index 000000000000..8c7c0323e662
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportPolicy.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.math.BigInteger;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * Feign model for an ONTAP NFS export policy: ONTAP-assigned numeric id,
+ * name, the list of export rules and the owning SVM.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class ExportPolicy {
+
+ @JsonProperty("id")
+ private BigInteger id = null;
+ @JsonProperty("name")
+ private String name = null;
+ @JsonProperty("rules")
+ private List rules = null;
+ @JsonProperty("svm")
+ private Svm svm = null;
+
+ // id is read-only: assigned by ONTAP and populated by Jackson; no setter on purpose.
+ public BigInteger getId() {
+ return id;
+ }
+
+ /** Fluent setter for the policy name; returns this instance for chaining. */
+ public ExportPolicy name(String name) {
+ this.name = name;
+ return this;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /** Fluent setter for the export rules; returns this instance for chaining. */
+ public ExportPolicy rules(List rules) {
+ this.rules = rules;
+ return this;
+ }
+
+ public List getRules() {
+ return rules;
+ }
+
+ public void setRules(List rules) {
+ this.rules = rules;
+ }
+
+ /** Fluent setter for the owning SVM; returns this instance for chaining. */
+ public ExportPolicy svm(Svm svm) {
+ this.svm = svm;
+ return this;
+ }
+
+ public Svm getSvm() {
+ return svm;
+ }
+
+ public void setSvm(Svm svm) {
+ this.svm = svm;
+ }
+
+
+ // Identity is (id, name); hashCode below uses the same two fields.
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ ExportPolicy exportPolicy = (ExportPolicy) o;
+ return Objects.equals(this.id, exportPolicy.id) &&
+ Objects.equals(this.name, exportPolicy.name);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash( id, name);
+ }
+
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("class ExportPolicy {\n");
+ sb.append(" id: ").append(toIndentedString(id)).append("\n");
+ sb.append(" name: ").append(toIndentedString(name)).append("\n");
+ sb.append(" rules: ").append(toIndentedString(rules)).append("\n");
+ sb.append(" svm: ").append(toIndentedString(svm)).append("\n");
+ sb.append("}");
+ return sb.toString();
+ }
+ /** Indents every line after the first of o's string form (null-safe). */
+ private String toIndentedString(Object o) {
+ if (o == null) {
+ return "null";
+ }
+ return o.toString().replace("\n", "\n ");
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java
new file mode 100644
index 000000000000..788fc8b5544d
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/ExportRule.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.List;
+
+/**
+ * Feign model for a single rule inside an ONTAP NFS export policy: the
+ * client match list, allowed protocols, and the ro/rw/superuser access rules.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class ExportRule {
+ @JsonProperty("anonymous_user")
+ private String anonymousUser ;
+
+ @JsonProperty("clients")
+ private List clients = null;
+
+ // Rule position within the policy, assigned by ONTAP.
+ @JsonProperty("index")
+ private Integer index = null;
+
+ /** NFS protocol variants a rule can apply to. */
+ public enum ProtocolsEnum {
+ any("any"),
+
+ nfs("nfs"),
+
+ nfs3("nfs3"),
+
+ nfs4("nfs4");
+
+ private String value;
+
+ ProtocolsEnum(String value) {
+ this.value = value;
+ }
+
+ public String getValue() {
+ return value;
+ }
+
+ @Override
+ public String toString() {
+ return String.valueOf(value);
+ }
+
+ // Returns null (does not throw) when the text matches no constant.
+ public static ProtocolsEnum fromValue(String text) {
+ for (ProtocolsEnum b : ProtocolsEnum.values()) {
+ if (String.valueOf(b.value).equals(text)) {
+ return b;
+ }
+ }
+ return null;
+ }
+ }
+
+ @JsonProperty("protocols")
+ private List protocols = null;
+
+ @JsonProperty("ro_rule")
+ private List roRule = null;
+
+ @JsonProperty("rw_rule")
+ private List rwRule = null;
+
+ @JsonProperty("superuser")
+ private List superuser = null;
+
+
+ /** Fluent setter for the anonymous user mapping; returns this for chaining. */
+ public ExportRule anonymousUser(String anonymousUser) {
+ this.anonymousUser = anonymousUser;
+ return this;
+ }
+
+ public String getAnonymousUser() {
+ return anonymousUser;
+ }
+
+ public void setAnonymousUser(String anonymousUser) {
+ this.anonymousUser = anonymousUser;
+ }
+
+ /** Fluent setter for the client match list; returns this for chaining. */
+ public ExportRule clients(List clients) {
+ this.clients = clients;
+ return this;
+ }
+
+ public List getClients() {
+ return clients;
+ }
+
+ public void setClients(List clients) {
+ this.clients = clients;
+ }
+
+ public Integer getIndex() {
+ return index;
+ }
+ public void setIndex(Integer index)
+ {
+ this.index=index;
+ }
+
+ /** Fluent setter for the protocol list; returns this for chaining. */
+ public ExportRule protocols(List protocols) {
+ this.protocols = protocols;
+ return this;
+ }
+
+ public List getProtocols() {
+ return protocols;
+ }
+
+ public void setProtocols(List protocols) {
+ this.protocols = protocols;
+ }
+
+ /** A single client match entry of a rule (the "match" host/network spec). */
+ public static class ExportClient {
+ @JsonProperty("match")
+ private String match = null;
+
+ /** Fluent setter for the match spec; returns this for chaining. */
+ public ExportClient match (String match) {
+ this.match = match;
+ return this;
+ }
+ public String getMatch () {
+ return match;
+ }
+
+ public void setMatch (String match) {
+ this.match = match;
+ }
+ }
+
+ public List getRwRule() {
+ return rwRule;
+ }
+
+ public void setRwRule(List rwRule) {
+ this.rwRule = rwRule;
+ }
+
+ public List getRoRule() {
+ return roRule;
+ }
+
+ public void setRoRule(List roRule) {
+ this.roRule = roRule;
+ }
+
+ public List getSuperuser() {
+ return superuser;
+ }
+
+ public void setSuperuser(List superuser) {
+ this.superuser = superuser;
+ }
+
+ // NOTE(review): toString omits roRule, rwRule and superuser — consider
+ // including them if they matter for troubleshooting output.
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("class ExportRule {\n");
+
+ sb.append(" anonymousUser: ").append(toIndentedString(anonymousUser)).append("\n");
+ sb.append(" clients: ").append(toIndentedString(clients)).append("\n");
+ sb.append(" index: ").append(toIndentedString(index)).append("\n");
+ sb.append(" protocols: ").append(toIndentedString(protocols)).append("\n");
+ sb.append("}");
+ return sb.toString();
+ }
+ /** Indents every line after the first of o's string form (null-safe). */
+ private String toIndentedString(Object o) {
+ if (o == null) {
+ return "null";
+ }
+ return o.toString().replace("\n", "\n ");
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/FileInfo.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/FileInfo.java
new file mode 100644
index 000000000000..181620268932
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/FileInfo.java
@@ -0,0 +1,297 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonValue;
+
+import java.time.OffsetDateTime;
+import java.util.Objects;
+
+/**
+ * Feign model describing a single file or directory on an ONTAP volume:
+ * name/path, sizes, timestamps, type, and various per-file flags.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class FileInfo {
+ @JsonProperty("bytes_used")
+ private Long bytesUsed = null;
+ @JsonProperty("creation_time")
+ private OffsetDateTime creationTime = null;
+ @JsonProperty("fill_enabled")
+ private Boolean fillEnabled = null;
+ @JsonProperty("is_empty")
+ private Boolean isEmpty = null;
+ @JsonProperty("is_snapshot")
+ private Boolean isSnapshot = null;
+ @JsonProperty("is_vm_aligned")
+ private Boolean isVmAligned = null;
+ @JsonProperty("modified_time")
+ private OffsetDateTime modifiedTime = null;
+ @JsonProperty("name")
+ private String name = null;
+ @JsonProperty("overwrite_enabled")
+ private Boolean overwriteEnabled = null;
+ @JsonProperty("path")
+ private String path = null;
+ @JsonProperty("size")
+ private Long size = null;
+ @JsonProperty("target")
+ private String target = null;
+
+ /**
+ * Type of the file. Serialized via @JsonValue / @JsonCreator so the JSON
+ * wire form is the lowercase string ("file" / "directory").
+ */
+ public enum TypeEnum {
+ FILE("file"),
+ DIRECTORY("directory");
+
+ private String value;
+
+ TypeEnum(String value) {
+ this.value = value;
+ }
+
+ @JsonValue
+ public String getValue() {
+ return value;
+ }
+
+ @Override
+ public String toString() {
+ return String.valueOf(value);
+ }
+
+ // Returns null (does not throw) when the value matches no constant.
+ @JsonCreator
+ public static TypeEnum fromValue(String value) {
+ for (TypeEnum b : TypeEnum.values()) {
+ if (b.value.equals(value)) {
+ return b;
+ }
+ }
+ return null;
+ }
+ }
+
+ @JsonProperty("type")
+ private TypeEnum type = null;
+
+ @JsonProperty("unique_bytes")
+ private Long uniqueBytes = null;
+
+ @JsonProperty("unix_permissions")
+ private Integer unixPermissions = null;
+
+ /**
+ * The actual number of bytes used on disk by this file. If byte_offset and length parameters are specified, this will return the bytes used by the file within the given range.
+ * @return bytesUsed
+ **/
+ public Long getBytesUsed() {
+ return bytesUsed;
+ }
+
+ public OffsetDateTime getCreationTime() {
+ return creationTime;
+ }
+
+ /** Fluent setter for fill_enabled; returns this instance for chaining. */
+ public FileInfo fillEnabled(Boolean fillEnabled) {
+ this.fillEnabled = fillEnabled;
+ return this;
+ }
+
+ public Boolean isFillEnabled() {
+ return fillEnabled;
+ }
+
+ public void setFillEnabled(Boolean fillEnabled) {
+ this.fillEnabled = fillEnabled;
+ }
+
+
+ public Boolean isIsEmpty() {
+ return isEmpty;
+ }
+
+ public void setIsEmpty(Boolean isEmpty) {
+ this.isEmpty = isEmpty;
+ }
+
+ public Boolean isIsSnapshot() {
+ return isSnapshot;
+ }
+
+ public void setIsSnapshot(Boolean isSnapshot) {
+ this.isSnapshot = isSnapshot;
+ }
+
+
+ public Boolean isIsVmAligned() {
+ return isVmAligned;
+ }
+
+
+ public OffsetDateTime getModifiedTime() {
+ return modifiedTime;
+ }
+
+ /** Fluent setter for the file name; returns this instance for chaining. */
+ public FileInfo name(String name) {
+ this.name = name;
+ return this;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /** Fluent setter for overwrite_enabled; returns this instance for chaining. */
+ public FileInfo overwriteEnabled(Boolean overwriteEnabled) {
+ this.overwriteEnabled = overwriteEnabled;
+ return this;
+ }
+
+ public Boolean isOverwriteEnabled() {
+ return overwriteEnabled;
+ }
+
+ public void setOverwriteEnabled(Boolean overwriteEnabled) {
+ this.overwriteEnabled = overwriteEnabled;
+ }
+
+ /** Fluent setter for the file path; returns this instance for chaining. */
+ public FileInfo path(String path) {
+ this.path = path;
+ return this;
+ }
+
+ public String getPath() {
+ return path;
+ }
+
+ public void setPath(String path) {
+ this.path = path;
+ }
+ public Long getSize() {
+ return size;
+ }
+
+ public void setSize(Long size) {
+ this.size = size;
+ }
+
+ /** Fluent setter for the symlink target; returns this instance for chaining. */
+ public FileInfo target(String target) {
+ this.target = target;
+ return this;
+ }
+
+ public String getTarget() {
+ return target;
+ }
+
+ public void setTarget(String target) {
+ this.target = target;
+ }
+
+ /** Fluent setter for the file type; returns this instance for chaining. */
+ public FileInfo type(TypeEnum type) {
+ this.type = type;
+ return this;
+ }
+
+ public TypeEnum getType() {
+ return type;
+ }
+
+ public void setType(TypeEnum type) {
+ this.type = type;
+ }
+
+ public Long getUniqueBytes() {
+ return uniqueBytes;
+ }
+
+ /** Fluent setter for unix_permissions; returns this instance for chaining. */
+ public FileInfo unixPermissions(Integer unixPermissions) {
+ this.unixPermissions = unixPermissions;
+ return this;
+ }
+
+ public Integer getUnixPermissions() {
+ return unixPermissions;
+ }
+
+ public void setUnixPermissions(Integer unixPermissions) {
+ this.unixPermissions = unixPermissions;
+ }
+ // Identity is (name, path); hashCode below uses the same two fields.
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ FileInfo fileInfo = (FileInfo) o;
+ return Objects.equals(this.name, fileInfo.name) &&
+ Objects.equals(this.path, fileInfo.path);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(name, path);
+ }
+
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("class FileInfo {\n");
+ sb.append(" bytesUsed: ").append(toIndentedString(bytesUsed)).append("\n");
+ sb.append(" creationTime: ").append(toIndentedString(creationTime)).append("\n");
+ sb.append(" fillEnabled: ").append(toIndentedString(fillEnabled)).append("\n");
+ sb.append(" isEmpty: ").append(toIndentedString(isEmpty)).append("\n");
+ sb.append(" isSnapshot: ").append(toIndentedString(isSnapshot)).append("\n");
+ sb.append(" isVmAligned: ").append(toIndentedString(isVmAligned)).append("\n");
+ sb.append(" modifiedTime: ").append(toIndentedString(modifiedTime)).append("\n");
+ sb.append(" name: ").append(toIndentedString(name)).append("\n");
+ sb.append(" overwriteEnabled: ").append(toIndentedString(overwriteEnabled)).append("\n");
+ sb.append(" path: ").append(toIndentedString(path)).append("\n");
+ sb.append(" size: ").append(toIndentedString(size)).append("\n");
+ sb.append(" target: ").append(toIndentedString(target)).append("\n");
+ sb.append(" type: ").append(toIndentedString(type)).append("\n");
+ sb.append(" uniqueBytes: ").append(toIndentedString(uniqueBytes)).append("\n");
+ sb.append(" unixPermissions: ").append(toIndentedString(unixPermissions)).append("\n");
+ sb.append("}");
+ return sb.toString();
+ }
+
+ /**
+ * Convert the given object to string with each line indented by 4 spaces
+ * (except the first line).
+ */
+ private String toIndentedString(Object o) {
+ if (o == null) {
+ return "null";
+ }
+ return o.toString().replace("\n", "\n ");
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Igroup.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Igroup.java
new file mode 100644
index 000000000000..4dc07e349fad
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Igroup.java
@@ -0,0 +1,257 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * Feign model for an ONTAP initiator group (igroup): identity (name/uuid),
+ * owning SVM, member initiators, LUN maps, nested/parent igroups, host OS
+ * type and protocol.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class Igroup {
+ @JsonProperty("delete_on_unmap")
+ private Boolean deleteOnUnmap = null;
+ @JsonProperty("initiators")
+ private List initiators = null;
+ @JsonProperty("lun_maps")
+ private List lunMaps = null;
+ @JsonProperty("os_type")
+ private OsTypeEnum osType = null;
+
+ @JsonProperty("parent_igroups")
+ private List parentIgroups = null;
+
+ @JsonProperty("igroups")
+ private List igroups = null;
+
+ @JsonProperty("name")
+ private String name = null;
+
+ @JsonProperty("protocol")
+ private ProtocolEnum protocol = null;
+ @JsonProperty("svm")
+ private Svm svm = null;
+ @JsonProperty("uuid")
+ private String uuid = null;
+
+ /** Host operating-system type of the igroup's initiators. */
+ public enum OsTypeEnum {
+ hyper_v("hyper_v"),
+
+ linux("linux"),
+
+ vmware("vmware"),
+
+ windows("windows"),
+
+ xen("xen");
+
+ private String value;
+
+ OsTypeEnum(String value) {
+ this.value = value;
+ }
+
+ public String getValue() {
+ return value;
+ }
+
+ @Override
+ public String toString() {
+ return String.valueOf(value);
+ }
+
+ // Returns null (does not throw) when the text matches no constant.
+ public static OsTypeEnum fromValue(String text) {
+ for (OsTypeEnum b : OsTypeEnum.values()) {
+ if (String.valueOf(b.value).equals(text)) {
+ return b;
+ }
+ }
+ return null;
+ }
+ }
+
+ public List getParentIgroups() {
+ return parentIgroups;
+ }
+
+ public void setParentIgroups(List parentIgroups) {
+ this.parentIgroups = parentIgroups;
+ }
+
+ /** Fluent setter for nested igroups; returns this instance for chaining. */
+ public Igroup igroups(List igroups) {
+ this.igroups = igroups;
+ return this;
+ }
+
+ public List getIgroups() {
+ return igroups;
+ }
+
+ public void setIgroups(List igroups) {
+ this.igroups = igroups;
+ }
+
+ /** Access protocol of the igroup. */
+ public enum ProtocolEnum {
+ iscsi("iscsi"),
+
+ mixed("mixed");
+
+ private String value;
+
+ ProtocolEnum(String value) {
+ this.value = value;
+ }
+
+ public String getValue() {
+ return value;
+ }
+
+ @Override
+ public String toString() {
+ return String.valueOf(value);
+ }
+
+ // Returns null (does not throw) when the text matches no constant.
+ public static ProtocolEnum fromValue(String text) {
+ for (ProtocolEnum b : ProtocolEnum.values()) {
+ if (String.valueOf(b.value).equals(text)) {
+ return b;
+ }
+ }
+ return null;
+ }
+ }
+ /** Fluent setter for delete_on_unmap; returns this instance for chaining. */
+ public Igroup deleteOnUnmap(Boolean deleteOnUnmap) {
+ this.deleteOnUnmap = deleteOnUnmap;
+ return this;
+ }
+
+ public Boolean isDeleteOnUnmap() {
+ return deleteOnUnmap;
+ }
+
+ public void setDeleteOnUnmap(Boolean deleteOnUnmap) {
+ this.deleteOnUnmap = deleteOnUnmap;
+ }
+
+ /** Fluent setter for the initiator list; returns this instance for chaining. */
+ public Igroup initiators(List initiators) {
+ this.initiators = initiators;
+ return this;
+ }
+ public List getInitiators() {
+ return initiators;
+ }
+
+ public void setInitiators(List initiators) {
+ this.initiators = initiators;
+ }
+
+ /** Fluent setter for the LUN maps; returns this instance for chaining. */
+ public Igroup lunMaps(List lunMaps) {
+ this.lunMaps = lunMaps;
+ return this;
+ }
+ public List getLunMaps() {
+ return lunMaps;
+ }
+
+ public void setLunMaps(List lunMaps) {
+ this.lunMaps = lunMaps;
+ }
+
+ /** Fluent setter for the igroup name; returns this instance for chaining. */
+ public Igroup name(String name) {
+ this.name = name;
+ return this;
+ }
+
+ public String getName() {
+ return name;
+ }
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ /** Fluent setter for the OS type; returns this instance for chaining. */
+ public Igroup osType(OsTypeEnum osType) {
+ this.osType = osType;
+ return this;
+ }
+ public OsTypeEnum getOsType() {
+ return osType;
+ }
+
+ public void setOsType(OsTypeEnum osType) {
+ this.osType = osType;
+ }
+
+ /** Fluent setter for the protocol; returns this instance for chaining. */
+ public Igroup protocol(ProtocolEnum protocol) {
+ this.protocol = protocol;
+ return this;
+ }
+
+ public ProtocolEnum getProtocol() {
+ return protocol;
+ }
+
+ public void setProtocol(ProtocolEnum protocol) {
+ this.protocol = protocol;
+ }
+
+ /** Fluent setter for the owning SVM; returns this instance for chaining. */
+ public Igroup svm(Svm svm) {
+ this.svm = svm;
+ return this;
+ }
+ public Svm getSvm() {
+ return svm;
+ }
+
+ public void setSvm(Svm svm) {
+ this.svm = svm;
+ }
+
+ // uuid is read-only: assigned by ONTAP and populated by Jackson; no setter on purpose.
+ public String getUuid() {
+ return uuid;
+ }
+
+ // Identity is (name, uuid); equals below compares the same two fields.
+ @Override
+ public int hashCode() {
+ return Objects.hash(name, uuid);
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj)
+ return true;
+ if (obj == null)
+ return false;
+ if (getClass() != obj.getClass())
+ return false;
+ Igroup other = (Igroup) obj;
+ return Objects.equals(name, other.name) && Objects.equals(uuid, other.uuid);
+ }
+
+ // Fixed: the previous version concatenated dangling ", replication=" and
+ // ", portset=" labels with no values (leftovers of removed fields), which
+ // produced garbled log output like "...replication=, osType=...portset=]".
+ @Override
+ public String toString() {
+ return "Igroup [deleteOnUnmap=" + deleteOnUnmap + ", initiators=" + initiators + ", lunMaps=" + lunMaps
+ + ", name=" + name + ", osType=" + osType + ", parentIgroups="
+ + parentIgroups + ", igroups=" + igroups + ", protocol=" + protocol + ", svm=" + svm + ", uuid=" + uuid
+ + "]";
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Initiator.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Initiator.java
new file mode 100644
index 000000000000..b0a5bd24272a
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Initiator.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+/** Feign model for a single igroup initiator; only its "name" field is mapped. */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class Initiator {
+ @JsonProperty("name")
+ private String name = null;
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/IpInterface.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/IpInterface.java
new file mode 100644
index 000000000000..c15798a42b70
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/IpInterface.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * Feign model for an ONTAP logical IP interface (LIF): uuid, name, its
+ * address (nested IpInfo), the owning SVM and the service names it carries.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class IpInterface {
+ @JsonProperty("uuid")
+ private String uuid;
+
+ @JsonProperty("name")
+ private String name;
+
+ @JsonProperty("ip")
+ private IpInfo ip;
+
+ @JsonProperty("svm")
+ private Svm svm;
+
+ @JsonProperty("services")
+ private List services;
+
+ // Getters and setters
+ public String getUuid() {
+ return uuid;
+ }
+
+ public void setUuid(String uuid) {
+ this.uuid = uuid;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public IpInfo getIp() {
+ return ip;
+ }
+
+ public void setIp(IpInfo ip) {
+ this.ip = ip;
+ }
+
+ public Svm getSvm() {
+ return svm;
+ }
+
+ public void setSvm(Svm svm) {
+ this.svm = svm;
+ }
+
+ public List getServices() {
+ return services;
+ }
+
+ public void setServices(List services) {
+ this.services = services;
+ }
+
+ // Unlike the other models in this package, equality here covers ALL fields,
+ // not just the identity pair; hashCode matches.
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ IpInterface that = (IpInterface) o;
+ return Objects.equals(uuid, that.uuid) &&
+ Objects.equals(name, that.name) &&
+ Objects.equals(ip, that.ip) &&
+ Objects.equals(svm, that.svm) &&
+ Objects.equals(services, that.services);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(uuid, name, ip, svm, services);
+ }
+
+ @Override
+ public String toString() {
+ return "IpInterface{" +
+ "uuid='" + uuid + '\'' +
+ ", name='" + name + '\'' +
+ ", ip=" + ip +
+ ", svm=" + svm +
+ ", services=" + services +
+ '}';
+ }
+
+ // Nested class for IP information (the "ip" object; only "address" is mapped)
+ @JsonIgnoreProperties(ignoreUnknown = true)
+ @JsonInclude(JsonInclude.Include.NON_NULL)
+ public static class IpInfo {
+ @JsonProperty("address")
+ private String address;
+
+ public String getAddress() {
+ return address;
+ }
+
+ public void setAddress(String address) {
+ this.address = address;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ IpInfo ipInfo = (IpInfo) o;
+ return Objects.equals(address, ipInfo.address);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(address);
+ }
+
+ @Override
+ public String toString() {
+ return "IpInfo{" +
+ "address='" + address + '\'' +
+ '}';
+ }
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/IscsiService.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/IscsiService.java
new file mode 100644
index 000000000000..06d1ca92735f
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/IscsiService.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+
+/**
+ * An iSCSI service defines the properties of the iSCSI target for an SVM.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class IscsiService {
+ @JsonProperty("enabled")
+ private Boolean enabled = null;
+
+ @JsonProperty("svm")
+ private Svm svm = null;
+
+ @JsonProperty("target")
+ private IscsiServiceTarget target = null;
+
+ public Boolean isEnabled() {
+ return enabled;
+ }
+
+ public void setEnabled(Boolean enabled) {
+ this.enabled = enabled;
+ }
+
+ public Svm getSvm() {
+ return svm;
+ }
+
+ public void setSvm(Svm svm) {
+ this.svm = svm;
+ }
+
+ public IscsiServiceTarget getTarget() {
+ return target;
+ }
+
+ public void setTarget(IscsiServiceTarget target) {
+ this.target = target;
+ }
+
+ @Override
+ public String toString() {
+ return "IscsiService{" +
+ "enabled=" + enabled +
+ ", svm=" + svm +
+ ", target=" + target +
+ '}';
+ }
+
+ /**
+ * iSCSI target information
+ */
+ @JsonIgnoreProperties(ignoreUnknown = true)
+ @JsonInclude(JsonInclude.Include.NON_NULL)
+ public static class IscsiServiceTarget {
+ @JsonProperty("alias")
+ private String alias = null;
+
+ @JsonProperty("name")
+ private String name = null;
+
+ public String getAlias() {
+ return alias;
+ }
+
+ public void setAlias(String alias) {
+ this.alias = alias;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ @Override
+ public String toString() {
+ return "IscsiServiceTarget{" +
+ "alias='" + alias + '\'' +
+ ", name='" + name + '\'' +
+ '}';
+ }
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Job.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Job.java
new file mode 100644
index 000000000000..cdeaf2ed8388
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Job.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonInclude.Include;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+/**
+ * Represents an asynchronous ONTAP REST job record, including its
+ * state, message, code, and nested error details returned by the job API.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(Include.NON_NULL)
+public class Job {
+
+ @JsonProperty("uuid")
+ String uuid;
+ @JsonProperty("description")
+ String description;
+ @JsonProperty("state")
+ String state;
+ @JsonProperty("message")
+ String message;
+ @JsonProperty("code")
+ String code;
+ @JsonProperty("_links")
+ private Links links;
+
+ @JsonProperty("error")
+ private JobError error;
+ public JobError getError () { return error; }
+ public void setError (JobError error) { this.error = error; }
+ public Links getLinks() { return links; }
+ public void setLinks(Links links) { this.links = links; }
+ public String getUuid() {
+ return uuid;
+ }
+ public void setUuid(String uuid) {
+ this.uuid = uuid;
+ }
+ public String getDescription() {
+ return description;
+ }
+ public void setDescription(String description) {
+ this.description = description;
+ }
+ public String getState() {
+ return state;
+ }
+ public void setState(String state) {
+ this.state = state;
+ }
+ public String getMessage() {
+ return message;
+ }
+ public void setMessage(String message) {
+ this.message = message;
+ }
+ public String getCode() {
+ return code;
+ }
+ public void setCode(String code) {
+ this.code = code;
+ }
+ @Override
+ public String toString() {
+ // Class is named Job; keep the log label consistent with the class name.
+ return "Job [uuid=" + uuid + ", description=" + description + ", state=" + state + ", message="
+ + message + ", code=" + code + "]";
+ }
+
+ public static class Links {
+ @JsonProperty("self")
+ private Self self;
+ public Self getSelf() { return self; }
+ public void setSelf(Self self) { this.self = self; }
+ }
+
+ public static class Self {
+ @JsonProperty("href")
+ private String href;
+ public String getHref() { return href; }
+ public void setHref(String href) { this.href = href; }
+ }
+
+ public static class JobError {
+ @JsonProperty("message")
+ String errorMesssage;
+ @JsonProperty("code")
+ String code;
+ public String getErrorMesssage () { return errorMesssage; }
+ public void setErrorMesssage (String errorMesssage) { this.errorMesssage = errorMesssage; }
+ public String getCode() {
+ return code;
+ }
+ public void setCode(String code) {
+ this.code = code;
+ }
+ @Override
+ public String toString() {
+ return "JobError [errorMesssage=" + errorMesssage + ", code=" + code + "]";
+ }
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Lun.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Lun.java
new file mode 100644
index 000000000000..364790958c8a
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Lun.java
@@ -0,0 +1,341 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.annotation.JsonValue;
+
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * A LUN is the logical representation of storage in a storage area network (SAN).<br/> In ONTAP, a LUN is located within a volume. Optionally, it can be located within a qtree in a volume.<br/> A LUN can be created to a specified size using thin or thick provisioning. A LUN can then be renamed, resized, cloned, and moved to a different volume. LUNs support the assignment of a quality of service (QoS) policy for performance management or a QoS policy can be assigned to the volume containing the LUN. See the LUN object model to learn more about each of the properties supported by the LUN REST API.<br/> A LUN must be mapped to an initiator group to grant access to the initiator group's initiators (client hosts). Initiators can then access the LUN and perform I/O over a Fibre Channel (FC) fabric using the Fibre Channel Protocol or a TCP/IP network using iSCSI.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class Lun {
+
+ @JsonProperty("auto_delete")
+ private Boolean autoDelete = null;
+
+ /**
+ * The class of LUN.<br/> Optional in POST.
+ */
+ public enum PropertyClassEnum {
+ REGULAR("regular");
+
+ private String value;
+
+ PropertyClassEnum(String value) {
+ this.value = value;
+ }
+
+ @JsonValue
+ public String getValue() {
+ return value;
+ }
+
+ @Override
+ public String toString() {
+ return String.valueOf(value);
+ }
+
+ @JsonCreator
+ public static PropertyClassEnum fromValue(String value) {
+ for (PropertyClassEnum b : PropertyClassEnum.values()) {
+ if (b.value.equals(value)) {
+ return b;
+ }
+ }
+ return null;
+ }
+ }
+
+ @JsonProperty("class")
+ private PropertyClassEnum propertyClass = null;
+
+ @JsonProperty("enabled")
+ private Boolean enabled = null;
+
+ @JsonProperty("lun_maps")
+ private List<LunMap> lunMaps = null;
+
+ @JsonProperty("name")
+ private String name = null;
+
+ @JsonProperty("clone")
+ private Clone clone = null;
+
+ /**
+ * The operating system type of the LUN.<br/> Required in POST when creating a LUN that is not a clone of another. Disallowed in POST when creating a LUN clone.
+ */
+ public enum OsTypeEnum {
+ HYPER_V("hyper_v"),
+
+ LINUX("linux"),
+
+ VMWARE("vmware"),
+
+ WINDOWS("windows"),
+
+ XEN("xen");
+
+ private String value;
+
+ OsTypeEnum(String value) {
+ this.value = value;
+ }
+
+ @JsonValue
+ public String getValue() {
+ return value;
+ }
+
+ @Override
+ public String toString() {
+ return String.valueOf(value);
+ }
+
+ @JsonCreator
+ public static OsTypeEnum fromValue(String value) {
+ for (OsTypeEnum b : OsTypeEnum.values()) {
+ if (b.value.equals(value)) {
+ return b;
+ }
+ }
+ return null;
+ }
+ }
+
+ @JsonProperty("os_type")
+ private OsTypeEnum osType = null;
+
+ @JsonProperty("serial_number")
+ private String serialNumber = null;
+
+ @JsonProperty("space")
+ private LunSpace space = null;
+
+ @JsonProperty("svm")
+ private Svm svm = null;
+
+ @JsonProperty("uuid")
+ private String uuid = null;
+
+ public Lun autoDelete(Boolean autoDelete) {
+ this.autoDelete = autoDelete;
+ return this;
+ }
+
+ public Boolean isAutoDelete() {
+ return autoDelete;
+ }
+
+ public void setAutoDelete(Boolean autoDelete) {
+ this.autoDelete = autoDelete;
+ }
+
+ public Lun propertyClass(PropertyClassEnum propertyClass) {
+ this.propertyClass = propertyClass;
+ return this;
+ }
+
+ public PropertyClassEnum getPropertyClass() {
+ return propertyClass;
+ }
+
+ public void setPropertyClass(PropertyClassEnum propertyClass) {
+ this.propertyClass = propertyClass;
+ }
+
+ public Lun enabled(Boolean enabled) {
+ this.enabled = enabled;
+ return this;
+ }
+
+ public Boolean isEnabled() {
+ return enabled;
+ }
+
+ public void setEnabled(Boolean enabled) {
+ this.enabled = enabled;
+ }
+
+ public List<LunMap> getLunMaps() {
+ return lunMaps;
+ }
+
+ public void setLunMaps(List<LunMap> lunMaps) {
+ this.lunMaps = lunMaps;
+ }
+
+ public Lun name(String name) {
+ this.name = name;
+ return this;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public Lun osType(OsTypeEnum osType) {
+ this.osType = osType;
+ return this;
+ }
+
+ public OsTypeEnum getOsType() {
+ return osType;
+ }
+
+ public void setOsType(OsTypeEnum osType) {
+ this.osType = osType;
+ }
+
+ public String getSerialNumber() {
+ return serialNumber;
+ }
+
+ public Lun space(LunSpace space) {
+ this.space = space;
+ return this;
+ }
+
+ public LunSpace getSpace() {
+ return space;
+ }
+
+ public void setSpace(LunSpace space) {
+ this.space = space;
+ }
+
+ public Lun svm(Svm svm) {
+ this.svm = svm;
+ return this;
+ }
+
+ public Svm getSvm() {
+ return svm;
+ }
+
+ public void setSvm(Svm svm) {
+ this.svm = svm;
+ }
+
+ public String getUuid() {
+ return uuid;
+ }
+ public void setUuid(String uuid) {
+ this.uuid = uuid;
+ }
+
+ public Clone getClone() {
+ return clone;
+ }
+
+ public void setClone(Clone clone) {
+ this.clone = clone;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ Lun lun = (Lun) o;
+ return Objects.equals(this.name, lun.name) && Objects.equals(this.uuid, lun.uuid);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(name, uuid);
+ }
+
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("class Lun {\n");
+ sb.append(" autoDelete: ").append(toIndentedString(autoDelete)).append("\n");
+ sb.append(" propertyClass: ").append(toIndentedString(propertyClass)).append("\n");
+ sb.append(" enabled: ").append(toIndentedString(enabled)).append("\n");
+ sb.append(" lunMaps: ").append(toIndentedString(lunMaps)).append("\n");
+ sb.append(" name: ").append(toIndentedString(name)).append("\n");
+ sb.append(" osType: ").append(toIndentedString(osType)).append("\n");
+ sb.append(" serialNumber: ").append(toIndentedString(serialNumber)).append("\n");
+ sb.append(" space: ").append(toIndentedString(space)).append("\n");
+ sb.append(" svm: ").append(toIndentedString(svm)).append("\n");
+ sb.append(" uuid: ").append(toIndentedString(uuid)).append("\n");
+ sb.append("}");
+ return sb.toString();
+ }
+
+ /**
+ * Convert the given object to string with each line indented by 4 spaces
+ * (except the first line).
+ */
+ private String toIndentedString(Object o) {
+ if (o == null) {
+ return "null";
+ }
+ return o.toString().replace("\n", "\n ");
+ }
+
+
+ public static class Clone {
+ @JsonProperty("source")
+ private Source source = null;
+ public Source getSource() {
+ return source;
+ }
+ public void setSource(Source source) {
+ this.source = source;
+ }
+ }
+
+ public static class Source {
+ @JsonProperty("name")
+ private String name = null;
+ @JsonProperty("uuid")
+ private String uuid = null;
+
+ public String getName() {
+ return name;
+ }
+ public void setName(String name) {
+ this.name = name;
+ }
+ public String getUuid() {
+ return uuid;
+ }
+ public void setUuid(String uuid) {
+ this.uuid = uuid;
+ }
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/LunMap.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/LunMap.java
new file mode 100644
index 000000000000..085e38f1e9c6
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/LunMap.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class LunMap {
+ @JsonProperty("igroup")
+ private Igroup igroup = null;
+ @JsonProperty("logical_unit_number")
+ private Integer logicalUnitNumber = null;
+ @JsonProperty("lun")
+ private Lun lun = null;
+ @JsonProperty("svm")
+ private Svm svm = null;
+
+ public LunMap igroup (Igroup igroup) {
+ this.igroup = igroup;
+ return this;
+ }
+
+ public Igroup getIgroup () {
+ return igroup;
+ }
+
+ public void setIgroup (Igroup igroup) {
+ this.igroup = igroup;
+ }
+
+ public LunMap logicalUnitNumber (Integer logicalUnitNumber) {
+ this.logicalUnitNumber = logicalUnitNumber;
+ return this;
+ }
+
+ public Integer getLogicalUnitNumber () {
+ return logicalUnitNumber;
+ }
+
+ public void setLogicalUnitNumber (Integer logicalUnitNumber) {
+ this.logicalUnitNumber = logicalUnitNumber;
+ }
+
+ public LunMap lun (Lun lun) {
+ this.lun = lun;
+ return this;
+ }
+
+ public Lun getLun () {
+ return lun;
+ }
+
+ public void setLun (Lun lun) {
+ this.lun = lun;
+ }
+
+ public LunMap svm (Svm svm) {
+ this.svm = svm;
+ return this;
+ }
+
+ public Svm getSvm () {
+ return svm;
+ }
+
+ public void setSvm (Svm svm) {
+ this.svm = svm;
+ }
+
+ @Override
+ public String toString () {
+ StringBuilder sb = new StringBuilder();
+ sb.append("class LunMap {\n");
+ sb.append(" igroup: ").append(toIndentedString(igroup)).append("\n");
+ sb.append(" logicalUnitNumber: ").append(toIndentedString(logicalUnitNumber)).append("\n");
+ sb.append(" lun: ").append(toIndentedString(lun)).append("\n");
+ sb.append(" svm: ").append(toIndentedString(svm)).append("\n");
+ sb.append("}");
+ return sb.toString();
+ }
+
+ private String toIndentedString (Object o) {
+ if (o == null) {
+ return "null";
+ }
+ return o.toString().replace("\n", "\n ");
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/LunSpace.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/LunSpace.java
new file mode 100644
index 000000000000..03e776cd378c
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/LunSpace.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+/**
+ * The storage space related properties of the LUN.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class LunSpace {
+
+ @JsonProperty("scsi_thin_provisioning_support_enabled")
+ private Boolean scsiThinProvisioningSupportEnabled = null;
+
+ @JsonProperty("size")
+ private Long size = null;
+
+ @JsonProperty("used")
+ private Long used = null;
+ @JsonProperty("physical_used")
+ private Long physicalUsed = null;
+
+ public LunSpace scsiThinProvisioningSupportEnabled(Boolean scsiThinProvisioningSupportEnabled) {
+ this.scsiThinProvisioningSupportEnabled = scsiThinProvisioningSupportEnabled;
+ return this;
+ }
+
+ public Boolean isScsiThinProvisioningSupportEnabled() {
+ return scsiThinProvisioningSupportEnabled;
+ }
+
+ public void setScsiThinProvisioningSupportEnabled(Boolean scsiThinProvisioningSupportEnabled) {
+ this.scsiThinProvisioningSupportEnabled = scsiThinProvisioningSupportEnabled;
+ }
+
+ public LunSpace size(Long size) {
+ this.size = size;
+ return this;
+ }
+
+ public Long getSize() {
+ return size;
+ }
+
+ public void setSize(Long size) {
+ this.size = size;
+ }
+
+ public Long getUsed() {
+ return used;
+ }
+
+ public Long getPhysicalUsed() {
+ return physicalUsed;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("class LunSpace {\n");
+ sb.append(" scsiThinProvisioningSupportEnabled: ").append(toIndentedString(scsiThinProvisioningSupportEnabled)).append("\n");
+ sb.append(" size: ").append(toIndentedString(size)).append("\n");
+ sb.append(" used: ").append(toIndentedString(used)).append("\n");
+ sb.append(" physicalUsed: ").append(toIndentedString(physicalUsed)).append("\n");
+ sb.append("}");
+ return sb.toString();
+ }
+
+ private String toIndentedString(Object o) {
+ if (o == null) {
+ return "null";
+ }
+ return o.toString().replace("\n", "\n ");
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Nas.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Nas.java
new file mode 100644
index 000000000000..42d348d80c80
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Nas.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+/**
+ * NAS-related properties of an ONTAP volume: the mount (junction) path and
+ * the export policy governing client access.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class Nas {
+ @JsonProperty("path")
+ private String path;
+
+ @JsonProperty("export_policy")
+ private ExportPolicy exportPolicy;
+
+ public String getPath() {
+ return path;
+ }
+
+ public void setPath(String path) {
+ this.path = path;
+ }
+
+ public ExportPolicy getExportPolicy() {
+ return exportPolicy;
+ }
+
+ public void setExportPolicy(ExportPolicy exportPolicy) {
+ this.exportPolicy = exportPolicy;
+ }
+
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/OntapStorage.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/OntapStorage.java
new file mode 100644
index 000000000000..a42cd02912b3
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/OntapStorage.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+
+/**
+ * Immutable value object carrying ONTAP connection parameters:
+ * credentials, storage management IP, SVM name, size, and protocol type.
+ */
+public class OntapStorage {
+ private final String username;
+ private final String password;
+ private final String storageIP;
+ private final String svmName;
+ private final Long size;
+ private final ProtocolType protocolType;
+
+ public OntapStorage(String username, String password, String storageIP, String svmName, Long size, ProtocolType protocolType) {
+ this.username = username;
+ this.password = password;
+ this.storageIP = storageIP;
+ this.svmName = svmName;
+ this.size = size;
+ this.protocolType = protocolType;
+ }
+
+ public String getUsername() {
+ return username;
+ }
+
+ public String getPassword() {
+ return password;
+ }
+
+ public String getStorageIP() { return storageIP; }
+
+ public String getSvmName() { return svmName; }
+
+ public Long getSize() {
+ return size;
+ }
+
+ public ProtocolType getProtocol() {
+ return protocolType;
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Policy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Policy.java
new file mode 100644
index 000000000000..24fdee6a1424
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Policy.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+
+import java.util.Objects;
+
+/**
+ * QoS policy model holding min/max throughput limits (IOPS and MBps).
+ * Equality and hash code are based solely on the policy uuid.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class Policy {
+ private int minThroughputIops;
+ private int minThroughputMbps;
+ private int maxThroughputIops;
+ private int maxThroughputMbps;
+ private String uuid;
+ private String name;
+ public int getMinThroughputIops() { return minThroughputIops; }
+ public void setMinThroughputIops(int minThroughputIops) { this.minThroughputIops = minThroughputIops; }
+ public int getMinThroughputMbps() { return minThroughputMbps; }
+ public void setMinThroughputMbps(int minThroughputMbps) { this.minThroughputMbps = minThroughputMbps; }
+ public int getMaxThroughputIops() { return maxThroughputIops; }
+ public void setMaxThroughputIops(int maxThroughputIops) { this.maxThroughputIops = maxThroughputIops; }
+ public int getMaxThroughputMbps() { return maxThroughputMbps; }
+ public void setMaxThroughputMbps(int maxThroughputMbps) { this.maxThroughputMbps = maxThroughputMbps; }
+ public String getUuid() { return uuid; }
+ public void setUuid(String uuid) { this.uuid = uuid; }
+ public String getName() { return name; }
+ public void setName(String name) { this.name = name; }
+
+ @Override
+ public boolean equals(Object o) {
+ if (o == null || getClass() != o.getClass()) return false;
+ Policy policy = (Policy) o;
+ return Objects.equals(getUuid(), policy.getUuid());
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hashCode(getUuid());
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Qos.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Qos.java
new file mode 100644
index 000000000000..3f7f8180de8e
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Qos.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+/**
+ * Quality of service (QoS) configuration wrapper; holds the assigned
+ * QoS {@link Policy}.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class Qos {
+ @JsonProperty("policy")
+ private Policy policy;
+
+ public Policy getPolicy() {
+ return policy;
+ }
+
+ public void setPolicy(Policy policy) {
+ this.policy = policy;
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java
new file mode 100644
index 000000000000..b1462c593863
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Svm.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.List;
+import java.util.Objects;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class Svm {
+ @JsonProperty("uuid")
+ private String uuid = null;
+
+ @JsonProperty("name")
+ private String name = null;
+
+ @JsonProperty("iscsi.enabled")
+ private Boolean iscsiEnabled = null;
+
+ @JsonProperty("fcp.enabled")
+ private Boolean fcpEnabled = null;
+
+ @JsonProperty("nfs.enabled")
+ private Boolean nfsEnabled = null;
+
+ @JsonProperty("aggregates")
+ private List aggregates = null;
+
+ @JsonProperty("aggregates_delegated")
+ private Boolean aggregatesDelegated = null;
+
+ @JsonProperty("state")
+ private String state = null;
+
+ @JsonIgnore
+ private Links links = null;
+
+ public String getUuid() {
+ return uuid;
+ }
+
+ public void setUuid(String uuid) {
+ this.uuid = uuid;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public Boolean getIscsiEnabled() {
+ return iscsiEnabled;
+ }
+
+ public void setIscsiEnabled(Boolean iscsiEnabled) {
+ this.iscsiEnabled = iscsiEnabled;
+ }
+
+ public Boolean getFcpEnabled() {
+ return fcpEnabled;
+ }
+
+ public void setFcpEnabled(Boolean fcpEnabled) {
+ this.fcpEnabled = fcpEnabled;
+ }
+
+ public Boolean getNfsEnabled() {
+ return nfsEnabled;
+ }
+
+ public void setNfsEnabled(Boolean nfsEnabled) {
+ this.nfsEnabled = nfsEnabled;
+ }
+
+ public List getAggregates() {
+ return aggregates;
+ }
+
+ public void setAggregates(List aggregates) {
+ this.aggregates = aggregates;
+ }
+
+ public Boolean getAggregatesDelegated() {
+ return aggregatesDelegated;
+ }
+
+ public void setAggregatesDelegated(Boolean aggregatesDelegated) {
+ this.aggregatesDelegated = aggregatesDelegated;
+ }
+
+ public String getState() {
+ return state;
+ }
+
+ public void setState(String state) {
+ this.state = state;
+ }
+
+ public Links getLinks() {
+ return links;
+ }
+
+ public void setLinks(Links links) {
+ this.links = links;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (o == null || getClass() != o.getClass()) return false;
+ Svm svm = (Svm) o;
+ return Objects.equals(getUuid(), svm.getUuid());
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hashCode(getUuid());
+ }
+
+ @JsonInclude(JsonInclude.Include.NON_NULL)
+ public static class Links { }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Version.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Version.java
new file mode 100644
index 000000000000..80b4d0229abe
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Version.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.Objects;
+
+/**
+ * This returns the cluster version information. When the cluster has more than one node, the cluster version is equivalent to the lowest of generation, major, and minor versions on all nodes.
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class Version {
+ @JsonProperty("full")
+ private String full = null;
+
+ @JsonProperty("generation")
+ private Integer generation = null;
+
+ @JsonProperty("major")
+ private Integer major = null;
+
+ @JsonProperty("minor")
+ private Integer minor = null;
+
+ public String getFull() {
+ return full;
+ }
+
+ public Integer getGeneration() {
+ return generation;
+ }
+
+ public Integer getMajor() {
+ return major;
+ }
+
+ public Integer getMinor() {
+ return minor;
+ }
+
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ Version clusterVersion = (Version) o;
+ return Objects.equals(this.full, clusterVersion.full) &&
+ Objects.equals(this.generation, clusterVersion.generation) &&
+ Objects.equals(this.major, clusterVersion.major) &&
+ Objects.equals(this.minor, clusterVersion.minor);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(full, generation, major, minor);
+ }
+
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("class ClusterVersion {\n");
+ sb.append(" full: ").append(toIndentedString(full)).append("\n");
+ sb.append(" generation: ").append(toIndentedString(generation)).append("\n");
+ sb.append(" major: ").append(toIndentedString(major)).append("\n");
+ sb.append(" minor: ").append(toIndentedString(minor)).append("\n");
+ sb.append("}");
+ return sb.toString();
+ }
+
+ /**
+ * Convert the given object to string with each line indented by 4 spaces
+ * (except the first line).
+ */
+ private String toIndentedString(Object o) {
+ if (o == null) {
+ return "null";
+ }
+ return o.toString().replace("\n", "\n ");
+ }
+
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Volume.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Volume.java
new file mode 100644
index 000000000000..22ac7563a6de
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/Volume.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.List;
+import java.util.Objects;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class Volume {
+ @JsonProperty("uuid")
+ private String uuid;
+
+ @JsonProperty("name")
+ private String name;
+
+ @JsonProperty("state")
+ private String state;
+
+ @JsonProperty("nas")
+ private Nas nas;
+
+ @JsonProperty("svm")
+ private Svm svm;
+
+ @JsonProperty("qos")
+ private Qos qos;
+
+ @JsonProperty("space")
+ private VolumeSpace space;
+
+ @JsonProperty("anti_ransomware")
+ private AntiRansomware antiRansomware;
+
+ @JsonProperty("aggregates")
+ private List aggregates = null;
+
+ @JsonProperty("size")
+ private Long size = null;
+
+ // Getters and setters
+ public String getUuid() {
+ return uuid;
+ }
+ public void setUuid(String uuid) {
+ this.uuid = uuid;
+ }
+ public String getName() {
+ return name;
+ }
+ public void setName(String name) {
+ this.name = name;
+ }
+ public String getState() {
+ return state;
+ }
+
+ public void setState(String state) {
+ this.state = state;
+ }
+
+ public Nas getNas() {
+ return nas;
+ }
+
+ public void setNas(Nas nas) {
+ this.nas = nas;
+ }
+
+ public Svm getSvm() {
+ return svm;
+ }
+
+ public void setSvm(Svm svm) {
+ this.svm = svm;
+ }
+
+ public Qos getQos() {
+ return qos;
+ }
+
+ public void setQos(Qos qos) {
+ this.qos = qos;
+ }
+
+ public VolumeSpace getSpace() {
+ return space;
+ }
+
+ public void setSpace(VolumeSpace space) {
+ this.space = space;
+ }
+
+ public AntiRansomware getAntiRansomware() {
+ return antiRansomware;
+ }
+
+ public void setAntiRansomware(AntiRansomware antiRansomware) {
+ this.antiRansomware = antiRansomware;
+ }
+
+ public List getAggregates () { return aggregates; }
+
+ public void setAggregates (List aggregates) { this.aggregates = aggregates; }
+
+ public Long getSize () { return size; }
+
+ public void setSize (Long size) { this.size = size; }
+
+
+ @Override
+ public boolean equals(Object o) {
+ if (o == null || getClass() != o.getClass()) return false;
+ Volume volume = (Volume) o;
+ return Objects.equals(uuid, volume.uuid);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hashCode(uuid);
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/VolumeQosPolicy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/VolumeQosPolicy.java
new file mode 100644
index 000000000000..7a9a4307ab1a
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/VolumeQosPolicy.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class VolumeQosPolicy {
+ @JsonProperty("max_throughput_iops")
+ private Integer maxThroughputIops = null;
+
+ @JsonProperty("max_throughput_mbps")
+ private Integer maxThroughputMbps = null;
+
+ @JsonProperty("min_throughput_iops")
+ private Integer minThroughputIops = null;
+
+ @JsonProperty("name")
+ private String name = null;
+
+ @JsonProperty("uuid")
+ private String uuid = null;
+
+ public Integer getMaxThroughputIops() {
+ return maxThroughputIops;
+ }
+
+ public void setMaxThroughputIops(Integer maxThroughputIops) {
+ this.maxThroughputIops = maxThroughputIops;
+ }
+
+ public Integer getMaxThroughputMbps() {
+ return maxThroughputMbps;
+ }
+
+ public void setMaxThroughputMbps(Integer maxThroughputMbps) {
+ this.maxThroughputMbps = maxThroughputMbps;
+ }
+
+ public Integer getMinThroughputIops() {
+ return minThroughputIops;
+ }
+
+ public void setMinThroughputIops(Integer minThroughputIops) {
+ this.minThroughputIops = minThroughputIops;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public String getUuid() {
+ return uuid;
+ }
+
+ public void setUuid(String uuid) {
+ this.uuid = uuid;
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/VolumeSpace.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/VolumeSpace.java
new file mode 100644
index 000000000000..84ae7d93199e
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/VolumeSpace.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class VolumeSpace {
+ @JsonProperty("size")
+ private long size;
+
+ @JsonProperty("available")
+ private long available;
+
+ @JsonProperty("used")
+ private long used;
+
+ public long getSize() {
+ return size;
+ }
+
+ public void setSize(long size) {
+ this.size = size;
+ }
+
+ public long getAvailable() {
+ return available;
+ }
+
+ public void setAvailable(long available) {
+ this.available = available;
+ }
+
+ public long getUsed() {
+ return used;
+ }
+
+ public void setUsed(long used) {
+ this.used = used;
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/VolumeSpaceLogicalSpace.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/VolumeSpaceLogicalSpace.java
new file mode 100644
index 000000000000..fa14252e4db3
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/VolumeSpaceLogicalSpace.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class VolumeSpaceLogicalSpace {
+
+ @JsonProperty("available")
+ private Long available = null;
+
+ @JsonProperty("used")
+ private Double used = null;
+
+ public Long getAvailable() {
+ return available;
+ }
+
+ public void setAvailable(Long available) {
+ this.available = available;
+ }
+
+ public Double getUsed() {
+ return used;
+ }
+
+ public void setUsed(Double used) {
+ this.used = used;
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/response/JobResponse.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/response/JobResponse.java
new file mode 100644
index 000000000000..a794c191c493
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/response/JobResponse.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.feign.model.response;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.cloudstack.storage.feign.model.Job;
+
+public class JobResponse {
+ @JsonProperty("job")
+ private Job job;
+ public Job getJob() { return job; }
+ public void setJob(Job job) { this.job = job; }
+
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/response/OntapResponse.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/response/OntapResponse.java
new file mode 100644
index 000000000000..8377f8906b99
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/feign/model/response/OntapResponse.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.feign.model.response;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.List;
+
+/**
+ * OntapResponse
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+public class OntapResponse {
+ @JsonProperty("num_records")
+ private Integer numRecords;
+
+ @JsonProperty("records")
+ private List records;
+
+ public OntapResponse () {
+ // Default constructor
+ }
+
+ public OntapResponse (List records) {
+ this.records = records;
+ this.numRecords = (records != null) ? records.size() : 0;
+ }
+
+ public Integer getNumRecords() {
+ return numRecords;
+ }
+
+ public void setNumRecords(Integer numRecords) {
+ this.numRecords = numRecords;
+ }
+
+ public List getRecords() {
+ return records;
+ }
+
+ public void setRecords(List records) {
+ this.records = records;
+ this.numRecords = (records != null) ? records.size() : 0;
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java
new file mode 100755
index 000000000000..75f8301b8b2b
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycle.java
@@ -0,0 +1,533 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.lifecycle;
+
+
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.host.HostVO;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.resource.ResourceManager;
+import com.cloud.storage.Storage;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolAutomation;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.google.common.base.Preconditions;
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters;
+import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.lifecycle.BasePrimaryDataStoreLifeCycleImpl;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.provider.StorageProviderFactory;
+import org.apache.cloudstack.storage.service.StorageStrategy;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.cloudstack.storage.utils.Utility;
+import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.inject.Inject;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+
+public class OntapPrimaryDatastoreLifecycle extends BasePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle {
+ @Inject private ClusterDao _clusterDao;
+ @Inject private StorageManager _storageMgr;
+ @Inject private ResourceManager _resourceMgr;
+ @Inject private PrimaryDataStoreHelper _dataStoreHelper;
+ @Inject private PrimaryDataStoreDetailsDao _datastoreDetailsDao;
+ @Inject private StoragePoolAutomation _storagePoolAutomation;
+ @Inject private PrimaryDataStoreDao storagePoolDao;
+ @Inject private StoragePoolDetailsDao storagePoolDetailsDao;
+ private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreLifecycle.class);
+
+ // ONTAP minimum volume size is 1.56 GB (1677721600 bytes)
+ private static final long ONTAP_MIN_VOLUME_SIZE = 1677721600L;
+
+ /**
+ * Creates primary storage on NetApp storage
+ * @param dsInfos datastore information map
+ * @return DataStore instance
+ */
+ @Override
+ public DataStore initialize(Map dsInfos) {
+ if (dsInfos == null) {
+ throw new CloudRuntimeException("Datastore info map is null, cannot create primary storage");
+ }
+ Long zoneId = (Long) dsInfos.get("zoneId");
+ Long podId = (Long) dsInfos.get("podId");
+ Long clusterId = (Long) dsInfos.get("clusterId");
+ String storagePoolName = (String) dsInfos.get("name");
+ String providerName = (String) dsInfos.get("providerName");
+ Long capacityBytes = (Long) dsInfos.get("capacityBytes");
+ boolean managed = (boolean) dsInfos.get("managed");
+ String tags = (String) dsInfos.get("tags");
+ Boolean isTagARule = (Boolean) dsInfos.get("isTagARule");
+
+ s_logger.info("Creating ONTAP primary storage pool with name: " + storagePoolName + ", provider: " + providerName +
+ ", zoneId: " + zoneId + ", podId: " + podId + ", clusterId: " + clusterId);
+ s_logger.debug("Received capacityBytes from UI: " + capacityBytes);
+
+ // Additional details requested for ONTAP primary storage pool creation
+ @SuppressWarnings("unchecked")
+ Map details = (Map) dsInfos.get("details");
+
+ // Validate and set capacity
+ if (capacityBytes == null || capacityBytes <= 0) {
+ s_logger.warn("capacityBytes not provided or invalid (" + capacityBytes + "), using ONTAP minimum size: " + ONTAP_MIN_VOLUME_SIZE);
+ capacityBytes = ONTAP_MIN_VOLUME_SIZE;
+ } else if (capacityBytes < ONTAP_MIN_VOLUME_SIZE) {
+ s_logger.warn("capacityBytes (" + capacityBytes + ") is below ONTAP minimum (" + ONTAP_MIN_VOLUME_SIZE + "), adjusting to minimum");
+ capacityBytes = ONTAP_MIN_VOLUME_SIZE;
+ }
+
+ // Validate scope
+ if (podId == null ^ clusterId == null) {
+ throw new CloudRuntimeException("Cluster Id or Pod Id is null, cannot create primary storage");
+ }
+
+ if (podId == null && clusterId == null) {
+ if (zoneId != null) {
+ s_logger.info("Both Pod Id and Cluster Id are null, Primary storage pool will be associated with a Zone");
+ } else {
+ throw new CloudRuntimeException("Pod Id, Cluster Id and Zone Id are all null, cannot create primary storage");
+ }
+ }
+
+ if (storagePoolName == null || storagePoolName.isEmpty()) {
+ throw new CloudRuntimeException("Storage pool name is null or empty, cannot create primary storage");
+ }
+
+ if (providerName == null || providerName.isEmpty()) {
+ throw new CloudRuntimeException("Provider name is null or empty, cannot create primary storage");
+ }
+
+ PrimaryDataStoreParameters parameters = new PrimaryDataStoreParameters();
+ if (clusterId != null) {
+ ClusterVO clusterVO = _clusterDao.findById(clusterId);
+ Preconditions.checkNotNull(clusterVO, "Unable to locate the specified cluster");
+ if (clusterVO.getHypervisorType() != Hypervisor.HypervisorType.KVM) {
+ throw new CloudRuntimeException("ONTAP primary storage is supported only for KVM hypervisor");
+ }
+ parameters.setHypervisorType(clusterVO.getHypervisorType());
+ }
+
+ s_logger.debug("ONTAP primary storage will be created as " + (managed ? "managed" : "unmanaged"));
+ if (!managed) {
+ throw new CloudRuntimeException("ONTAP primary storage must be managed");
+ }
+
+ //Required ONTAP detail keys
+ Set requiredKeys = Set.of(
+ Constants.USERNAME,
+ Constants.PASSWORD,
+ Constants.SVM_NAME,
+ Constants.PROTOCOL,
+ Constants.STORAGE_IP
+ );
+
+ // Validate existing entries (reject unexpected keys, empty values)
+ for (Map.Entry e : details.entrySet()) {
+ String key = e.getKey();
+ String val = e.getValue();
+ if (!requiredKeys.contains(key)) {
+ throw new CloudRuntimeException("Unexpected ONTAP detail key in URL: " + key);
+ }
+ if (val == null || val.isEmpty()) {
+ throw new CloudRuntimeException("ONTAP primary storage creation failed, empty detail: " + key);
+ }
+ }
+
+ // Detect missing required keys
+ Set providedKeys = new java.util.HashSet<>(details.keySet());
+ if (!providedKeys.containsAll(requiredKeys)) {
+ Set missing = new java.util.HashSet<>(requiredKeys);
+ missing.removeAll(providedKeys);
+ throw new CloudRuntimeException("ONTAP primary storage creation failed, missing detail(s): " + missing);
+ }
+
+ details.put(Constants.SIZE, capacityBytes.toString());
+
+ ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL));
+ // Connect to ONTAP and create volume
+ long volumeSize = Long.parseLong(details.get(Constants.SIZE));
+ OntapStorage ontapStorage = new OntapStorage(
+ details.get(Constants.USERNAME),
+ details.get(Constants.PASSWORD),
+ details.get(Constants.STORAGE_IP),
+ details.get(Constants.SVM_NAME),
+ volumeSize,
+ protocol);
+
+ StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage);
+ boolean isValid = storageStrategy.connect();
+ if (isValid) {
+ // Get the DataLIF for data access
+ String dataLIF = storageStrategy.getNetworkInterface();
+ if (dataLIF == null || dataLIF.isEmpty()) {
+ throw new CloudRuntimeException("Failed to retrieve Data LIF from ONTAP, cannot create primary storage");
+ }
+ s_logger.info("Using Data LIF for storage access: " + dataLIF);
+ details.put(Constants.DATA_LIF, dataLIF);
+ s_logger.info("Creating ONTAP volume '" + storagePoolName + "' with size: " + volumeSize + " bytes (" +
+ (volumeSize / (1024 * 1024 * 1024)) + " GB)");
+ try {
+ Volume volume = storageStrategy.createStorageVolume(storagePoolName, volumeSize);
+ if (volume == null) {
+ s_logger.error("createStorageVolume returned null for volume: " + storagePoolName);
+ throw new CloudRuntimeException("Failed to create ONTAP volume: " + storagePoolName);
+ }
+ s_logger.info("Volume object retrieved successfully. UUID: " + volume.getUuid() + ", Name: " + volume.getName());
+ details.putIfAbsent(Constants.VOLUME_UUID, volume.getUuid());
+ details.putIfAbsent(Constants.VOLUME_NAME, volume.getName());
+ } catch (Exception e) {
+ s_logger.error("Exception occurred while creating ONTAP volume: " + storagePoolName, e);
+ throw new CloudRuntimeException("Failed to create ONTAP volume: " + storagePoolName + ". Error: " + e.getMessage(), e);
+ }
+ } else {
+ throw new CloudRuntimeException("ONTAP details validation failed, cannot create primary storage");
+ }
+
+ // Determine storage pool type, path and port based on protocol
+ String path;
+ int port;
+ switch (protocol) {
+ case NFS3:
+ parameters.setType(Storage.StoragePoolType.NetworkFilesystem);
+ path = Constants.SLASH + storagePoolName;
+ port = Constants.NFS3_PORT;
+ // Force NFSv3 for ONTAP managed storage to avoid NFSv4 ID mapping issues
+ details.put("nfsmountopts", "vers=3");
+ s_logger.info("Setting NFS path for storage pool: " + path + ", port: " + port + " with mount option: vers=3");
+ break;
+ case ISCSI:
+ parameters.setType(Storage.StoragePoolType.Iscsi);
+ path = storageStrategy.getStoragePath();
+ port = Constants.ISCSI_PORT;
+ s_logger.info("Setting iSCSI path for storage pool: " + path + ", port: " + port);
+ break;
+ default:
+ throw new CloudRuntimeException("Unsupported protocol: " + protocol + ", cannot create primary storage");
+ }
+
+ // Set parameters for primary data store
+ parameters.setHost(details.get(Constants.DATA_LIF));
+ parameters.setPort(port);
+ parameters.setPath(path);
+ parameters.setTags(tags);
+ parameters.setIsTagARule(isTagARule);
+ parameters.setDetails(details);
+ parameters.setUuid(UUID.randomUUID().toString());
+ parameters.setZoneId(zoneId);
+ parameters.setPodId(podId);
+ parameters.setClusterId(clusterId);
+ parameters.setName(storagePoolName);
+ parameters.setProviderName(providerName);
+ parameters.setManaged(managed);
+ parameters.setCapacityBytes(capacityBytes);
+ parameters.setUsedBytes(0);
+
+ return _dataStoreHelper.createPrimaryDataStore(parameters);
+ }
+
+ @Override
+ public boolean attachCluster(DataStore dataStore, ClusterScope scope) {
+ logger.debug("In attachCluster for ONTAP primary storage");
+ if (dataStore == null) {
+ throw new InvalidParameterValueException("attachCluster: dataStore should not be null");
+ }
+ if (scope == null) {
+ throw new InvalidParameterValueException("attachCluster: scope should not be null");
+ }
+ List hostsIdentifier = new ArrayList<>();
+ StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
+ if (storagePool == null) {
+ s_logger.error("attachCluster : Storage Pool not found for id: " + dataStore.getId());
+ throw new CloudRuntimeException("attachCluster : Storage Pool not found for id: " + dataStore.getId());
+ }
+ PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore;
+ List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(primaryStore);
+ // TODO- need to check if no host to connect then throw exception or just continue?
+ logger.debug("attachCluster: Eligible Up and Enabled hosts: {} in cluster {}", hostsToConnect, primaryStore.getClusterId());
+
+ Map details = storagePoolDetailsDao.listDetailsKeyPairs(primaryStore.getId());
+ StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details);
+
+ ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL));
+ //TODO- Check if we have to handle heterogeneous host within the cluster
+ if (!validateProtocolSupportAndFetchHostsIdentifier(hostsToConnect, protocol, hostsIdentifier)) {
+ String errMsg = "attachCluster: Not all hosts in the cluster support the protocol: " + protocol.name();
+ s_logger.error(errMsg);
+ throw new CloudRuntimeException(errMsg);
+ }
+
+ logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId());
+ //TODO - check if no host to connect then also need to create access group without initiators
+ if (hostsIdentifier != null && hostsIdentifier.size() > 0) {
+ try {
+ AccessGroup accessGroupRequest = new AccessGroup();
+ accessGroupRequest.setHostsToConnect(hostsToConnect);
+ accessGroupRequest.setScope(scope);
+ primaryStore.setDetails(details);// setting details as it does not come from cloudstack
+ accessGroupRequest.setPrimaryDataStoreInfo(primaryStore);
+ strategy.createAccessGroup(accessGroupRequest);
+ } catch (Exception e) {
+ s_logger.error("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId() + ". Exception: " + e.getMessage());
+ throw new CloudRuntimeException("attachCluster: Failed to create access group on storage system for cluster: " + primaryStore.getClusterId() + ". Exception: " + e.getMessage());
+ }
+ }
+ logger.debug("attachCluster: Attaching the pool to each of the host in the cluster: {}", primaryStore.getClusterId());
+ for (HostVO host : hostsToConnect) {
+ try {
+ _storageMgr.connectHostToSharedPool(host, dataStore.getId());
+ } catch (Exception e) {
+ logger.warn("attachCluster: Unable to establish a connection between " + host + " and " + dataStore, e);
+ return false;
+ }
+ }
+ _dataStoreHelper.attachCluster(dataStore);
+ return true;
+ }
+
+ /**
+ * Host-scope attachment is not implemented for ONTAP primary storage;
+ * always returns false. TODO(review): confirm whether host scope should
+ * be supported or an exception thrown instead.
+ */
+ @Override
+ public boolean attachHost(DataStore store, HostScope scope, StoragePoolInfo existingInfo) {
+ return false;
+ }
+
+ @Override
+ public boolean attachZone(DataStore dataStore, ZoneScope scope, Hypervisor.HypervisorType hypervisorType) {
+ logger.debug("In attachZone for ONTAP primary storage");
+ if (dataStore == null) {
+ throw new InvalidParameterValueException("attachZone: dataStore should not be null");
+ }
+ if (scope == null) {
+ throw new InvalidParameterValueException("attachZone: scope should not be null");
+ }
+ List hostsIdentifier = new ArrayList<>();
+ StoragePoolVO storagePool = storagePoolDao.findById(dataStore.getId());
+ if (storagePool == null) {
+ s_logger.error("attachZone : Storage Pool not found for id: " + dataStore.getId());
+ throw new CloudRuntimeException("attachZone : Storage Pool not found for id: " + dataStore.getId());
+ }
+
+ PrimaryDataStoreInfo primaryStore = (PrimaryDataStoreInfo)dataStore;
+ List hostsToConnect = _resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(dataStore, scope.getScopeId(), Hypervisor.HypervisorType.KVM);
+ logger.debug(String.format("In createPool. Attaching the pool to each of the hosts in %s.", hostsToConnect));
+
+ Map details = storagePoolDetailsDao.listDetailsKeyPairs(primaryStore.getId());
+ StorageStrategy strategy = Utility.getStrategyByStoragePoolDetails(details);
+
+ // TODO- need to check if no host to connect then throw exception or just continue
+ logger.debug("attachZone: Eligible Up and Enabled hosts: {}", hostsToConnect);
+ ProtocolType protocol = ProtocolType.valueOf(details.get(Constants.PROTOCOL));
+ //TODO- Check if we have to handle heterogeneous host within the zone
+ if (!validateProtocolSupportAndFetchHostsIdentifier(hostsToConnect, protocol, hostsIdentifier)) {
+ String errMsg = "attachZone: Not all hosts in the zone support the protocol: " + protocol.name();
+ s_logger.error(errMsg);
+ throw new CloudRuntimeException(errMsg);
+ }
+ if (hostsIdentifier != null && !hostsIdentifier.isEmpty()) {
+ try {
+ AccessGroup accessGroupRequest = new AccessGroup();
+ accessGroupRequest.setHostsToConnect(hostsToConnect);
+ accessGroupRequest.setScope(scope);
+ primaryStore.setDetails(details); // setting details as it does not come from cloudstack
+ accessGroupRequest.setPrimaryDataStoreInfo(primaryStore);
+ strategy.createAccessGroup(accessGroupRequest);
+ } catch (Exception e) {
+ s_logger.error("attachZone: Failed to create access group on storage system for zone with Exception: " + e.getMessage());
+ throw new CloudRuntimeException("attachZone: Failed to create access group on storage system for zone with Exception: " + e.getMessage());
+ }
+ }
+ for (HostVO host : hostsToConnect) {
+ try {
+ _storageMgr.connectHostToSharedPool(host, dataStore.getId());
+ } catch (Exception e) {
+ logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e);
+ return false;
+ }
+ }
+ _dataStoreHelper.attachZone(dataStore);
+ return true;
+ }
+
+    /**
+     * Verifies that every eligible host can use the pool's protocol and collects
+     * the per-host identifier used for access-group creation: the initiator IQN
+     * for iSCSI, or the storage IP (falling back to the private IP) for NFSv3.
+     *
+     * @param hosts           hosts being attached to the pool
+     * @param protocolType    protocol configured on the pool (ISCSI or NFS3)
+     * @param hostIdentifiers output list populated with one identifier per host
+     * @return true when every host yielded a valid identifier, false otherwise
+     * @throws CloudRuntimeException for unsupported protocols
+     */
+    private boolean validateProtocolSupportAndFetchHostsIdentifier(List hosts, ProtocolType protocolType, List hostIdentifiers) {
+        switch (protocolType) {
+            case ISCSI:
+                String protocolPrefix = Constants.IQN;
+                for (HostVO host : hosts) {
+                    if (host == null || host.getStorageUrl() == null || host.getStorageUrl().trim().isEmpty()
+                            || !host.getStorageUrl().startsWith(protocolPrefix)) {
+                        return false;
+                    }
+                    hostIdentifiers.add(host.getStorageUrl());
+                }
+                break;
+            case NFS3:
+                for (HostVO host : hosts) {
+                    // A null host cannot contribute an identifier; previously it fell
+                    // through and re-added the previous iteration's (or an empty) IP.
+                    if (host == null) {
+                        return false;
+                    }
+                    String ip = host.getStorageIpAddress() != null ? host.getStorageIpAddress().trim() : "";
+                    if (ip.isEmpty()) {
+                        // Fall back to the private IP. Null-check before trim(): the old
+                        // condition mixed && and || with wrong precedence and could NPE
+                        // when the private IP was null.
+                        String privateIp = host.getPrivateIpAddress();
+                        if (privateIp == null || privateIp.trim().isEmpty()) {
+                            return false;
+                        }
+                        ip = privateIp.trim();
+                    }
+                    hostIdentifiers.add(ip);
+                }
+                break;
+            default:
+                throw new CloudRuntimeException("validateProtocolSupportAndFetchHostsIdentifier : Unsupported protocol: " + protocolType.name());
+        }
+        logger.info("validateProtocolSupportAndFetchHostsIdentifier: All hosts support the protocol: " + protocolType.name());
+        return true;
+    }
+
+    /**
+     * Puts the storage pool into maintenance mode.
+     * The automation step must succeed before the datastore helper flips the state.
+     *
+     * @param store the pool to place in maintenance
+     * @return true when both the automation step and the state change succeed
+     */
+    @Override
+    public boolean maintain(DataStore store) {
+        logger.info("Placing storage pool {} in maintenance mode", store);
+        // Short-circuit && preserves the original ordering: the helper runs only
+        // if the automation step succeeded.
+        return _storagePoolAutomation.maintain(store) && _dataStoreHelper.maintain(store);
+    }
+
+    /**
+     * Takes the storage pool out of maintenance mode.
+     * The datastore helper state change must succeed before automation is reversed.
+     *
+     * @param store the pool to restore
+     * @return true when both the state change and the automation rollback succeed
+     */
+    @Override
+    public boolean cancelMaintain(DataStore store) {
+        logger.info("Cancelling storage pool maintenance for {}", store);
+        // Mirrors maintain() in reverse order: helper first, then automation.
+        return _dataStoreHelper.cancelMaintain(store) && _storagePoolAutomation.cancelMaintain(store);
+    }
+
+    /**
+     * Deletes the ONTAP primary data store: best-effort removal of the backing
+     * ONTAP volume and its access groups, then removal of the CloudStack entity.
+     * ONTAP-side cleanup failures are logged but never block entity deletion.
+     *
+     * @param store the data store to delete
+     * @return true when the CloudStack primary data store entity was removed
+     *         (or did not exist)
+     */
+    @Override
+    public boolean deleteDataStore(DataStore store) {
+        s_logger.info("deleteDataStore: Starting deletion process for storage pool id: {}", store.getId());
+
+        long storagePoolId = store.getId();
+        // Get the StoragePool details
+        StoragePool storagePool = _storageMgr.getStoragePool(storagePoolId);
+        if (storagePool == null) {
+            s_logger.warn("deleteDataStore: Storage pool not found for id: {}, skipping deletion", storagePoolId);
+            return true; // Return true since the entity doesn't exist
+        }
+
+        try {
+            // Fetch storage pool details
+            Map details = _datastoreDetailsDao.listDetailsKeyPairs(storagePoolId);
+            if (details == null || details.isEmpty()) {
+                s_logger.warn("deleteDataStore: No details found for storage pool id: {}, proceeding with CS entity deletion only", storagePoolId);
+                return _dataStoreHelper.deletePrimaryDataStore(store);
+            }
+
+            s_logger.info("deleteDataStore: Deleting access groups for storage pool '{}'", storagePool.getName());
+
+            // Get the storage strategy to interact with ONTAP
+            StorageStrategy storageStrategy = Utility.getStrategyByStoragePoolDetails(details);
+
+            // Cast DataStore to PrimaryDataStoreInfo to get full details
+            PrimaryDataStoreInfo primaryDataStoreInfo = (PrimaryDataStoreInfo) store;
+            primaryDataStoreInfo.setDetails(details);
+
+            // Delete the underlying ONTAP volume (best-effort)
+            s_logger.info("deleteDataStore: Deleting ONTAP volume for storage pool '{}'", storagePool.getName());
+            String volumeUuid = details.get(Constants.VOLUME_UUID);
+            String volumeName = details.get(Constants.VOLUME_NAME);
+            if (volumeUuid == null || volumeUuid.isEmpty() || volumeName == null || volumeName.isEmpty()) {
+                // Without UUID/name we cannot address the backing volume; skip volume
+                // deletion but still attempt access-group cleanup below.
+                s_logger.error("deleteDataStore: Volume UUID/Name not found in details for storage pool id: {}, cannot delete volume", storagePoolId);
+            } else {
+                Volume volume = new Volume();
+                volume.setUuid(volumeUuid);
+                volume.setName(volumeName);
+                try {
+                    storageStrategy.deleteStorageVolume(volume);
+                    s_logger.info("deleteDataStore: Successfully deleted ONTAP volume '{}' (UUID: {}) for storage pool '{}'",
+                            volume.getName(), volume.getUuid(), storagePool.getName());
+                } catch (Exception e) {
+                    // Best-effort: log (with accurate wording) and continue so access
+                    // groups and the CloudStack entity are still cleaned up.
+                    s_logger.error("deleteDataStore: Exception while deleting ONTAP volume for storage pool id: {}. Error: {}",
+                            storagePoolId, e.getMessage(), e);
+                }
+            }
+            AccessGroup accessGroup = new AccessGroup();
+            accessGroup.setPrimaryDataStoreInfo(primaryDataStoreInfo);
+            // Delete access groups associated with this storage pool
+            storageStrategy.deleteAccessGroup(accessGroup);
+            s_logger.info("deleteDataStore: Successfully deleted access groups for storage pool '{}'", storagePool.getName());
+
+        } catch (Exception e) {
+            s_logger.error("deleteDataStore: Failed to delete access groups for storage pool id: {}. Error: {}",
+                    storagePoolId, e.getMessage(), e);
+            // Continue with CloudStack entity deletion even if ONTAP cleanup fails
+            s_logger.warn("deleteDataStore: Proceeding with CloudStack entity deletion despite ONTAP cleanup failure");
+        }
+
+        // Delete the CloudStack primary data store entity
+        return _dataStoreHelper.deletePrimaryDataStore(store);
+    }
+
+ // Object-store migration appears not applicable to ONTAP primary storage;
+ // returning true reports the no-op as success.
+ // TODO(review): confirm true (success) is the intended result for a no-op.
+ @Override
+ public boolean migrateToObjectStore(DataStore store) {
+ return true;
+ }
+
+ // No-op: pool settings are not updated through this hook.
+ // TODO(review): confirm whether detail/capacity updates should be applied here.
+ @Override
+ public void updateStoragePool(StoragePool storagePool, Map details) {
+
+ }
+
+ // Delegates enabling of the pool state to the generic datastore helper.
+ @Override
+ public void enableStoragePool(DataStore store) {
+ _dataStoreHelper.enable(store);
+ }
+
+ // Delegates disabling of the pool state to the generic datastore helper.
+ @Override
+ public void disableStoragePool(DataStore store) {
+ _dataStoreHelper.disable(store);
+ }
+
+ // No-op: cluster-to-zone scope change is not implemented for ONTAP pools.
+ // TODO(review): confirm silently ignoring the request is acceptable.
+ @Override
+ public void changeStoragePoolScopeToZone(DataStore store, ClusterScope clusterScope, Hypervisor.HypervisorType hypervisorType) {
+
+ }
+
+ // No-op: zone-to-cluster scope change is not implemented for ONTAP pools.
+ // TODO(review): confirm silently ignoring the request is acceptable.
+ @Override
+ public void changeStoragePoolScopeToCluster(DataStore store, ClusterScope clusterScope, Hypervisor.HypervisorType hypervisorType) {
+
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java
new file mode 100644
index 000000000000..fd527d285285
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/listener/OntapHostListener.java
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.listener;
+
+import javax.inject.Inject;
+
+import com.cloud.agent.api.ModifyStoragePoolCommand;
+import com.cloud.agent.api.ModifyStoragePoolAnswer;
+import com.cloud.agent.api.StoragePoolInfo;
+import com.cloud.alert.AlertManager;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.LogManager;
+import com.cloud.agent.AgentManager;
+import com.cloud.agent.api.Answer;
+import com.cloud.agent.api.DeleteStoragePoolCommand;
+import com.cloud.host.Host;
+import com.cloud.storage.StoragePool;
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
+import com.cloud.host.dao.HostDao;
+
+import java.util.Map;
+
+public class OntapHostListener implements HypervisorHostListener {
+ protected Logger logger = LogManager.getLogger(getClass());
+
+ @Inject
+ private AgentManager _agentMgr;
+ @Inject
+ private AlertManager _alertMgr;
+ @Inject
+ private PrimaryDataStoreDao _storagePoolDao;
+ @Inject
+ private HostDao _hostDao;
+ @Inject
+ private StoragePoolHostDao storagePoolHostDao;
+ @Inject
+ private StoragePoolDetailsDao _storagePoolDetailsDao;
+
+
+ @Override
+ public boolean hostConnect(long hostId, long poolId) {
+ logger.info("Connect to host " + hostId + " from pool " + poolId);
+ Host host = _hostDao.findById(hostId);
+ if (host == null) {
+ logger.error("host was not found with id : {}", hostId);
+ return false;
+ }
+
+ // TODO add host type check also since we support only KVM for now, host.getHypervisorType().equals(HypervisorType.KVM)
+ StoragePool pool = _storagePoolDao.findById(poolId);
+ if (pool == null) {
+ logger.error("Failed to connect host - storage pool not found with id: {}", poolId);
+ return false;
+ }
+ logger.info("Connecting host {} to ONTAP storage pool {}", host.getName(), pool.getName());
+ try {
+ // Load storage pool details from database to pass mount options and other config to agent
+ Map detailsMap = _storagePoolDetailsDao.listDetailsKeyPairs(poolId);
+ // Create the ModifyStoragePoolCommand to send to the agent
+ // Note: Always send command even if database entry exists, because agent may have restarted
+ // and lost in-memory pool registration. The command handler is idempotent.
+ ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, pool, detailsMap);
+
+ Answer answer = _agentMgr.easySend(hostId, cmd);
+
+ if (answer == null) {
+ throw new CloudRuntimeException(String.format("Unable to get an answer to the modify storage pool command (%s)", pool));
+ }
+
+ if (!answer.getResult()) {
+ String msg = String.format("Unable to attach storage pool %s to host %d", pool, hostId);
+
+ _alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, pool.getDataCenterId(), pool.getPodId(), msg, msg);
+
+ throw new CloudRuntimeException(String.format(
+ "Unable to establish a connection from agent to storage pool %s due to %s", pool, answer.getDetails()));
+ }
+
+ // Get the mount path from the answer
+ ModifyStoragePoolAnswer mspAnswer = (ModifyStoragePoolAnswer) answer;
+ StoragePoolInfo poolInfo = mspAnswer.getPoolInfo();
+ if (poolInfo == null) {
+ throw new CloudRuntimeException("ModifyStoragePoolAnswer returned null poolInfo");
+ }
+
+ String localPath = poolInfo.getLocalPath();
+ logger.info("Storage pool {} successfully mounted at: {}", pool.getName(), localPath);
+
+ // Update or create the storage_pool_host_ref entry with the correct local_path
+ StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(poolId, hostId);
+
+ if (storagePoolHost == null) {
+ storagePoolHost = new StoragePoolHostVO(poolId, hostId, localPath);
+ storagePoolHostDao.persist(storagePoolHost);
+ logger.info("Created storage_pool_host_ref entry for pool {} and host {}", pool.getName(), host.getName());
+ } else {
+ storagePoolHost.setLocalPath(localPath);
+ storagePoolHostDao.update(storagePoolHost.getId(), storagePoolHost);
+ logger.info("Updated storage_pool_host_ref entry with local_path: {}", localPath);
+ }
+
+ // Update pool capacity/usage information
+ StoragePoolVO poolVO = _storagePoolDao.findById(poolId);
+ if (poolVO != null && poolInfo.getCapacityBytes() > 0) {
+ poolVO.setCapacityBytes(poolInfo.getCapacityBytes());
+ poolVO.setUsedBytes(poolInfo.getCapacityBytes() - poolInfo.getAvailableBytes());
+ _storagePoolDao.update(poolVO.getId(), poolVO);
+ logger.info("Updated storage pool capacity: {} GB, used: {} GB", poolInfo.getCapacityBytes() / (1024 * 1024 * 1024), (poolInfo.getCapacityBytes() - poolInfo.getAvailableBytes()) / (1024 * 1024 * 1024));
+ }
+
+ } catch (Exception e) {
+ logger.error("Exception while connecting host {} to storage pool {}", host.getName(), pool.getName(), e);
+ // CRITICAL: Don't throw exception - it crashes the agent and causes restart loops
+ // Return false to indicate failure without crashing
+ return false;
+ }
+ return true;
+ }
+
+ @Override
+ public boolean hostDisconnected(Host host, StoragePool pool) {
+ logger.info("Disconnect from host " + host.getId() + " from pool " + pool.getName());
+
+ Host hostToremove = _hostDao.findById(host.getId());
+ if (hostToremove == null) {
+ logger.error("Failed to add host by HostListener as host was not found with id : {}", host.getId());
+ return false;
+ }
+ // TODO add storage pool get validation
+ logger.info("Disconnecting host {} from ONTAP storage pool {}", host.getName(), pool.getName());
+
+ try {
+ DeleteStoragePoolCommand cmd = new DeleteStoragePoolCommand(pool);
+ long hostId = host.getId();
+ Answer answer = _agentMgr.easySend(hostId, cmd);
+
+ if (answer != null && answer.getResult()) {
+ logger.info("Successfully disconnected host {} from ONTAP storage pool {}", host.getName(), pool.getName());
+ return true;
+ } else {
+ String errMsg = (answer != null) ? answer.getDetails() : "Unknown error";
+ logger.warn("Failed to disconnect host {} from storage pool {}. Error: {}", host.getName(), pool.getName(), errMsg);
+ return false;
+ }
+ } catch (Exception e) {
+ logger.error("Exception while disconnecting host {} from storage pool {}", host.getName(), pool.getName(), e);
+ return false;
+ }
+ }
+
+ @Override
+ public boolean hostDisconnected(long hostId, long poolId) {
+ return false;
+ }
+
+ @Override
+ public boolean hostAboutToBeRemoved(long hostId) {
+ return false;
+ }
+
+ @Override
+ public boolean hostRemoved(long hostId, long clusterId) {
+ return false;
+ }
+
+ @Override
+ public boolean hostEnabled(long hostId) {
+ return false;
+ }
+
+ @Override
+ public boolean hostAdded(long hostId) {
+ return false;
+ }
+
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java
new file mode 100755
index 000000000000..5b44c951a5fa
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/OntapPrimaryDatastoreProvider.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.provider;
+
+
+import com.cloud.utils.component.ComponentContext;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle;
+import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider;
+import org.apache.cloudstack.storage.driver.OntapPrimaryDatastoreDriver;
+import org.apache.cloudstack.storage.lifecycle.OntapPrimaryDatastoreLifecycle;
+import org.apache.cloudstack.storage.listener.OntapHostListener;
+import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+@Component
+public class OntapPrimaryDatastoreProvider implements PrimaryDataStoreProvider {
+
+ private static final Logger s_logger = LogManager.getLogger(OntapPrimaryDatastoreProvider.class);
+ // Lifecycle/driver/listener are created lazily in configure(), not injected
+ // as fields, so they are null until configure() has run.
+ private OntapPrimaryDatastoreDriver primaryDatastoreDriver;
+ private OntapPrimaryDatastoreLifecycle primaryDatastoreLifecycle;
+ private HypervisorHostListener listener;
+
+ public OntapPrimaryDatastoreProvider() {
+ s_logger.info("OntapPrimaryDatastoreProvider initialized");
+ }
+ // Returns null until configure() has been called.
+ @Override
+ public DataStoreLifeCycle getDataStoreLifeCycle() {
+ return primaryDatastoreLifecycle;
+ }
+
+ // Returns null until configure() has been called.
+ @Override
+ public DataStoreDriver getDataStoreDriver() {
+ return primaryDatastoreDriver;
+ }
+
+ // Returns null until configure() has been called.
+ @Override
+ public HypervisorHostListener getHostListener() {
+ return listener;
+ }
+
+ // Provider name CloudStack uses to look this plugin up.
+ @Override
+ public String getName() {
+ s_logger.trace("OntapPrimaryDatastoreProvider: getName: Called");
+ return Constants.ONTAP_PLUGIN_NAME;
+ }
+
+ // Instantiates the driver, lifecycle and host listener via Spring injection.
+ @Override
+ public boolean configure(Map params) {
+ s_logger.trace("OntapPrimaryDatastoreProvider: configure: Called");
+ primaryDatastoreDriver = ComponentContext.inject(OntapPrimaryDatastoreDriver.class);
+ primaryDatastoreLifecycle = ComponentContext.inject(OntapPrimaryDatastoreLifecycle.class);
+ listener = ComponentContext.inject(OntapHostListener.class);
+ return true;
+ }
+
+ // This provider only serves primary (not image/object) data stores.
+ @Override
+ public Set getTypes() {
+ s_logger.trace("OntapPrimaryDatastoreProvider: getTypes: Called");
+ Set typeSet = new HashSet();
+ typeSet.add(DataStoreProviderType.PRIMARY);
+ return typeSet;
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java
new file mode 100644
index 000000000000..b854a26271ee
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/provider/StorageProviderFactory.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.provider;
+
+import com.cloud.utils.component.ComponentContext;
+import com.cloud.utils.exception.CloudRuntimeException;
+
+import java.nio.charset.StandardCharsets;
+
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.service.StorageStrategy;
+import org.apache.cloudstack.storage.service.UnifiedNASStrategy;
+import org.apache.cloudstack.storage.service.UnifiedSANStrategy;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+
+/**
+ * Factory that picks the protocol-specific {@code StorageStrategy}
+ * (NAS for NFSv3, SAN for iSCSI) for a given ONTAP storage definition.
+ */
+public class StorageProviderFactory {
+ private static final Logger s_logger = LogManager.getLogger(StorageProviderFactory.class);
+
+ // Returns a Spring-injected strategy for the storage's protocol.
+ // Throws CloudRuntimeException for unsupported protocols.
+ public static StorageStrategy getStrategy(OntapStorage ontapStorage) {
+ ProtocolType protocol = ontapStorage.getProtocol();
+ s_logger.info("Initializing StorageProviderFactory with protocol: " + protocol);
+ // Assumes the stored password is Base64-encoded; an invalid encoding makes
+ // Base64.decode throw IllegalArgumentException. TODO(review): confirm and
+ // consider wrapping with a clearer error message.
+ String decodedPassword = new String(java.util.Base64.getDecoder().decode(ontapStorage.getPassword()), StandardCharsets.UTF_8);
+ // Rebuild the storage object so the decoded password is carried forward.
+ ontapStorage = new OntapStorage(
+ ontapStorage.getUsername(),
+ decodedPassword,
+ ontapStorage.getStorageIP(),
+ ontapStorage.getSvmName(),
+ ontapStorage.getSize(),
+ protocol);
+ switch (protocol) {
+ case NFS3:
+ UnifiedNASStrategy unifiedNASStrategy = new UnifiedNASStrategy(ontapStorage);
+ ComponentContext.inject(unifiedNASStrategy);
+ // NOTE(review): setOntapStorage after the constructor looks redundant —
+ // confirm whether inject() resets the field.
+ unifiedNASStrategy.setOntapStorage(ontapStorage);
+ return unifiedNASStrategy;
+ case ISCSI:
+ UnifiedSANStrategy unifiedSANStrategy = new UnifiedSANStrategy(ontapStorage);
+ ComponentContext.inject(unifiedSANStrategy);
+ // NOTE(review): same redundancy as the NAS branch above.
+ unifiedSANStrategy.setOntapStorage(ontapStorage);
+ return unifiedSANStrategy;
+ default:
+ throw new CloudRuntimeException("Unsupported protocol: " + protocol);
+ }
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/NASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/NASStrategy.java
new file mode 100644
index 000000000000..27a4f3d2ce7d
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/NASStrategy.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.service;
+
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+
+/**
+ * Base class for NAS (file-protocol, e.g. NFS) ONTAP storage strategies.
+ * Currently adds no operations beyond {@link StorageStrategy}; it exists as
+ * the protocol-family hook mirroring {@code SANStrategy}.
+ */
+public abstract class NASStrategy extends StorageStrategy {
+ public NASStrategy(OntapStorage ontapStorage) {
+ super(ontapStorage);
+ }
+
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/SANStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/SANStrategy.java
new file mode 100644
index 000000000000..6be5ecfaf3f2
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/SANStrategy.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.service;
+
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+
+/**
+ * Base class for SAN (block-protocol, e.g. iSCSI) ONTAP storage strategies.
+ * Adds LUN-mapping and initiator/igroup validation operations on top of
+ * {@link StorageStrategy}.
+ */
+public abstract class SANStrategy extends StorageStrategy {
+ public SANStrategy(OntapStorage ontapStorage) {
+ super(ontapStorage);
+ }
+
+ /**
+ * Ensures the LUN is mapped to the specified access group (igroup).
+ * If a mapping already exists, returns the existing LUN number.
+ * If not, creates a new mapping and returns the assigned LUN number.
+ *
+ * @param svmName the SVM name
+ * @param lunName the LUN name
+ * @param accessGroupName the igroup name
+ * @return the logical unit number as a String
+ */
+ public abstract String ensureLunMapped(String svmName, String lunName, String accessGroupName);
+
+ /**
+ * Validates that the host initiator is present in the access group (igroup).
+ *
+ * @param hostInitiator the host initiator IQN
+ * @param svmName the SVM name
+ * @param accessGroupName the igroup name
+ * @return true if the initiator is found in the igroup, false otherwise
+ */
+ public abstract boolean validateInitiatorInAccessGroup(String hostInitiator, String svmName, String accessGroupName);
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java
new file mode 100644
index 000000000000..d9f98dcf7cb1
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/StorageStrategy.java
@@ -0,0 +1,608 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.service;
+
+import com.cloud.utils.exception.CloudRuntimeException;
+import feign.FeignException;
+import org.apache.cloudstack.storage.feign.FeignClientFactory;
+import org.apache.cloudstack.storage.feign.client.AggregateFeignClient;
+import org.apache.cloudstack.storage.feign.client.JobFeignClient;
+import org.apache.cloudstack.storage.feign.client.NetworkFeignClient;
+import org.apache.cloudstack.storage.feign.client.SANFeignClient;
+import org.apache.cloudstack.storage.feign.client.SvmFeignClient;
+import org.apache.cloudstack.storage.feign.client.VolumeFeignClient;
+import org.apache.cloudstack.storage.feign.model.Aggregate;
+import org.apache.cloudstack.storage.feign.model.IpInterface;
+import org.apache.cloudstack.storage.feign.model.IscsiService;
+import org.apache.cloudstack.storage.feign.model.Job;
+import org.apache.cloudstack.storage.feign.model.Nas;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Svm;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.feign.model.response.JobResponse;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.CloudStackVolume;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.cloudstack.storage.utils.Utility;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * Storage Strategy represents the communication path for all the ONTAP storage options
+ *
+ * ONTAP storage operation would vary based on
+ * Supported protocols: NFS3.0, NFS4.1, FC, iSCSI, Nvme/TCP and Nvme/FC
+ * Supported platform: Unified and Disaggregated
+ */
+public abstract class StorageStrategy {
+ // Replace @Inject Feign clients with FeignClientFactory
+ private final FeignClientFactory feignClientFactory;
+ private final AggregateFeignClient aggregateFeignClient;
+ private final VolumeFeignClient volumeFeignClient;
+ private final SvmFeignClient svmFeignClient;
+ private final JobFeignClient jobFeignClient;
+ private final NetworkFeignClient networkFeignClient;
+ private final SANFeignClient sanFeignClient;
+
+ protected OntapStorage storage;
+
+ /**
+ * Presents aggregate object for the unified storage, not eligible for disaggregated
+ */
+ private List aggregates;
+
+ private static final Logger s_logger = LogManager.getLogger(StorageStrategy.class);
+
+ public StorageStrategy(OntapStorage ontapStorage) {
+ storage = ontapStorage;
+ String baseURL = Constants.HTTPS + storage.getStorageIP();
+ s_logger.info("Initializing StorageStrategy with base URL: " + baseURL);
+ // Initialize FeignClientFactory and create clients
+ this.feignClientFactory = new FeignClientFactory();
+ this.aggregateFeignClient = feignClientFactory.createClient(AggregateFeignClient.class, baseURL);
+ this.volumeFeignClient = feignClientFactory.createClient(VolumeFeignClient.class, baseURL);
+ this.svmFeignClient = feignClientFactory.createClient(SvmFeignClient.class, baseURL);
+ this.jobFeignClient = feignClientFactory.createClient(JobFeignClient.class, baseURL);
+ this.networkFeignClient = feignClientFactory.createClient(NetworkFeignClient.class, baseURL);
+ this.sanFeignClient = feignClientFactory.createClient(SANFeignClient.class, baseURL);
+ }
+
+ // Connect method to validate ONTAP cluster, credentials, protocol, and SVM
+ public boolean connect() {
+ s_logger.info("Attempting to connect to ONTAP cluster at " + storage.getStorageIP() + " and validate SVM " +
+ storage.getSvmName() + ", protocol " + storage.getProtocol());
+ //Get AuthHeader
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ String svmName = storage.getSvmName();
+ try {
+ // Call the SVM API to check if the SVM exists
+ Svm svm = new Svm();
+ s_logger.info("Fetching the SVM details...");
+ Map queryParams = Map.of(Constants.NAME, svmName, Constants.FIELDS, Constants.AGGREGATES +
+ Constants.COMMA + Constants.STATE);
+ OntapResponse svms = svmFeignClient.getSvmResponse(queryParams, authHeader);
+ if (svms != null && svms.getRecords() != null && !svms.getRecords().isEmpty()) {
+ svm = svms.getRecords().get(0);
+ } else {
+ s_logger.error("No SVM found on the ONTAP cluster by the name" + svmName + ".");
+ return false;
+ }
+
+ // Validations
+ s_logger.info("Validating SVM state and protocol settings...");
+ if (!Objects.equals(svm.getState(), Constants.RUNNING)) {
+ s_logger.error("SVM " + svmName + " is not in running state.");
+ return false;
+ }
+ if (Objects.equals(storage.getProtocol(), Constants.NFS) && !svm.getNfsEnabled()) {
+ s_logger.error("NFS protocol is not enabled on SVM " + svmName);
+ return false;
+ } else if (Objects.equals(storage.getProtocol(), Constants.ISCSI) && !svm.getIscsiEnabled()) {
+ s_logger.error("iSCSI protocol is not enabled on SVM " + svmName);
+ return false;
+ }
+ List aggrs = svm.getAggregates();
+ if (aggrs == null || aggrs.isEmpty()) {
+ s_logger.error("No aggregates are assigned to SVM " + svmName);
+ return false;
+ }
+ this.aggregates = aggrs;
+ s_logger.info("Successfully connected to ONTAP cluster and validated ONTAP details provided");
+ } catch (Exception e) {
+ s_logger.error("Failed to connect to ONTAP cluster: " + e.getMessage(), e);
+ return false;
+ }
+ return true;
+ }
+
+ // Common methods like create/delete etc., should be here
+
+ /**
+ * Creates ONTAP Flex-Volume
+ * Eligible only for Unified ONTAP storage
+ * throw exception in case of disaggregated ONTAP storage
+ *
+ * @param volumeName the name of the volume to create
+ * @param size the size of the volume in bytes
+ * @return the created Volume object
+ */
+ public Volume createStorageVolume(String volumeName, Long size) {
+ s_logger.info("Creating volume: " + volumeName + " of size: " + size + " bytes");
+
+ String svmName = storage.getSvmName();
+ if (aggregates == null || aggregates.isEmpty()) {
+ s_logger.error("No aggregates available to create volume on SVM " + svmName);
+ throw new CloudRuntimeException("No aggregates available to create volume on SVM " + svmName);
+ }
+ if (size == null || size <= 0) {
+ throw new CloudRuntimeException("Invalid volume size provided: " + size);
+ }
+
+ // Get the AuthHeader
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+
+ // Generate the Create Volume Request
+ Volume volumeRequest = new Volume();
+ Svm svm = new Svm();
+ svm.setName(svmName);
+ Nas nas = new Nas();
+ nas.setPath(Constants.SLASH + volumeName);
+
+ volumeRequest.setName(volumeName);
+ volumeRequest.setSvm(svm);
+
+ // Pick the best aggregate for this specific request (largest available, online, and sufficient space).
+ long maxAvailableAggregateSpaceBytes = -1L;
+ Aggregate aggrChosen = null;
+ for (Aggregate aggr : aggregates) {
+ s_logger.debug("Found aggregate: " + aggr.getName() + " with UUID: " + aggr.getUuid());
+ Aggregate aggrResp = aggregateFeignClient.getAggregateByUUID(authHeader, aggr.getUuid());
+
+ if (aggrResp == null) {
+ s_logger.warn("Aggregate details response is null for aggregate " + aggr.getName() + ". Skipping.");
+ continue;
+ }
+
+ if (!Objects.equals(aggrResp.getState(), Aggregate.StateEnum.ONLINE)) {
+ s_logger.warn("Aggregate " + aggr.getName() + " is not in online state. Skipping this aggregate.");
+ continue;
+ }
+
+ if (aggrResp.getSpace() == null || aggrResp.getAvailableBlockStorageSpace() == null) {
+ s_logger.warn("Aggregate " + aggr.getName() + " does not have space information. Skipping this aggregate.");
+ continue;
+ }
+
+ final long availableBytes = aggrResp.getAvailableBlockStorageSpace().longValue();
+ s_logger.debug("Aggregate " + aggr.getName() + " available bytes=" + availableBytes + ", requested=" + size);
+
+ if (availableBytes <= size) {
+ s_logger.warn("Aggregate " + aggr.getName() + " does not have sufficient available space. Required=" +
+ size + " bytes, available=" + availableBytes + " bytes. Skipping this aggregate.");
+ continue;
+ }
+
+ if (availableBytes > maxAvailableAggregateSpaceBytes) {
+ maxAvailableAggregateSpaceBytes = availableBytes;
+ aggrChosen = aggr;
+ }
+ }
+
+ if (aggrChosen == null) {
+ s_logger.error("No suitable aggregates found on SVM " + svmName + " for volume creation.");
+ throw new CloudRuntimeException("No suitable aggregates found on SVM " + svmName + " for volume operations.");
+ }
+ s_logger.info("Selected aggregate: " + aggrChosen.getName() + " for volume operations.");
+
+ Aggregate aggr = new Aggregate();
+ aggr.setName(aggrChosen.getName());
+ aggr.setUuid(aggrChosen.getUuid());
+ volumeRequest.setAggregates(List.of(aggr));
+ volumeRequest.setSize(size);
+ volumeRequest.setNas(nas);
+ try {
+ JobResponse jobResponse = volumeFeignClient.createVolumeWithJob(authHeader, volumeRequest);
+ if (jobResponse == null || jobResponse.getJob() == null) {
+ throw new CloudRuntimeException("Failed to initiate volume creation for " + volumeName);
+ }
+ String jobUUID = jobResponse.getJob().getUuid();
+
+ //Create URI for GET Job API
+ Boolean jobSucceeded = jobPollForSuccess(jobUUID, 10, 1);
+ if (!jobSucceeded) {
+ s_logger.error("Volume creation job failed for volume: " + volumeName);
+ throw new CloudRuntimeException("Volume creation job failed for volume: " + volumeName);
+ }
+ s_logger.info("Volume creation job completed successfully for volume: " + volumeName);
+ } catch (Exception e) {
+ s_logger.error("Exception while creating volume: ", e);
+ throw new CloudRuntimeException("Failed to create volume: " + e.getMessage());
+ }
+ // Verify if the Volume has been created and set the Volume object
+ // Call the VolumeFeignClient to get the created volume details
+ OntapResponse volumesResponse = volumeFeignClient.getAllVolumes(authHeader, Map.of(Constants.NAME, volumeName));
+ if (volumesResponse == null || volumesResponse.getRecords() == null || volumesResponse.getRecords().isEmpty()) {
+ s_logger.error("Volume " + volumeName + " not found after creation.");
+ throw new CloudRuntimeException("Volume " + volumeName + " not found after creation.");
+ }
+ Volume createdVolume = volumesResponse.getRecords().get(0);
+ if (createdVolume == null) {
+ s_logger.error("Failed to retrieve details of the created volume " + volumeName);
+ throw new CloudRuntimeException("Failed to retrieve details of the created volume " + volumeName);
+ } else if (createdVolume.getName() == null || !createdVolume.getName().equals(volumeName)) {
+ s_logger.error("Mismatch in created volume name. Expected: " + volumeName + ", Found: " + createdVolume.getName());
+ throw new CloudRuntimeException("Mismatch in created volume name. Expected: " + volumeName + ", Found: " + createdVolume.getName());
+ }
+ s_logger.info("Volume created successfully: " + volumeName);
+ try {
+ Map queryParams = Map.of(Constants.NAME, volumeName);
+ s_logger.debug("Fetching volume details for: " + volumeName);
+
+ OntapResponse ontapVolume = volumeFeignClient.getVolume(authHeader, queryParams);
+ s_logger.debug("Feign call completed. Processing response...");
+
+ if (ontapVolume == null) {
+ s_logger.error("OntapResponse is null for volume: " + volumeName);
+ throw new CloudRuntimeException("Failed to fetch volume " + volumeName + ": Response is null");
+ }
+ s_logger.debug("OntapResponse is not null. Checking records field...");
+
+ if (ontapVolume.getRecords() == null) {
+ s_logger.error("OntapResponse.records is null for volume: " + volumeName);
+ throw new CloudRuntimeException("Failed to fetch volume " + volumeName + ": Records list is null");
+ }
+ s_logger.debug("Records field is not null. Size: " + ontapVolume.getRecords().size());
+
+ if (ontapVolume.getRecords().isEmpty()) {
+ s_logger.error("OntapResponse.records is empty for volume: " + volumeName);
+ throw new CloudRuntimeException("Failed to fetch volume " + volumeName + ": No records found");
+ }
+
+ Volume volume = ontapVolume.getRecords().get(0);
+ s_logger.info("Volume retrieved successfully: " + volumeName + ", UUID: " + volume.getUuid());
+ return volume;
+ } catch (Exception e) {
+ s_logger.error("Exception while retrieving volume details for: " + volumeName, e);
+ throw new CloudRuntimeException("Failed to fetch volume: " + volumeName + ". Error: " + e.getMessage(), e);
+ }
+ }
+
+ /**
+ * Updates ONTAP Flex-Volume
+ * Eligible only for Unified ONTAP storage
+ * throw exception in case of disaggregated ONTAP storage
+ *
+ * @param volume the volume to update
+ * @return the updated Volume object
+ */
+ public Volume updateStorageVolume(Volume volume) {
+ //TODO
+ return null;
+ }
+
+ /**
+ * Delete ONTAP Flex-Volume
+ * Eligible only for Unified ONTAP storage
+ * throw exception in case of disaggregated ONTAP storage
+ *
+ * @param volume the volume to delete
+ */
+ public void deleteStorageVolume(Volume volume) {
+ s_logger.info("Deleting ONTAP volume by name: " + volume.getName() + " and uuid: " + volume.getUuid());
+ // Calling the VolumeFeignClient to delete the volume
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ try {
+ // TODO: Implement lun and file deletion, if any, before deleting the volume
+ JobResponse jobResponse = volumeFeignClient.deleteVolume(authHeader, volume.getUuid());
+ Boolean jobSucceeded = jobPollForSuccess(jobResponse.getJob().getUuid(), 10, 1);
+ if (!jobSucceeded) {
+ s_logger.error("Volume deletion job failed for volume: " + volume.getName());
+ throw new CloudRuntimeException("Volume deletion job failed for volume: " + volume.getName());
+ }
+ s_logger.info("Volume deleted successfully: " + volume.getName());
+ } catch (FeignException.FeignClientException e) {
+ s_logger.error("Exception while deleting volume: ", e);
+ throw new CloudRuntimeException("Failed to delete volume: " + e.getMessage());
+ }
+ s_logger.info("ONTAP volume deletion process completed for volume: " + volume.getName());
+ }
+
+ /**
+ * Gets ONTAP Flex-Volume
+ * Eligible only for Unified ONTAP storage
+ * throw exception in case of disaggregated ONTAP storage
+ *
+ * @param volume the volume to retrieve
+ * @return the retrieved Volume object
+ */
+ public Volume getStorageVolume(Volume volume) {
+ //TODO
+ return null;
+ }
+
+ /**
+ * Get the storage path based on protocol.
+ * For iSCSI: Returns the iSCSI target IQN (e.g., iqn.1992-08.com.netapp:sn.xxx:vs.3)
+ * For NFS: Returns the mount path (to be implemented)
+ *
+ * @return the storage path as a String
+ */
+ public String getStoragePath() {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ String targetIqn = null;
+ try {
+ if (storage.getProtocol() == ProtocolType.ISCSI) {
+ // For iSCSI, fetch the target IQN from the iSCSI service
+ s_logger.info("Fetching iSCSI target IQN for SVM: {}", storage.getSvmName());
+
+ Map queryParams = new HashMap<>();
+ queryParams.put(Constants.SVM_DOT_NAME, storage.getSvmName());
+ queryParams.put("fields", "enabled,target");
+ queryParams.put("max_records", "1");
+
+ OntapResponse response = sanFeignClient.getIscsiServices(authHeader, queryParams);
+
+ if (response == null || response.getRecords() == null || response.getRecords().isEmpty()) {
+ throw new CloudRuntimeException("No iSCSI service found for SVM: " + storage.getSvmName());
+ }
+
+ IscsiService iscsiService = response.getRecords().get(0);
+
+ if (iscsiService.getTarget() == null || iscsiService.getTarget().getName() == null) {
+ throw new CloudRuntimeException("iSCSI target IQN not found for SVM: " + storage.getSvmName());
+ }
+
+ targetIqn = iscsiService.getTarget().getName();
+ s_logger.info("Retrieved iSCSI target IQN: {}", targetIqn);
+ return targetIqn;
+
+ } else if (storage.getProtocol() == ProtocolType.NFS3) {
+ // TODO: Implement NFS path retrieval logic
+ } else {
+ throw new CloudRuntimeException("Unsupported protocol for path retrieval: " + storage.getProtocol());
+ }
+
+ } catch (FeignException.FeignClientException e) {
+ s_logger.error("Exception while retrieving storage path for protocol {}: {}", storage.getProtocol(), e.getMessage(), e);
+ throw new CloudRuntimeException("Failed to retrieve storage path: " + e.getMessage());
+ }
+ return targetIqn;
+ }
+
+
+
+ /**
+ * Get the network ip interface
+ *
+ * @return the network interface ip as a String
+ */
+
+ public String getNetworkInterface() {
+ // Feign call to get network interfaces
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ try {
+ Map queryParams = new HashMap<>();
+ queryParams.put(Constants.SVM_DOT_NAME, storage.getSvmName());
+ if (storage.getProtocol() != null) {
+ switch (storage.getProtocol()) {
+ case NFS3:
+ queryParams.put(Constants.SERVICES, Constants.DATA_NFS);
+ break;
+ case ISCSI:
+ queryParams.put(Constants.SERVICES, Constants.DATA_ISCSI);
+ break;
+ default:
+ s_logger.error("Unsupported protocol: " + storage.getProtocol());
+ throw new CloudRuntimeException("Unsupported protocol: " + storage.getProtocol());
+ }
+ }
+ queryParams.put(Constants.FIELDS, Constants.IP_ADDRESS);
+ queryParams.put(Constants.RETURN_RECORDS, Constants.TRUE);
+ OntapResponse response =
+ networkFeignClient.getNetworkIpInterfaces(authHeader, queryParams);
+ if (response != null && response.getRecords() != null && !response.getRecords().isEmpty()) {
+ IpInterface ipInterface = null;
+ // For simplicity, return the first interface's name (Of IPv4 type for NFS3)
+ if (storage.getProtocol() == ProtocolType.ISCSI) {
+ ipInterface = response.getRecords().get(0);
+ } else if (storage.getProtocol() == ProtocolType.NFS3) {
+ for (IpInterface iface : response.getRecords()) {
+ if (iface.getIp().getAddress().contains(".")) {
+ ipInterface = iface;
+ break;
+ }
+ }
+ }
+
+ s_logger.info("Retrieved network interface: " + ipInterface.getIp().getAddress());
+ return ipInterface.getIp().getAddress();
+ } else {
+ throw new CloudRuntimeException("No network interfaces found for SVM " + storage.getSvmName() +
+ " for protocol " + storage.getProtocol());
+ }
+ } catch (FeignException.FeignClientException e) {
+ s_logger.error("Exception while retrieving network interfaces: ", e);
+ throw new CloudRuntimeException("Failed to retrieve network interfaces: " + e.getMessage());
+ }
+ }
+
+ /**
+ * Method encapsulates the behavior based on the opted protocol in subclasses.
+ * it is going to mimic
+ * createLun for iSCSI, FC protocols
+ * createFile for NFS3.0 and NFS4.1 protocols
+ * createNameSpace for Nvme/TCP and Nvme/FC protocol
+ *
+ * @param cloudstackVolume the CloudStack volume to create
+ * @return the created CloudStackVolume object
+ */
+ abstract public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume);
+
+ /**
+ * Method encapsulates the behavior based on the opted protocol in subclasses.
+ * it is going to mimic
+ * updateLun for iSCSI, FC protocols
+ * updateFile for NFS3.0 and NFS4.1 protocols
+ * updateNameSpace for Nvme/TCP and Nvme/FC protocol
+ *
+ * @param cloudstackVolume the CloudStack volume to update
+ * @return the updated CloudStackVolume object
+ */
+ abstract CloudStackVolume updateCloudStackVolume(CloudStackVolume cloudstackVolume);
+
+ /**
+ * Method encapsulates the behavior based on the opted protocol in subclasses.
+ * it is going to mimic
+ * deleteLun for iSCSI, FC protocols
+ * deleteFile for NFS3.0 and NFS4.1 protocols
+ * deleteNameSpace for Nvme/TCP and Nvme/FC protocol
+ *
+ * @param cloudstackVolume the CloudStack volume to delete
+ */
+ abstract public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume);
+
+ /**
+ * Method encapsulates the behavior based on the opted protocol in subclasses.
+ * it is going to mimic
+ * cloneLun for iSCSI, FC protocols
+ * cloneFile for NFS3.0 and NFS4.1 protocols
+ * cloneNameSpace for Nvme/TCP and Nvme/FC protocol
+ * @param cloudstackVolume the CloudStack volume to copy
+ */
+ abstract public void copyCloudStackVolume(CloudStackVolume cloudstackVolume);
+
+ /**
+ * Method encapsulates the behavior based on the opted protocol in subclasses.
+ * it is going to mimic
+ * getLun for iSCSI, FC protocols
+ * getFile for NFS3.0 and NFS4.1 protocols
+ * getNameSpace for Nvme/TCP and Nvme/FC protocol
+ * @param cloudStackVolumeMap the CloudStack volume to retrieve
+ * @return the retrieved CloudStackVolume object
+ */
+ abstract public CloudStackVolume getCloudStackVolume(Map cloudStackVolumeMap);
+
+ /**
+ * Method encapsulates the behavior based on the opted protocol in subclasses
+ * createiGroup for iSCSI and FC protocols
+ * createExportPolicy for NFS 3.0 and NFS 4.1 protocols
+ * createSubsystem for Nvme/TCP and Nvme/FC protocols
+ * @param accessGroup the access group to create
+ * @return the created AccessGroup object
+ */
+ abstract public AccessGroup createAccessGroup(AccessGroup accessGroup);
+
+ /**
+ * Method encapsulates the behavior based on the opted protocol in subclasses
+ * deleteiGroup for iSCSI and FC protocols
+ * deleteExportPolicy for NFS 3.0 and NFS 4.1 protocols
+ * deleteSubsystem for Nvme/TCP and Nvme/FC protocols
+ * @param accessGroup the access group to delete
+ */
+ abstract public void deleteAccessGroup(AccessGroup accessGroup);
+
+ /**
+ * Method encapsulates the behavior based on the opted protocol in subclasses
+ * updateiGroup example add/remove-Iqn for iSCSI and FC protocols
+ * updateExportPolicy example add/remove-Rule for NFS 3.0 and NFS 4.1 protocols
+ * //TODO for Nvme/TCP and Nvme/FC protocols
+ * @param accessGroup the access group to update
+ * @return the updated AccessGroup object
+ */
+ abstract AccessGroup updateAccessGroup(AccessGroup accessGroup);
+
+ /**
+ * Method encapsulates the behavior based on the opted protocol in subclasses
+ * e.g., getIGroup for iSCSI and FC protocols
+ * e.g., getExportPolicy for NFS 3.0 and NFS 4.1 protocols
+ * //TODO for Nvme/TCP and Nvme/FC protocols
+ * @param values map to get access group values like name, svm name etc.
+ */
+ abstract public AccessGroup getAccessGroup(Map values);
+
+ /**
+ * Method encapsulates the behavior based on the opted protocol in subclasses
+ * lunMap for iSCSI and FC protocols
+ * //TODO for NFS 3.0 and NFS 4.1 protocols (e.g., export rule management)
+ * //TODO for Nvme/TCP and Nvme/FC protocols
+ * @param values map including SVM name, LUN name, and igroup name (for SAN) or equivalent for NAS
+ * @return map containing logical unit number for the new/existing mapping (SAN) or relevant info for NAS
+ */
+ abstract public Map enableLogicalAccess(Map values);
+
+ /**
+ * Method encapsulates the behavior based on the opted protocol in subclasses
+ * lunUnmap for iSCSI and FC protocols
+ * @param values map including LUN UUID and iGroup UUID (for SAN) or equivalent for NAS
+ */
+ abstract public void disableLogicalAccess(Map values);
+
+ /**
+ * Method encapsulates the behavior based on the opted protocol in subclasses
+ * lunMap lookup for iSCSI/FC protocols (GET-only, no side-effects)
+ * @param values map with SVM name, LUN name, and igroup name (for SAN) or equivalent for NAS
+ * @return map containing logical unit number if mapping exists; otherwise null
+ */
+ abstract public Map getLogicalAccess(Map values);
+
+ private Boolean jobPollForSuccess(String jobUUID, int maxRetries, int sleepTimeInSecs) {
+ //Create URI for GET Job API
+ int jobRetryCount = 0;
+ Job jobResp = null;
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ while (jobResp == null || !jobResp.getState().equals(Constants.JOB_SUCCESS)) {
+ if (jobRetryCount >= maxRetries) {
+ s_logger.error("Job did not complete within expected time.");
+ throw new CloudRuntimeException("Job did not complete within expected time.");
+ }
+
+ try {
+ jobResp = jobFeignClient.getJobByUUID(authHeader, jobUUID);
+ if (jobResp == null) {
+ s_logger.warn("Job with UUID " + jobUUID + " not found. Retrying...");
+ } else if (jobResp.getState().equals(Constants.JOB_FAILURE)) {
+ throw new CloudRuntimeException("Job failed with error: " + jobResp.getMessage());
+ }
+ } catch (FeignException.FeignClientException e) {
+ throw new CloudRuntimeException("Failed to fetch job status: " + e.getMessage());
+ }
+
+ jobRetryCount++;
+ Thread.sleep(sleepTimeInSecs * 1000);
+ }
+ if (jobResp == null || !jobResp.getState().equals(Constants.JOB_SUCCESS)) {
+ return false;
+ }
+ } catch (FeignException.FeignClientException e) {
+ throw new CloudRuntimeException("Failed to fetch job status: " + e.getMessage());
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ return true;
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java
new file mode 100644
index 000000000000..c2aa4e462d2f
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedNASStrategy.java
@@ -0,0 +1,551 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.service;
+
+import com.cloud.agent.api.Answer;
+import com.cloud.host.HostVO;
+import com.cloud.storage.Storage;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.utils.exception.CloudRuntimeException;
+import feign.FeignException;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint;
+import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import org.apache.cloudstack.storage.command.CreateObjectCommand;
+import org.apache.cloudstack.storage.command.DeleteCommand;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.feign.FeignClientFactory;
+import org.apache.cloudstack.storage.feign.client.JobFeignClient;
+import org.apache.cloudstack.storage.feign.client.NASFeignClient;
+import org.apache.cloudstack.storage.feign.client.VolumeFeignClient;
+import org.apache.cloudstack.storage.feign.model.ExportPolicy;
+import org.apache.cloudstack.storage.feign.model.ExportRule;
+import org.apache.cloudstack.storage.feign.model.FileInfo;
+import org.apache.cloudstack.storage.feign.model.Job;
+import org.apache.cloudstack.storage.feign.model.Nas;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Svm;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import org.apache.cloudstack.storage.feign.model.response.JobResponse;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.CloudStackVolume;
+import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.cloudstack.storage.utils.Utility;
+import org.apache.cloudstack.storage.volume.VolumeObject;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import javax.inject.Inject;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * NAS (NFS) storage strategy for ONTAP unified storage. Manages export
+ * policies for access control and delegates qcow2 file creation/deletion to
+ * the KVM host via agent commands.
+ */
+public class UnifiedNASStrategy extends NASStrategy {
+
+    private static final Logger s_logger = LogManager.getLogger(UnifiedNASStrategy.class);
+    // Factory that builds the Feign REST clients below against the ONTAP management endpoint.
+    private final FeignClientFactory feignClientFactory;
+    private final NASFeignClient nasFeignClient;
+    private final VolumeFeignClient volumeFeignClient;
+    private final JobFeignClient jobFeignClient;
+    @Inject private VolumeDao volumeDao;
+    @Inject private EndPointSelector epSelector;
+    @Inject private StoragePoolDetailsDao storagePoolDetailsDao;
+
+    /**
+     * Builds the strategy and its REST clients from the storage endpoint IP.
+     *
+     * @param ontapStorage connection details (IP, credentials, SVM) for the array
+     */
+    public UnifiedNASStrategy(OntapStorage ontapStorage) {
+        super(ontapStorage);
+        String baseURL = Constants.HTTPS + ontapStorage.getStorageIP();
+        this.feignClientFactory = new FeignClientFactory();
+        this.nasFeignClient = feignClientFactory.createClient(NASFeignClient.class, baseURL);
+        this.volumeFeignClient = feignClientFactory.createClient(VolumeFeignClient.class, baseURL);
+        this.jobFeignClient = feignClientFactory.createClient(JobFeignClient.class, baseURL );
+    }
+
+    // Replaces the storage descriptor; note the Feign clients keep the base
+    // URL captured at construction time - TODO confirm callers never change the IP.
+    public void setOntapStorage(OntapStorage ontapStorage) {
+        this.storage = ontapStorage;
+    }
+
+    /**
+     * Creates a CloudStack volume on NAS storage: updates the volume's pool
+     * metadata in the database, then asks the KVM host to create the backing
+     * qcow2 file via a CreateObjectCommand.
+     *
+     * @param cloudstackVolume wrapper holding the datastore id and volume info
+     * @return the same cloudstackVolume on success
+     * @throws CloudRuntimeException if the metadata update or the host-side
+     *         qcow2 creation fails
+     */
+    @Override
+    public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume) {
+        s_logger.info("createCloudStackVolume: Create cloudstack volume " + cloudstackVolume);
+        try {
+            // Step 1: set cloudstack volume metadata (the returned UUID was unused)
+            updateCloudStackVolumeMetadata(cloudstackVolume.getDatastoreId(), cloudstackVolume.getVolumeInfo());
+            // Step 2: Send command to KVM host to create qcow2 file using qemu-img
+            Answer answer = createVolumeOnKVMHost(cloudstackVolume.getVolumeInfo());
+            if (answer == null || !answer.getResult()) {
+                String errMsg = answer != null ? answer.getDetails() : "Failed to create qcow2 on KVM host";
+                s_logger.error("createCloudStackVolume: " + errMsg);
+                throw new CloudRuntimeException(errMsg);
+            }
+            return cloudstackVolume;
+        } catch (Exception e) {
+            // Pass the throwable to the logger so the stack trace is preserved.
+            s_logger.error("createCloudStackVolume: error occured ", e);
+            throw new CloudRuntimeException(e);
+        }
+    }
+
+    /**
+     * Updates an existing CloudStack volume on NAS storage.
+     * Not implemented yet; currently always returns null.
+     */
+    @Override
+    CloudStackVolume updateCloudStackVolume(CloudStackVolume cloudstackVolume) {
+        //TODO
+        return null;
+    }
+
+    /**
+     * Deletes a CloudStack volume from NAS storage by asking the KVM host to
+     * remove the backing qcow2 file via a DeleteCommand.
+     *
+     * @param cloudstackVolume wrapper holding the volume info to delete
+     * @throws CloudRuntimeException if the host-side deletion fails
+     */
+    @Override
+    public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) {
+        s_logger.info("deleteCloudStackVolume: Delete cloudstack volume " + cloudstackVolume);
+        try {
+            // Step 1: Send command to KVM host to delete qcow2 file using qemu-img
+            Answer answer = deleteVolumeOnKVMHost(cloudstackVolume.getVolumeInfo());
+            if (answer == null || !answer.getResult()) {
+                String errMsg = answer != null ? answer.getDetails() : "Failed to delete qcow2 on KVM host";
+                s_logger.error("deleteCloudStackVolume: " + errMsg);
+                throw new CloudRuntimeException(errMsg);
+            }
+        } catch (Exception e) {
+            // Pass the throwable to the logger so the stack trace is preserved.
+            s_logger.error("deleteCloudStackVolume: error occured ", e);
+            throw new CloudRuntimeException(e);
+        }
+    }
+
+    /**
+     * Copies a CloudStack volume. Not implemented yet; this is a no-op.
+     */
+    @Override
+    public void copyCloudStackVolume(CloudStackVolume cloudstackVolume) {
+
+    }
+
+    /**
+     * Looks up a CloudStack volume from the given attribute map.
+     * Not implemented yet; currently always returns null.
+     */
+    @Override
+    public CloudStackVolume getCloudStackVolume(Map cloudStackVolumeMap) {
+        return null;
+    }
+
+    /**
+     * Creates the "access group" for a NAS pool: an ONTAP export policy with
+     * one rule per connecting host, attached to the pool's backing volume.
+     * The created policy's id and name are persisted as storage pool details
+     * so deleteAccessGroup can find them later.
+     *
+     * @param accessGroup access group carrying the pool info and host list
+     * @return the accessGroup with the created policy set on it
+     * @throws CloudRuntimeException on any creation or attachment failure
+     */
+    @Override
+    public AccessGroup createAccessGroup(AccessGroup accessGroup) {
+        s_logger.info("createAccessGroup: Create access group {}: " , accessGroup);
+        Map details = accessGroup.getPrimaryDataStoreInfo().getDetails();
+        String svmName = details.get(Constants.SVM_NAME);
+        String volumeUUID = details.get(Constants.VOLUME_UUID);
+        String volumeName = details.get(Constants.VOLUME_NAME);
+
+        // Create the export policy
+        ExportPolicy policyRequest = createExportPolicyRequest(accessGroup, svmName, volumeName);
+        try {
+            ExportPolicy createdPolicy = createExportPolicy(svmName, policyRequest);
+            s_logger.info("ExportPolicy created: {}, now attaching this policy to storage pool volume", createdPolicy.getName());
+            // attach export policy to volume of storage pool
+            assignExportPolicyToVolume(volumeUUID, createdPolicy.getName());
+            // save the export policy details in storage pool details
+            storagePoolDetailsDao.addDetail(accessGroup.getPrimaryDataStoreInfo().getId(), Constants.EXPORT_POLICY_ID, String.valueOf(createdPolicy.getId()), true);
+            storagePoolDetailsDao.addDetail(accessGroup.getPrimaryDataStoreInfo().getId(), Constants.EXPORT_POLICY_NAME, createdPolicy.getName(), true);
+            s_logger.info("Successfully assigned exportPolicy {} to volume {}", policyRequest.getName(), volumeName);
+            accessGroup.setPolicy(policyRequest);
+            return accessGroup;
+        } catch (Exception e) {
+            // Preserve the cause; string-concatenating the exception loses the stack trace.
+            s_logger.error("Exception occurred while creating access group: ", e);
+            throw new CloudRuntimeException("Failed to create access group: " + e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Deletes the export policy that was created for the given storage pool.
+     * The policy id/name are read from the pool details persisted by
+     * createAccessGroup.
+     *
+     * @param accessGroup access group describing the storage pool
+     * @throws CloudRuntimeException if validation fails or ONTAP rejects the delete
+     */
+    @Override
+    public void deleteAccessGroup(AccessGroup accessGroup) {
+        s_logger.info("deleteAccessGroup: Deleting export policy");
+
+        if (accessGroup == null) {
+            throw new CloudRuntimeException("deleteAccessGroup: Invalid accessGroup object - accessGroup is null");
+        }
+
+        // Get PrimaryDataStoreInfo from accessGroup
+        PrimaryDataStoreInfo primaryDataStoreInfo = accessGroup.getPrimaryDataStoreInfo();
+        if (primaryDataStoreInfo == null) {
+            throw new CloudRuntimeException("deleteAccessGroup: PrimaryDataStoreInfo is null in accessGroup");
+        }
+        s_logger.info("deleteAccessGroup: Deleting export policy for the storage pool {}", primaryDataStoreInfo.getName());
+        try {
+            String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+            // Export policy attached to this storage pool, saved at creation time.
+            String exportPolicyName = primaryDataStoreInfo.getDetails().get(Constants.EXPORT_POLICY_NAME);
+            String exportPolicyId = primaryDataStoreInfo.getDetails().get(Constants.EXPORT_POLICY_ID);
+
+            nasFeignClient.deleteExportPolicyById(authHeader, exportPolicyId);
+            s_logger.info("deleteAccessGroup: Successfully deleted export policy '{}'", exportPolicyName);
+        } catch (Exception e) {
+            // Single handler: the previous nested try/catch logged and wrapped
+            // the same exception twice.
+            s_logger.error("deleteAccessGroup: Failed to delete export policy. Exception: {}", e.getMessage(), e);
+            throw new CloudRuntimeException("Failed to delete export policy: " + e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Updates an access group (export policy membership).
+     * Not implemented yet; currently always returns null.
+     */
+    @Override
+    public AccessGroup updateAccessGroup(AccessGroup accessGroup) {
+        //TODO
+        return null;
+    }
+
+    /**
+     * Fetches an access group from the given attribute map.
+     * Not implemented yet; currently always returns null.
+     */
+    @Override
+    public AccessGroup getAccessGroup(Map values) {
+        return null; //TODO: This method need to be rewritten according to the signature in StorageStrategy interface
+    }
+
+// @Override
+// public AccessGroup getAccessGroup(AccessGroup accessGroup) {
+// s_logger.info("getAccessGroup: Get export policy");
+//
+// if (accessGroup == null) {
+// throw new CloudRuntimeException("getAccessGroup: Invalid accessGroup object - accessGroup is null");
+// }
+//
+// // Get PrimaryDataStoreInfo from accessGroup
+// PrimaryDataStoreInfo primaryDataStoreInfo = accessGroup.getPrimaryDataStoreInfo();
+// if (primaryDataStoreInfo == null) {
+// throw new CloudRuntimeException("getAccessGroup: PrimaryDataStoreInfo is null in accessGroup");
+// }
+// s_logger.info("getAccessGroup: Get export policy for the storage pool {}", primaryDataStoreInfo.getName());
+// try {
+// String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+// // Determine export policy attached to the storage pool
+// String exportPolicyName = primaryDataStoreInfo.getDetails().get(Constants.EXPORT_POLICY_NAME);
+// String exportPolicyId = primaryDataStoreInfo.getDetails().get(Constants.EXPORT_POLICY_ID);
+//
+// try {
+// ExportPolicy exportPolicy = nasFeignClient.getExportPolicyById(authHeader,exportPolicyId);
+// if(exportPolicy==null){
+// s_logger.error("getAccessGroup: Failed to retrieve export policy for export policy");
+// throw new CloudRuntimeException("getAccessGroup: Failed to retrieve export policy for export policy");
+// }
+// accessGroup.setPolicy(exportPolicy);
+// s_logger.info("getAccessGroup: Successfully fetched export policy '{}'", exportPolicyName);
+// } catch (Exception e) {
+// s_logger.error("getAccessGroup: Failed to delete export policy. Exception: {}", e.getMessage(), e);
+// throw new CloudRuntimeException("Failed to delete export policy: " + e.getMessage(), e);
+// }
+// } catch (Exception e) {
+// s_logger.error("getAccessGroup: Failed to delete export policy. Exception: {}", e.getMessage(), e);
+// throw new CloudRuntimeException("Failed to delete export policy: " + e.getMessage(), e);
+// }
+// return accessGroup;
+// }
+
+    /**
+     * Enables logical access (e.g. export mapping) for the given attributes.
+     * Not implemented yet; currently always returns null.
+     */
+    @Override
+    public Map enableLogicalAccess(Map values) {
+        //TODO
+        return null;
+    }
+
+    /**
+     * Disables logical access for the given attributes. Not implemented yet; no-op.
+     */
+    @Override
+    public void disableLogicalAccess(Map values) {
+        //TODO
+    }
+
+    /**
+     * Returns logical access information for NAS.
+     * NOTE(review): the abstract contract documents "null when no mapping
+     * exists", but this stub returns an immutable empty map instead - confirm
+     * callers handle both conventions.
+     */
+    @Override
+    public Map getLogicalAccess(Map values) {
+        return Map.of();
+    }
+
+
+    /**
+     * Creates an export policy on ONTAP and verifies it exists by reading it
+     * back by name.
+     *
+     * @param svmName SVM name, used for logging (the SVM itself travels inside the policy request)
+     * @param policy  export policy request to create
+     * @return the created policy as returned by ONTAP (includes its id)
+     * @throws CloudRuntimeException if creation or read-back verification fails
+     */
+    private ExportPolicy createExportPolicy(String svmName, ExportPolicy policy) {
+        s_logger.info("Creating export policy: {} for SVM: {}", policy, svmName);
+
+        try {
+            String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+            nasFeignClient.createExportPolicy(authHeader, policy);
+            OntapResponse policiesResponse = null;
+            try {
+                Map queryParams = Map.of(Constants.NAME, policy.getName());
+                policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader, queryParams);
+                // Guard against a null record list in addition to an empty one.
+                if (policiesResponse == null || policiesResponse.getRecords() == null || policiesResponse.getRecords().isEmpty()) {
+                    throw new CloudRuntimeException("Export policy " + policy.getName() + " was not created on ONTAP. " +
+                            "Received successful response but policy does not exist.");
+                }
+                s_logger.info("Export policy created and verified successfully: " + policy.getName());
+            } catch (FeignException e) {
+                s_logger.error("Failed to verify export policy creation: " + policy.getName(), e);
+                // Keep the cause attached instead of flattening it to a message.
+                throw new CloudRuntimeException("Export policy creation verification failed: " + e.getMessage(), e);
+            }
+            s_logger.info("Export policy created successfully with name {}", policy.getName());
+            return policiesResponse.getRecords().get(0);
+        } catch (FeignException e) {
+            s_logger.error("Failed to create export policy: {}", policy, e);
+            throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage(), e);
+        } catch (Exception e) {
+            s_logger.error("Exception while creating export policy: {}", policy, e);
+            throw new CloudRuntimeException("Failed to create export policy: " + e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Deletes the export policy with the given name, looking up its id first.
+     *
+     * @param svmName    SVM the policy belongs to (not used in the lookup here)
+     * @param policyName name of the policy to delete
+     * @throws CloudRuntimeException if the policy cannot be found or deleted
+     */
+    private void deleteExportPolicy(String svmName, String policyName) {
+        try {
+            String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+            Map queryParams = Map.of(Constants.NAME, policyName);
+            OntapResponse policiesResponse = nasFeignClient.getExportPolicyResponse(authHeader, queryParams);
+
+            // Treat a null/empty record list as "not found" as well - the previous
+            // null-only check let an empty list fall through to get(0) and throw
+            // IndexOutOfBoundsException instead of the intended error.
+            if (policiesResponse == null || policiesResponse.getRecords() == null || policiesResponse.getRecords().isEmpty()) {
+                s_logger.warn("Export policy not found for deletion: {}", policyName);
+                throw new CloudRuntimeException("Export policy not found : " + policyName);
+            }
+            String policyId = String.valueOf(policiesResponse.getRecords().get(0).getId());
+            nasFeignClient.deleteExportPolicyById(authHeader, policyId);
+            s_logger.info("Export policy deleted successfully: {}", policyName);
+        } catch (Exception e) {
+            s_logger.error("Failed to delete export policy: {}", policyName, e);
+            // Attach the cause so callers can see the underlying failure.
+            throw new CloudRuntimeException("Failed to delete export policy: " + policyName, e);
+        }
+    }
+
+    /**
+     * Assigns an existing export policy to an ONTAP volume by PATCHing the
+     * volume's NAS configuration, then polls the resulting asynchronous ONTAP
+     * job until it succeeds or the retry budget is exhausted.
+     *
+     * @param volumeUuid UUID of the ONTAP volume to update
+     * @param policyName name of the export policy to attach
+     * @throws CloudRuntimeException if the update cannot be submitted, the job
+     *         fails, or it does not finish within the retry budget
+     */
+    private void assignExportPolicyToVolume(String volumeUuid, String policyName) {
+        s_logger.info("Assigning export policy: {} to volume: {}", policyName, volumeUuid);
+
+        try {
+            String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+            // Build a sparse Volume update carrying only the NAS export policy.
+            Volume volumeUpdate = new Volume();
+            Nas nas = new Nas();
+            ExportPolicy policy = new ExportPolicy();
+            policy.setName(policyName);
+            nas.setExportPolicy(policy);
+            volumeUpdate.setNas(nas);
+
+            JobResponse jobResponse = volumeFeignClient.updateVolumeRebalancing(authHeader, volumeUuid, volumeUpdate);
+            if (jobResponse == null || jobResponse.getJob() == null) {
+                // Fixed missing space between the policy name and "to volume".
+                throw new CloudRuntimeException("Failed to attach policy " + policyName + " to volume " + volumeUuid);
+            }
+            String jobUUID = jobResponse.getJob().getUuid();
+            // Poll the async job until it reports success or we give up.
+            int jobRetryCount = 0;
+            Job updateVolumeJob = null;
+            while (updateVolumeJob == null || !updateVolumeJob.getState().equals(Constants.JOB_SUCCESS)) {
+                if (jobRetryCount >= Constants.JOB_MAX_RETRIES) {
+                    s_logger.error("Job to update volume " + volumeUuid + " did not complete within expected time.");
+                    throw new CloudRuntimeException("Job to update volume " + volumeUuid + " did not complete within expected time.");
+                }
+                try {
+                    updateVolumeJob = jobFeignClient.getJobByUUID(authHeader, jobUUID);
+                    if (updateVolumeJob == null) {
+                        s_logger.warn("Job with UUID " + jobUUID + " not found. Retrying...");
+                    } else if (updateVolumeJob.getState().equals(Constants.JOB_FAILURE)) {
+                        throw new CloudRuntimeException("Job to update volume " + volumeUuid + " failed with error: " + updateVolumeJob.getMessage());
+                    }
+                } catch (FeignException.FeignClientException e) {
+                    throw new CloudRuntimeException("Failed to fetch job status: " + e.getMessage(), e);
+                }
+                jobRetryCount++;
+                try {
+                    Thread.sleep(Constants.CREATE_VOLUME_CHECK_SLEEP_TIME); // Sleep before polling again
+                } catch (InterruptedException e) {
+                    // Restore the interrupt flag instead of letting a generic
+                    // catch swallow it.
+                    Thread.currentThread().interrupt();
+                    throw new CloudRuntimeException("Interrupted while waiting for volume update job " + jobUUID, e);
+                }
+            }
+            s_logger.info("Export policy successfully assigned to volume: {}", volumeUuid);
+        } catch (FeignException e) {
+            s_logger.error("Failed to assign export policy to volume: {}", volumeUuid, e);
+            throw new CloudRuntimeException("Failed to assign export policy: " + e.getMessage(), e);
+        } catch (CloudRuntimeException e) {
+            // Already contextualized above; rethrow instead of double-wrapping.
+            throw e;
+        } catch (Exception e) {
+            s_logger.error("Exception while assigning export policy to volume: {}", volumeUuid, e);
+            throw new CloudRuntimeException("Failed to assign export policy: " + e.getMessage(), e);
+        }
+    }
+
+ private boolean createFile(String volumeUuid, String filePath, FileInfo fileInfo) {
+ s_logger.info("Creating file: {} in volume: {}", filePath, volumeUuid);
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ nasFeignClient.createFile(authHeader, volumeUuid, filePath, fileInfo);
+ s_logger.info("File created successfully: {} in volume: {}", filePath, volumeUuid);
+ return true;
+ } catch (FeignException e) {
+ s_logger.error("Failed to create file: {} in volume: {}", filePath, volumeUuid, e);
+ return false;
+ } catch (Exception e) {
+ s_logger.error("Exception while creating file: {} in volume: {}", filePath, volumeUuid, e);
+ return false;
+ }
+ }
+
+ private boolean deleteFile(String volumeUuid, String filePath) {
+ s_logger.info("Deleting file: {} from volume: {}", filePath, volumeUuid);
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ nasFeignClient.deleteFile(authHeader, volumeUuid, filePath);
+ s_logger.info("File deleted successfully: {} from volume: {}", filePath, volumeUuid);
+ return true;
+ } catch (FeignException e) {
+ s_logger.error("Failed to delete file: {} from volume: {}", filePath, volumeUuid, e);
+ return false;
+ } catch (Exception e) {
+ s_logger.error("Exception while deleting file: {} from volume: {}", filePath, volumeUuid, e);
+ return false;
+ }
+ }
+
+    /**
+     * Fetches file metadata for a path inside an ONTAP volume.
+     *
+     * @return the ONTAP response, or null when the file does not exist (HTTP 404)
+     * @throws CloudRuntimeException for any other REST or unexpected failure
+     */
+    private OntapResponse getFileInfo(String volumeUuid, String filePath) {
+        s_logger.debug("Getting file info for: {} in volume: {}", filePath, volumeUuid);
+        try {
+            String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+            OntapResponse response = nasFeignClient.getFileResponse(authHeader, volumeUuid, filePath);
+            s_logger.debug("Retrieved file info for: {} in volume: {}", filePath, volumeUuid);
+            return response;
+        } catch (FeignException e){
+            if (e.status() == 404) {
+                // Absence is an expected outcome, not an error.
+                s_logger.debug("File not found: {} in volume: {}", filePath, volumeUuid);
+                return null;
+            }
+            s_logger.error("Failed to get file info: {} in volume: {}", filePath, volumeUuid, e);
+            // Attach the cause instead of discarding it.
+            throw new CloudRuntimeException("Failed to get file info: " + e.getMessage(), e);
+        } catch (Exception e){
+            s_logger.error("Exception while getting file info: {} in volume: {}", filePath, volumeUuid, e);
+            throw new CloudRuntimeException("Failed to get file info: " + e.getMessage(), e);
+        }
+    }
+
+ private boolean updateFile(String volumeUuid, String filePath, FileInfo fileInfo) {
+ s_logger.info("Updating file: {} in volume: {}", filePath, volumeUuid);
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ nasFeignClient.updateFile( authHeader, volumeUuid, filePath, fileInfo);
+ s_logger.info("File updated successfully: {} in volume: {}", filePath, volumeUuid);
+ return true;
+ } catch (FeignException e) {
+ s_logger.error("Failed to update file: {} in volume: {}", filePath, volumeUuid, e);
+ return false;
+ } catch (Exception e){
+ s_logger.error("Exception while updating file: {} in volume: {}", filePath, volumeUuid, e);
+ return false;
+ }
+ }
+
+
+    /**
+     * Builds the ExportPolicy request for an access group: a single NFSv3 rule
+     * whose clients are the connecting hosts, each matched as a /32 address
+     * (storage IP when set, otherwise the private IP).
+     */
+    private ExportPolicy createExportPolicyRequest(AccessGroup accessGroup,String svmName , String volumeName){
+
+        String policyName = Utility.generateExportPolicyName(svmName, volumeName);
+
+        // One /32 client entry per host that must mount the share.
+        List exportClients = new ArrayList<>();
+        List hosts = accessGroup.getHostsToConnect();
+        for (HostVO host : hosts) {
+            String storageIp = host.getStorageIpAddress();
+            String effectiveIp = (storageIp == null || storageIp.isEmpty())
+                    ? host.getPrivateIpAddress()
+                    : storageIp;
+            ExportRule.ExportClient exportClient = new ExportRule.ExportClient();
+            exportClient.setMatch(effectiveIp + "/32");
+            exportClients.add(exportClient);
+        }
+
+        // Single NFSv3 rule granting read, write and superuser via AUTH_SYS.
+        ExportRule exportRule = new ExportRule();
+        exportRule.setClients(exportClients);
+        exportRule.setProtocols(List.of(ExportRule.ProtocolsEnum.nfs3));
+        exportRule.setRoRule(List.of("sys"));
+        exportRule.setRwRule(List.of("sys"));
+        exportRule.setSuperuser(List.of("sys"));
+
+        Svm svm = new Svm();
+        svm.setName(svmName);
+
+        ExportPolicy exportPolicy = new ExportPolicy();
+        exportPolicy.setName(policyName);
+        exportPolicy.setSvm(svm);
+        List rules = new ArrayList<>();
+        rules.add(exportRule);
+        exportPolicy.setRules(rules);
+
+        return exportPolicy;
+    }
+
+    /**
+     * Updates the CloudStack volume row so it points at the NAS pool: sets the
+     * pool type to NetworkFilesystem, the pool id, and uses the volume UUID as
+     * the path (the qcow2 file name on the share).
+     *
+     * @param dataStoreId id of the primary data store (numeric string)
+     * @param volumeInfo  volume data object; must be a VolumeObject
+     * @return the volume UUID that was stored as the path
+     * @throws CloudRuntimeException if the volume row is missing or the update fails
+     */
+    private String updateCloudStackVolumeMetadata(String dataStoreId, DataObject volumeInfo) {
+        s_logger.info("updateCloudStackVolumeMetadata called with datastoreID: {} volumeInfo: {} ", dataStoreId, volumeInfo );
+        try {
+            VolumeObject volumeObject = (VolumeObject) volumeInfo;
+            long volumeId = volumeObject.getId();
+            s_logger.info("VolumeInfo ID from VolumeObject: {}", volumeId);
+            VolumeVO volume = volumeDao.findById(volumeId);
+            if (volume == null) {
+                throw new CloudRuntimeException("Volume not found with id: " + volumeId);
+            }
+            String volumeUuid = volumeInfo.getUuid();
+            volume.setPoolType(Storage.StoragePoolType.NetworkFilesystem);
+            volume.setPoolId(Long.parseLong(dataStoreId));
+            volume.setPath(volumeUuid); // Filename for qcow2 file
+            volumeDao.update(volume.getId(), volume);
+            return volumeUuid;
+        } catch (Exception e) {
+            s_logger.error("Exception while updating volumeInfo: {} in volume: {}", dataStoreId, volumeInfo.getUuid(), e);
+            // Keep the cause attached so the original failure is diagnosable.
+            throw new CloudRuntimeException("Exception while updating volumeInfo: " + e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Sends a CreateObjectCommand to a KVM agent endpoint so the host creates
+     * the qcow2 file backing the volume. Returns the agent's Answer, or a
+     * failure Answer when no endpoint is available or sending throws.
+     */
+    private Answer createVolumeOnKVMHost(DataObject volumeInfo) {
+        s_logger.info("createVolumeOnKVMHost called with volumeInfo: {} ", volumeInfo);
+
+        try {
+            s_logger.info("createVolumeOnKVMHost: Sending CreateObjectCommand to KVM agent for volume: {}", volumeInfo.getUuid());
+            CreateObjectCommand command = new CreateObjectCommand(volumeInfo.getTO());
+            EndPoint endpoint = epSelector.select(volumeInfo);
+            if (endpoint == null) {
+                String errMsg = "No remote endpoint to send CreateObjectCommand, check if host is up";
+                s_logger.error(errMsg);
+                return new Answer(command, false, errMsg);
+            }
+            s_logger.info("createVolumeOnKVMHost: Sending command to endpoint: {}", endpoint.getHostAddr());
+            Answer answer = endpoint.sendMessage(command);
+            boolean succeeded = answer != null && answer.getResult();
+            if (succeeded) {
+                s_logger.info("createVolumeOnKVMHost: Successfully created qcow2 file on KVM host");
+            } else {
+                s_logger.error("createVolumeOnKVMHost: Failed to create qcow2 file: {}",
+                    answer != null ? answer.getDetails() : "null answer");
+            }
+            return answer;
+        } catch (Exception e) {
+            s_logger.error("createVolumeOnKVMHost: Exception sending CreateObjectCommand", e);
+            return new Answer(null, false, e.toString());
+        }
+    }
+
+    /**
+     * Sends a DeleteCommand to a KVM agent endpoint so the host removes the
+     * qcow2 file backing the volume. Returns the agent's Answer, or a failure
+     * Answer when no endpoint is available or sending throws.
+     */
+    private Answer deleteVolumeOnKVMHost(DataObject volumeInfo) {
+        s_logger.info("deleteVolumeOnKVMHost called with volumeInfo: {} ", volumeInfo);
+
+        try {
+            s_logger.info("deleteVolumeOnKVMHost: Sending DeleteCommand to KVM agent for volume: {}", volumeInfo.getUuid());
+            DeleteCommand command = new DeleteCommand(volumeInfo.getTO());
+            EndPoint endpoint = epSelector.select(volumeInfo);
+            if (endpoint == null) {
+                String errMsg = "No remote endpoint to send DeleteCommand, check if host is up";
+                s_logger.error(errMsg);
+                return new Answer(command, false, errMsg);
+            }
+            s_logger.info("deleteVolumeOnKVMHost: Sending command to endpoint: {}", endpoint.getHostAddr());
+            Answer answer = endpoint.sendMessage(command);
+            boolean succeeded = answer != null && answer.getResult();
+            if (succeeded) {
+                s_logger.info("deleteVolumeOnKVMHost: Successfully deleted qcow2 file on KVM host");
+            } else {
+                s_logger.error("deleteVolumeOnKVMHost: Failed to delete qcow2 file: {}",
+                    answer != null ? answer.getDetails() : "null answer");
+            }
+            return answer;
+        } catch (Exception e) {
+            s_logger.error("deleteVolumeOnKVMHost: Exception sending DeleteCommand", e);
+            return new Answer(null, false, e.toString());
+        }
+    }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java
new file mode 100644
index 000000000000..c42e5cb6f516
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/UnifiedSANStrategy.java
@@ -0,0 +1,646 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.service;
+
+import com.cloud.host.HostVO;
+import com.cloud.hypervisor.Hypervisor;
+import com.cloud.utils.exception.CloudRuntimeException;
+import feign.FeignException;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import org.apache.cloudstack.storage.feign.FeignClientFactory;
+import org.apache.cloudstack.storage.feign.client.SANFeignClient;
+import org.apache.cloudstack.storage.feign.model.Igroup;
+import org.apache.cloudstack.storage.feign.model.Initiator;
+import org.apache.cloudstack.storage.feign.model.Svm;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Lun;
+import org.apache.cloudstack.storage.feign.model.LunMap;
+import org.apache.cloudstack.storage.feign.model.response.OntapResponse;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.CloudStackVolume;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.cloudstack.storage.utils.Utility;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public class UnifiedSANStrategy extends SANStrategy {
+
+ private static final Logger s_logger = LogManager.getLogger(UnifiedSANStrategy.class);
+ // Replace @Inject Feign client with FeignClientFactory
+ private final FeignClientFactory feignClientFactory;
+ private final SANFeignClient sanFeignClient;
+
+ // Builds the strategy against a single ONTAP endpoint: derives the HTTPS
+ // base URL from the storage IP and binds a SAN Feign client to it.
+ public UnifiedSANStrategy(OntapStorage ontapStorage) {
+ super(ontapStorage);
+ String baseURL = Constants.HTTPS + ontapStorage.getStorageIP();
+ // Initialize FeignClientFactory and create SAN client
+ this.feignClientFactory = new FeignClientFactory();
+ this.sanFeignClient = feignClientFactory.createClient(SANFeignClient.class, baseURL);
+ }
+
+ // Replaces the ONTAP storage descriptor used for credential/SVM lookups.
+ // NOTE(review): this does NOT rebuild sanFeignClient, so a changed storage
+ // IP still talks to the client created in the constructor — confirm intended.
+ public void setOntapStorage(OntapStorage ontapStorage) {
+ this.storage = ontapStorage;
+ }
+
+    /**
+     * Creates an ONTAP LUN for the requested CloudStack volume.
+     *
+     * @param cloudstackVolume request wrapper; must carry a non-null Lun payload
+     * @return a new CloudStackVolume holding the LUN record returned by ONTAP
+     * @throws CloudRuntimeException if the request is invalid or the REST call fails
+     */
+    @Override
+    public CloudStackVolume createCloudStackVolume(CloudStackVolume cloudstackVolume) {
+        s_logger.info("createCloudStackVolume : Creating Lun with cloudstackVolume request {} ", cloudstackVolume);
+        if (cloudstackVolume == null || cloudstackVolume.getLun() == null) {
+            s_logger.error("createCloudStackVolume: LUN creation failed. Invalid request: {}", cloudstackVolume);
+            throw new CloudRuntimeException("createCloudStackVolume : Failed to create Lun, invalid request");
+        }
+        try {
+            // Basic-auth header derived from the configured ONTAP credentials
+            String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+            //TODO: It is possible that Lun creation will take time and we may need to handle through async job.
+            OntapResponse createdLun = sanFeignClient.createLun(authHeader, true, cloudstackVolume.getLun());
+            if (createdLun == null || createdLun.getRecords() == null || createdLun.getRecords().isEmpty()) {
+                s_logger.error("createCloudStackVolume: LUN creation failed for Lun {}", cloudstackVolume.getLun().getName());
+                throw new CloudRuntimeException("Failed to create Lun: " + cloudstackVolume.getLun().getName());
+            }
+            Lun lun = createdLun.getRecords().get(0);
+            s_logger.debug("createCloudStackVolume: LUN created successfully. Lun: {}", lun);
+            s_logger.info("createCloudStackVolume: LUN created successfully. LunName: {}", lun.getName());
+
+            CloudStackVolume createdCloudStackVolume = new CloudStackVolume();
+            createdCloudStackVolume.setLun(lun);
+            return createdCloudStackVolume;
+        } catch (FeignException e) {
+            s_logger.error("FeignException occurred while creating LUN: {}, Status: {}, Exception: {}",
+                    cloudstackVolume.getLun().getName(), e.status(), e.getMessage());
+            // Preserve the underlying exception as the cause for diagnosability
+            throw new CloudRuntimeException("Failed to create Lun: " + e.getMessage(), e);
+        } catch (Exception e) {
+            s_logger.error("Exception occurred while creating LUN: {}, Exception: {}", cloudstackVolume.getLun().getName(), e.getMessage());
+            throw new CloudRuntimeException("Failed to create Lun: " + e.getMessage(), e);
+        }
+    }
+
+ // TODO: not yet implemented — volume updates are unsupported by this
+ // strategy; callers currently receive null.
+ @Override
+ CloudStackVolume updateCloudStackVolume(CloudStackVolume cloudstackVolume) {
+ //TODO
+ return null;
+ }
+
+    /**
+     * Deletes the ONTAP LUN backing the given CloudStack volume.
+     * A 404 from ONTAP is treated as already-deleted and skipped silently.
+     *
+     * @throws CloudRuntimeException on invalid input or any non-404 failure
+     */
+    @Override
+    public void deleteCloudStackVolume(CloudStackVolume cloudstackVolume) {
+        if (cloudstackVolume == null || cloudstackVolume.getLun() == null) {
+            s_logger.error("deleteCloudStackVolume: Lun deletion failed. Invalid request: {}", cloudstackVolume);
+            throw new CloudRuntimeException("deleteCloudStackVolume : Failed to delete Lun, invalid request");
+        }
+        s_logger.info("deleteCloudStackVolume : Deleting Lun: {}", cloudstackVolume.getLun().getName());
+        try {
+            String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+            // Allow deletion even while the LUN is still mapped to an igroup
+            Map queryParams = Map.of("allow_delete_while_mapped", "true");
+            try {
+                sanFeignClient.deleteLun(authHeader, cloudstackVolume.getLun().getUuid(), queryParams);
+            } catch (FeignException feignEx) {
+                if (feignEx.status() == 404) {
+                    s_logger.warn("deleteCloudStackVolume: Lun {} does not exist (status 404), skipping deletion", cloudstackVolume.getLun().getName());
+                    return;
+                }
+                throw feignEx;
+            }
+            s_logger.info("deleteCloudStackVolume: Lun deleted successfully. LunName: {}", cloudstackVolume.getLun().getName());
+        } catch (Exception e) {
+            s_logger.error("Exception occurred while deleting Lun: {}, Exception: {}", cloudstackVolume.getLun().getName(), e.getMessage());
+            // Keep the underlying exception as the cause instead of discarding it
+            throw new CloudRuntimeException("Failed to delete Lun: " + e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Creates a clone of the LUN carried by the given CloudStack volume.
+     * The clone is named "&lt;sourceLunName&gt;_clone" and sources the original LUN.
+     * NOTE: the Lun object inside the request is mutated in place (clone/source
+     * and name are set on it) before being sent to ONTAP.
+     *
+     * @throws CloudRuntimeException on invalid input or REST failure
+     */
+    @Override
+    public void copyCloudStackVolume(CloudStackVolume cloudstackVolume) {
+        if (cloudstackVolume == null || cloudstackVolume.getLun() == null) {
+            s_logger.error("copyCloudStackVolume: Lun clone creation failed. Invalid request: {}", cloudstackVolume);
+            throw new CloudRuntimeException("copyCloudStackVolume : Failed to create Lun clone, invalid request");
+        }
+        s_logger.debug("copyCloudStackVolume: Creating clone of the cloudstack volume: {}", cloudstackVolume.getLun().getName());
+
+        try {
+            String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+            // Capture the source name BEFORE the request object is renamed below
+            String sourceLunName = cloudstackVolume.getLun().getName();
+            Lun lunCloneRequest = cloudstackVolume.getLun();
+            Lun.Clone clone = new Lun.Clone();
+            Lun.Source source = new Lun.Source();
+            source.setName(sourceLunName);
+            clone.setSource(source);
+            lunCloneRequest.setClone(clone);
+            lunCloneRequest.setName(sourceLunName + "_clone");
+            sanFeignClient.createLun(authHeader, true, lunCloneRequest);
+        } catch (FeignException e) {
+            s_logger.error("FeignException occurred while creating Lun clone: {}, Status: {}, Exception: {}", cloudstackVolume.getLun().getName(), e.status(), e.getMessage());
+            // Preserve the underlying exception as the cause
+            throw new CloudRuntimeException("Failed to create Lun clone: " + e.getMessage(), e);
+        } catch (Exception e) {
+            s_logger.error("Exception occurred while creating Lun clone: {}, Exception: {}", cloudstackVolume.getLun().getName(), e.getMessage());
+            throw new CloudRuntimeException("Failed to create Lun clone: " + e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Fetches a LUN by SVM name + LUN name.
+     *
+     * @param values must contain Constants.SVM_DOT_NAME and Constants.NAME
+     * @return the matching volume, or null when the LUN does not exist
+     * @throws CloudRuntimeException on invalid input or non-404 REST failure
+     */
+    @Override
+    public CloudStackVolume getCloudStackVolume(Map values) {
+        s_logger.info("getCloudStackVolume : fetching Lun");
+        s_logger.debug("getCloudStackVolume : fetching Lun with params {} ", values);
+        if (values == null || values.isEmpty()) {
+            s_logger.error("getCloudStackVolume: get Lun failed. Invalid request: {}", values);
+            throw new CloudRuntimeException("getCloudStackVolume : get Lun Failed, invalid request");
+        }
+        String svmName = values.get(Constants.SVM_DOT_NAME);
+        String lunName = values.get(Constants.NAME);
+        if (svmName == null || lunName == null || svmName.isEmpty() || lunName.isEmpty()) {
+            s_logger.error("getCloudStackVolume: get Lun failed. Invalid svm:{} or Lun name: {}", svmName, lunName);
+            throw new CloudRuntimeException("getCloudStackVolume : Failed to get Lun, invalid request");
+        }
+        try {
+            String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+            Map queryParams = Map.of(Constants.SVM_DOT_NAME, svmName, Constants.NAME, lunName);
+            OntapResponse lunResponse = sanFeignClient.getLunResponse(authHeader, queryParams);
+            if (lunResponse == null || lunResponse.getRecords() == null || lunResponse.getRecords().isEmpty()) {
+                s_logger.warn("getCloudStackVolume: Lun '{}' on SVM '{}' not found. Returning null.", lunName, svmName);
+                return null;
+            }
+            Lun lun = lunResponse.getRecords().get(0);
+            s_logger.debug("getCloudStackVolume: Lun Details : {}", lun);
+            s_logger.info("getCloudStackVolume: Fetched the Lun successfully. LunName: {}", lun.getName());
+
+            CloudStackVolume cloudStackVolume = new CloudStackVolume();
+            cloudStackVolume.setLun(lun);
+            return cloudStackVolume;
+        } catch (FeignException e) {
+            // 404 just means "not found" — align with the empty-records path above
+            if (e.status() == 404) {
+                s_logger.warn("getCloudStackVolume: Lun '{}' on SVM '{}' not found (status 404). Returning null.", lunName, svmName);
+                return null;
+            }
+            s_logger.error("FeignException occurred while fetching Lun, Status: {}, Exception: {}", e.status(), e.getMessage());
+            throw new CloudRuntimeException("Failed to fetch Lun details: " + e.getMessage(), e);
+        } catch (Exception e) {
+            s_logger.error("Exception occurred while fetching Lun, Exception: {}", e.getMessage());
+            throw new CloudRuntimeException("Failed to fetch Lun details: " + e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Creates an ONTAP igroup (initiator group) for the hosts attached to the
+     * given datastore. A 409 from ONTAP (igroup already exists) is tolerated
+     * and an empty AccessGroup is returned in that case.
+     *
+     * @param accessGroup carries the PrimaryDataStoreInfo (details + uuid) and
+     *                    the hosts whose initiators should be added
+     * @return an AccessGroup wrapping the created igroup (empty on 409)
+     * @throws CloudRuntimeException on invalid input, unsupported hosts, or REST failure
+     */
+    @Override
+    public AccessGroup createAccessGroup(AccessGroup accessGroup) {
+        s_logger.info("createAccessGroup : Create Igroup");
+        String igroupName = "unknown";
+        s_logger.debug("createAccessGroup : Creating Igroup with access group request {} ", accessGroup);
+        if (accessGroup == null) {
+            s_logger.error("createAccessGroup: Igroup creation failed. Invalid request: {}", accessGroup);
+            throw new CloudRuntimeException("createAccessGroup : Failed to create Igroup, invalid request");
+        }
+        try {
+            // Get StoragePool details
+            if (accessGroup.getPrimaryDataStoreInfo() == null || accessGroup.getPrimaryDataStoreInfo().getDetails() == null
+                    || accessGroup.getPrimaryDataStoreInfo().getDetails().isEmpty()) {
+                throw new CloudRuntimeException("createAccessGroup : Failed to create Igroup, invalid datastore details in the request");
+            }
+            Map dataStoreDetails = accessGroup.getPrimaryDataStoreInfo().getDetails();
+            s_logger.debug("createAccessGroup: Successfully fetched datastore details.");
+
+            String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+
+            // Build the igroup request: name is derived from SVM name + pool uuid
+            Igroup igroupRequest = new Igroup();
+            List hostsIdentifier = new ArrayList<>();
+            String svmName = dataStoreDetails.get(Constants.SVM_NAME);
+            String storagePoolUuid = accessGroup.getPrimaryDataStoreInfo().getUuid();
+            igroupName = Utility.getIgroupName(svmName, storagePoolUuid);
+            Hypervisor.HypervisorType hypervisorType = accessGroup.getPrimaryDataStoreInfo().getHypervisor();
+
+            ProtocolType protocol = ProtocolType.valueOf(dataStoreDetails.get(Constants.PROTOCOL));
+            // Every host must speak the chosen protocol, otherwise fail fast
+            if (accessGroup.getHostsToConnect() == null || accessGroup.getHostsToConnect().isEmpty()) {
+                throw new CloudRuntimeException("createAccessGroup : Failed to create Igroup, no hosts to connect provided in the request");
+            }
+            if (!validateProtocolSupportAndFetchHostsIdentifier(accessGroup.getHostsToConnect(), protocol, hostsIdentifier)) {
+                String errMsg = "createAccessGroup: Not all hosts in the " + accessGroup.getScope().getScopeType().toString() + " support the protocol: " + protocol.name();
+                throw new CloudRuntimeException(errMsg);
+            }
+
+            if (svmName != null && !svmName.isEmpty()) {
+                Svm svm = new Svm();
+                svm.setName(svmName);
+                igroupRequest.setSvm(svm);
+            }
+
+            if (igroupName != null && !igroupName.isEmpty()) {
+                igroupRequest.setName(igroupName);
+            }
+
+            // TODO: derive os_type from hypervisorType once non-Linux/KVM
+            // hypervisors are supported; defaulting to linux for now.
+            igroupRequest.setOsType(Igroup.OsTypeEnum.linux);
+
+            if (!hostsIdentifier.isEmpty()) {
+                List initiators = new ArrayList<>();
+                for (String hostIdentifier : hostsIdentifier) {
+                    Initiator initiator = new Initiator();
+                    initiator.setName(hostIdentifier);
+                    initiators.add(initiator);
+                }
+                igroupRequest.setInitiators(initiators);
+            }
+            igroupRequest.setProtocol(Igroup.ProtocolEnum.valueOf(Constants.ISCSI));
+
+            s_logger.debug("createAccessGroup: About to call sanFeignClient.createIgroup with igroupName: {}", igroupName);
+            AccessGroup createdAccessGroup = new AccessGroup();
+            OntapResponse createdIgroup = null;
+            try {
+                createdIgroup = sanFeignClient.createIgroup(authHeader, true, igroupRequest);
+            } catch (FeignException feignEx) {
+                if (feignEx.status() == 409) {
+                    s_logger.warn("createAccessGroup: Igroup with name {} already exists (status 409). Fetching existing Igroup.", igroupName);
+                    // TODO: Currently we aren't doing anything with the returned AccessGroup object, so, haven't added code here to fetch the existing Igroup and set it in AccessGroup.
+                    return createdAccessGroup;
+                }
+                s_logger.error("createAccessGroup: FeignException during Igroup creation: Status: {}, Exception: {}", feignEx.status(), feignEx.getMessage(), feignEx);
+                throw feignEx;
+            }
+
+            // Validate BEFORE dereferencing: the debug logs previously called
+            // createdIgroup.getRecords() ahead of this null check and could NPE.
+            if (createdIgroup == null || createdIgroup.getRecords() == null || createdIgroup.getRecords().isEmpty()) {
+                s_logger.error("createAccessGroup: Igroup creation failed for Igroup Name {}", igroupName);
+                throw new CloudRuntimeException("Failed to create Igroup: " + igroupName);
+            }
+            s_logger.debug("createAccessGroup: createdIgroup: {}", createdIgroup);
+            s_logger.debug("createAccessGroup: createdIgroup Records: {}", createdIgroup.getRecords());
+            Igroup igroup = createdIgroup.getRecords().get(0);
+            s_logger.debug("createAccessGroup: Successfully extracted igroup from response: {}", igroup);
+            s_logger.info("createAccessGroup: Igroup created successfully. IgroupName: {}", igroup.getName());
+
+            createdAccessGroup.setIgroup(igroup);
+            s_logger.debug("createAccessGroup: Returning createdAccessGroup");
+            return createdAccessGroup;
+        } catch (Exception e) {
+            s_logger.error("Exception occurred while creating Igroup: {}, Exception: {}", igroupName, e.getMessage(), e);
+            throw new CloudRuntimeException("Failed to create Igroup: " + e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Deletes the igroup associated with the given datastore.
+     * The igroup name is derived from the SVM name and the storage pool uuid;
+     * a missing igroup (empty lookup or 404) is treated as already deleted.
+     *
+     * @throws CloudRuntimeException on invalid input or any other failure
+     */
+    @Override
+    public void deleteAccessGroup(AccessGroup accessGroup) {
+        s_logger.info("deleteAccessGroup: Deleting iGroup");
+
+        if (accessGroup == null) {
+            throw new CloudRuntimeException("deleteAccessGroup: Invalid accessGroup object - accessGroup is null");
+        }
+
+        PrimaryDataStoreInfo primaryDataStoreInfo = accessGroup.getPrimaryDataStoreInfo();
+        if (primaryDataStoreInfo == null) {
+            throw new CloudRuntimeException("deleteAccessGroup: PrimaryDataStoreInfo is null in accessGroup");
+        }
+
+        try {
+            String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+
+            // SVM name comes from the storage initialized in the constructor
+            String svmName = storage.getSvmName();
+            String storagePoolUuid = primaryDataStoreInfo.getUuid();
+
+            // The igroup name is scope-independent: cluster- and zone-scoped
+            // pools use the same naming scheme, so compute it exactly once
+            // (previously the identical call was made three times).
+            String igroupName = Utility.getIgroupName(svmName, storagePoolUuid);
+            String scope = primaryDataStoreInfo.getClusterId() != null ? "cluster" : "zone";
+            s_logger.info("deleteAccessGroup: Deleting {}-scoped iGroup '{}'", scope, igroupName);
+
+            // Look up the iGroup first: the delete API is UUID-based
+            Map igroupParams = Map.of(
+                    Constants.SVM_DOT_NAME, svmName,
+                    Constants.NAME, igroupName
+            );
+
+            try {
+                OntapResponse igroupResponse = sanFeignClient.getIgroupResponse(authHeader, igroupParams);
+                if (igroupResponse == null || igroupResponse.getRecords() == null || igroupResponse.getRecords().isEmpty()) {
+                    s_logger.warn("deleteAccessGroup: iGroup '{}' not found, may have been already deleted", igroupName);
+                    return;
+                }
+
+                Igroup igroup = igroupResponse.getRecords().get(0);
+                String igroupUuid = igroup.getUuid();
+
+                if (igroupUuid == null || igroupUuid.isEmpty()) {
+                    throw new CloudRuntimeException("deleteAccessGroup: iGroup UUID is null or empty for iGroup: " + igroupName);
+                }
+
+                s_logger.info("deleteAccessGroup: Deleting iGroup '{}' with UUID '{}'", igroupName, igroupUuid);
+                sanFeignClient.deleteIgroup(authHeader, igroupUuid);
+                s_logger.info("deleteAccessGroup: Successfully deleted iGroup '{}'", igroupName);
+            } catch (FeignException e) {
+                // 404 on lookup or delete: already gone, nothing to do
+                if (e.status() == 404) {
+                    s_logger.warn("deleteAccessGroup: iGroup '{}' does not exist (status 404), skipping deletion", igroupName);
+                } else {
+                    s_logger.error("deleteAccessGroup: FeignException occurred: Status: {}, Exception: {}", e.status(), e.getMessage(), e);
+                    throw e;
+                }
+            }
+        } catch (FeignException e) {
+            s_logger.error("deleteAccessGroup: FeignException occurred while deleting iGroup. Status: {}, Exception: {}", e.status(), e.getMessage(), e);
+            throw new CloudRuntimeException("Failed to delete iGroup: " + e.getMessage(), e);
+        } catch (Exception e) {
+            s_logger.error("deleteAccessGroup: Failed to delete iGroup. Exception: {}", e.getMessage(), e);
+            throw new CloudRuntimeException("Failed to delete iGroup: " + e.getMessage(), e);
+        }
+    }
+
+    /**
+     * Verifies every host exposes an identifier for the given protocol and
+     * collects those identifiers into {@code hostIdentifiers} (out-parameter).
+     * For iSCSI the identifier is the host's storage URL, which must be an IQN.
+     *
+     * @return true when all hosts qualify; false as soon as one does not
+     * @throws CloudRuntimeException for protocols other than iSCSI
+     */
+    private boolean validateProtocolSupportAndFetchHostsIdentifier(List hosts, ProtocolType protocolType, List hostIdentifiers) {
+        switch (protocolType) {
+            case ISCSI:
+                String protocolPrefix = Constants.IQN;
+                for (HostVO host : hosts) {
+                    if (host == null || host.getStorageUrl() == null || host.getStorageUrl().trim().isEmpty()
+                            || !host.getStorageUrl().startsWith(protocolPrefix)) {
+                        return false;
+                    }
+                    hostIdentifiers.add(host.getStorageUrl());
+                }
+                break;
+            default:
+                throw new CloudRuntimeException("validateProtocolSupportAndFetchHostsIdentifier : Unsupported protocol: " + protocolType.name());
+        }
+        // Parameterized logging for consistency with the rest of this class
+        s_logger.info("validateProtocolSupportAndFetchHostsIdentifier: All hosts support the protocol: {}", protocolType.name());
+        return true;
+    }
+
+ // TODO: not yet implemented — igroup updates are unsupported by this
+ // strategy; callers currently receive null.
+ @Override
+ public AccessGroup updateAccessGroup(AccessGroup accessGroup) {
+ //TODO
+ return null;
+ }
+
+ @Override
+ public AccessGroup getAccessGroup(Map values) {
+ s_logger.info("getAccessGroup : fetch Igroup");
+ s_logger.debug("getAccessGroup : fetching Igroup with params {} ", values);
+ if (values == null || values.isEmpty()) {
+ s_logger.error("getAccessGroup: get Igroup failed. Invalid request: {}", values);
+ throw new CloudRuntimeException("getAccessGroup : get Igroup Failed, invalid request");
+ }
+ String svmName = values.get(Constants.SVM_DOT_NAME);
+ String igroupName = values.get(Constants.NAME);
+ if (svmName == null || igroupName == null || svmName.isEmpty() || igroupName.isEmpty()) {
+ s_logger.error("getAccessGroup: get Igroup failed. Invalid svm:{} or igroup name: {}", svmName, igroupName);
+ throw new CloudRuntimeException("getAccessGroup : Failed to get Igroup, invalid request");
+ }
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ Map queryParams = Map.of(Constants.SVM_DOT_NAME, svmName, Constants.NAME, igroupName, Constants.FIELDS, Constants.INITIATORS);
+ OntapResponse igroupResponse = sanFeignClient.getIgroupResponse(authHeader, queryParams);
+ if (igroupResponse == null || igroupResponse.getRecords() == null || igroupResponse.getRecords().isEmpty()) {
+ s_logger.warn("getAccessGroup: Igroup '{}' not found on SVM '{}'. Returning null.", igroupName, svmName);
+ return null;
+ }
+ Igroup igroup = igroupResponse.getRecords().get(0);
+ AccessGroup accessGroup = new AccessGroup();
+ accessGroup.setIgroup(igroup);
+ return accessGroup;
+ } catch (FeignException e) {
+ if (e.status() == 404) {
+ s_logger.warn("getAccessGroup: Igroup '{}' not found on SVM '{}' (status 404). Returning null.", igroupName, svmName);
+ return null;
+ }
+ s_logger.error("FeignException occurred while fetching Igroup, Status: {}, Exception: {}", e.status(), e.getMessage());
+ throw new CloudRuntimeException("Failed to fetch Igroup details: " + e.getMessage());
+ } catch (Exception e) {
+ s_logger.error("Exception occurred while fetching Igroup, Exception: {}", e.getMessage());
+ throw new CloudRuntimeException("Failed to fetch Igroup details: " + e.getMessage());
+ }
+ }
+
+ public Map enableLogicalAccess(Map values) {
+ s_logger.info("enableLogicalAccess : Create LunMap");
+ s_logger.debug("enableLogicalAccess : Creating LunMap with values {} ", values);
+ Map response = null;
+ if (values == null) {
+ s_logger.error("enableLogicalAccess: LunMap creation failed. Invalid request values: null");
+ throw new CloudRuntimeException("enableLogicalAccess : Failed to create LunMap, invalid request");
+ }
+ String svmName = values.get(Constants.SVM_DOT_NAME);
+ String lunName = values.get(Constants.LUN_DOT_NAME);
+ String igroupName = values.get(Constants.IGROUP_DOT_NAME);
+ if (svmName == null || lunName == null || igroupName == null || svmName.isEmpty() || lunName.isEmpty() || igroupName.isEmpty()) {
+ s_logger.error("enableLogicalAccess: LunMap creation failed. Invalid request values: {}", values);
+ throw new CloudRuntimeException("enableLogicalAccess : Failed to create LunMap, invalid request");
+ }
+ try {
+ // Get AuthHeader
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ // Create LunMap
+ LunMap lunMapRequest = new LunMap();
+ Svm svm = new Svm();
+ svm.setName(svmName);
+ lunMapRequest.setSvm(svm);
+ //Set Lun name
+ Lun lun = new Lun();
+ lun.setName(lunName);
+ lunMapRequest.setLun(lun);
+ //Set Igroup name
+ Igroup igroup = new Igroup();
+ igroup.setName(igroupName);
+ lunMapRequest.setIgroup(igroup);
+ try {
+ sanFeignClient.createLunMap(authHeader, true, lunMapRequest);
+ } catch (Exception feignEx) {
+ String errMsg = feignEx.getMessage();
+ if (errMsg != null && errMsg.contains(("LUN already mapped to this group"))) {
+ s_logger.warn("enableLogicalAccess: LunMap for Lun: {} and igroup: {} already exists.", lunName, igroupName);
+ } else {
+ s_logger.error("enableLogicalAccess: Exception during Feign call: {}", feignEx.getMessage(), feignEx);
+ throw feignEx;
+ }
+ }
+ // Get the LunMap details
+ OntapResponse lunMapResponse = null;
+ try {
+ lunMapResponse = sanFeignClient.getLunMapResponse(authHeader,
+ Map.of(
+ Constants.SVM_DOT_NAME, svmName,
+ Constants.LUN_DOT_NAME, lunName,
+ Constants.IGROUP_DOT_NAME, igroupName,
+ Constants.FIELDS, Constants.LOGICAL_UNIT_NUMBER
+ ));
+ response = Map.of(
+ Constants.LOGICAL_UNIT_NUMBER, lunMapResponse.getRecords().get(0).getLogicalUnitNumber().toString()
+ );
+ } catch (Exception e) {
+ s_logger.error("enableLogicalAccess: Failed to fetch LunMap details for Lun: {} and igroup: {}, Exception: {}", lunName, igroupName, e);
+ throw new CloudRuntimeException("Failed to fetch LunMap details for Lun: " + lunName + " and igroup: " + igroupName);
+ }
+ s_logger.debug("enableLogicalAccess: LunMap created successfully, LunMap: {}", lunMapResponse.getRecords().get(0));
+ s_logger.info("enableLogicalAccess: LunMap created successfully.");
+ } catch (Exception e) {
+ s_logger.error("Exception occurred while creating LunMap", e);
+ throw new CloudRuntimeException("Failed to create LunMap: " + e.getMessage());
+ }
+ return response;
+ }
+
+    /**
+     * Unmaps a LUN from an igroup by deleting the LunMap (UUID-based call).
+     * A 404 (mapping already gone) is ignored.
+     *
+     * @param values must contain lun.uuid and igroup.uuid keys
+     * @throws CloudRuntimeException on invalid input or non-404 failure
+     */
+    public void disableLogicalAccess(Map values) {
+        s_logger.info("disableLogicalAccess : Delete LunMap");
+        s_logger.debug("disableLogicalAccess : Deleting LunMap with values {} ", values);
+        if (values == null) {
+            s_logger.error("disableLogicalAccess: LunMap deletion failed. Invalid request values: null");
+            throw new CloudRuntimeException("disableLogicalAccess : Failed to delete LunMap, invalid request");
+        }
+        String lunUUID = values.get(Constants.LUN_DOT_UUID);
+        String igroupUUID = values.get(Constants.IGROUP_DOT_UUID);
+        if (lunUUID == null || igroupUUID == null || lunUUID.isEmpty() || igroupUUID.isEmpty()) {
+            s_logger.error("disableLogicalAccess: LunMap deletion failed. Invalid request values: {}", values);
+            throw new CloudRuntimeException("disableLogicalAccess : Failed to delete LunMap, invalid request");
+        }
+        try {
+            String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+            sanFeignClient.deleteLunMap(authHeader, lunUUID, igroupUUID);
+            s_logger.info("disableLogicalAccess: LunMap deleted successfully.");
+        } catch (FeignException e) {
+            if (e.status() == 404) {
+                s_logger.warn("disableLogicalAccess: LunMap with Lun UUID: {} and igroup UUID: {} does not exist, skipping deletion", lunUUID, igroupUUID);
+                return;
+            }
+            s_logger.error("FeignException occurred while deleting LunMap, Status: {}, Exception: {}", e.status(), e.getMessage());
+            throw new CloudRuntimeException("Failed to delete LunMap: " + e.getMessage(), e);
+        } catch (Exception e) {
+            s_logger.error("Exception occurred while deleting LunMap, Exception: {}", e.getMessage());
+            throw new CloudRuntimeException("Failed to delete LunMap: " + e.getMessage(), e);
+        }
+    }
+
+ // GET-only helper: fetch LUN-map and return logical unit number if it exists; otherwise return null
+ // Deliberately best-effort: any lookup failure (including "not found") is
+ // swallowed and logged at WARN; callers treat a null return as "no mapping".
+ public Map getLogicalAccess(Map values) {
+ s_logger.info("getLogicalAccess : Fetch LunMap");
+ s_logger.debug("getLogicalAccess : Fetching LunMap with values {} ", values);
+ if (values == null) {
+ s_logger.error("getLogicalAccess: Invalid request values: null");
+ throw new CloudRuntimeException("getLogicalAccess : Invalid request");
+ }
+ String svmName = values.get(Constants.SVM_DOT_NAME);
+ String lunName = values.get(Constants.LUN_DOT_NAME);
+ String igroupName = values.get(Constants.IGROUP_DOT_NAME);
+ if (svmName == null || lunName == null || igroupName == null || svmName.isEmpty() || lunName.isEmpty() || igroupName.isEmpty()) {
+ s_logger.error("getLogicalAccess: Invalid request values: {}", values);
+ throw new CloudRuntimeException("getLogicalAccess : Invalid request");
+ }
+ try {
+ String authHeader = Utility.generateAuthHeader(storage.getUsername(), storage.getPassword());
+ OntapResponse lunMapResponse = sanFeignClient.getLunMapResponse(authHeader,
+ Map.of(
+ Constants.SVM_DOT_NAME, svmName,
+ Constants.LUN_DOT_NAME, lunName,
+ Constants.IGROUP_DOT_NAME, igroupName,
+ Constants.FIELDS, Constants.LOGICAL_UNIT_NUMBER
+ ));
+ if (lunMapResponse != null && lunMapResponse.getRecords() != null && !lunMapResponse.getRecords().isEmpty()) {
+ // A mapping record may exist without a number; return null in that case too
+ String lunNumber = lunMapResponse.getRecords().get(0).getLogicalUnitNumber() != null ?
+ lunMapResponse.getRecords().get(0).getLogicalUnitNumber().toString() : null;
+ return lunNumber != null ? Map.of(Constants.LOGICAL_UNIT_NUMBER, lunNumber) : null;
+ }
+ } catch (Exception e) {
+ s_logger.warn("getLogicalAccess: LunMap not found for Lun: {} and igroup: {} ({}).", lunName, igroupName, e.getMessage());
+ }
+ return null;
+ }
+
+    /**
+     * Idempotently maps a LUN to an igroup: returns the existing mapping's
+     * logical unit number when present, otherwise creates the mapping.
+     *
+     * @return the logical unit number as a string
+     * @throws CloudRuntimeException when the mapping cannot be created
+     */
+    @Override
+    public String ensureLunMapped(String svmName, String lunName, String accessGroupName) {
+        s_logger.info("ensureLunMapped: Ensuring LUN [{}] is mapped to igroup [{}] on SVM [{}]", lunName, accessGroupName, svmName);
+
+        // The same key set serves both the lookup and the create call
+        // (previously two identical maps were built).
+        Map lunMapParams = Map.of(
+                Constants.LUN_DOT_NAME, lunName,
+                Constants.SVM_DOT_NAME, svmName,
+                Constants.IGROUP_DOT_NAME, accessGroupName
+        );
+
+        // Fast path: mapping already exists
+        Map mapResp = getLogicalAccess(lunMapParams);
+        if (mapResp != null && mapResp.containsKey(Constants.LOGICAL_UNIT_NUMBER)) {
+            String lunNumber = mapResp.get(Constants.LOGICAL_UNIT_NUMBER);
+            s_logger.info("ensureLunMapped: Existing LunMap found for LUN [{}] in igroup [{}] with LUN number [{}]", lunName, accessGroupName, lunNumber);
+            return lunNumber;
+        }
+
+        // Slow path: create the mapping and return the assigned number
+        Map response = enableLogicalAccess(lunMapParams);
+        if (response == null || !response.containsKey(Constants.LOGICAL_UNIT_NUMBER)) {
+            throw new CloudRuntimeException("ensureLunMapped: Failed to map LUN [" + lunName + "] to iGroup [" + accessGroupName + "]");
+        }
+        s_logger.info("ensureLunMapped: Successfully mapped LUN [{}] to igroup [{}] with LUN number [{}]", lunName, accessGroupName, response.get(Constants.LOGICAL_UNIT_NUMBER));
+        return response.get(Constants.LOGICAL_UNIT_NUMBER);
+    }
+
+    /**
+     * Checks that the given host initiator (IQN) is present in the named igroup.
+     *
+     * @return true only when the igroup exists and contains the initiator
+     */
+    @Override
+    public boolean validateInitiatorInAccessGroup(String hostInitiator, String svmName, String accessGroupName) {
+        s_logger.info("validateInitiatorInAccessGroup: Validating initiator [{}] is in igroup [{}] on SVM [{}]", hostInitiator, accessGroupName, svmName);
+
+        if (hostInitiator == null || hostInitiator.isEmpty()) {
+            s_logger.warn("validateInitiatorInAccessGroup: host initiator is null or empty");
+            return false;
+        }
+
+        Map getAccessGroupMap = Map.of(
+                Constants.NAME, accessGroupName,
+                Constants.SVM_DOT_NAME, svmName
+        );
+        AccessGroup accessGroup = getAccessGroup(getAccessGroupMap);
+        if (accessGroup == null || accessGroup.getIgroup() == null) {
+            s_logger.warn("validateInitiatorInAccessGroup: iGroup [{}] not found on SVM [{}]", accessGroupName, svmName);
+            return false;
+        }
+
+        Igroup igroup = accessGroup.getIgroup();
+        if (igroup.getInitiators() != null) {
+            for (Initiator initiator : igroup.getInitiators()) {
+                // Compare from the known-non-null side so an initiator record
+                // with a null name cannot trigger an NPE.
+                if (initiator != null && hostInitiator.equalsIgnoreCase(initiator.getName())) {
+                    s_logger.info("validateInitiatorInAccessGroup: Initiator [{}] validated successfully in igroup [{}]", hostInitiator, accessGroupName);
+                    return true;
+                }
+            }
+        }
+        s_logger.warn("validateInitiatorInAccessGroup: Initiator [{}] NOT found in igroup [{}]", hostInitiator, accessGroupName);
+        return false;
+    }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java
new file mode 100755
index 000000000000..9ff80e7cf8a9
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/AccessGroup.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.service.model;
+
+import com.cloud.host.HostVO;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.Scope;
+import org.apache.cloudstack.storage.feign.model.ExportPolicy;
+import org.apache.cloudstack.storage.feign.model.Igroup;
+
+import java.util.List;
+
+/**
+ * Aggregates the ONTAP-side access-control objects for a primary data store:
+ * the iSCSI {@link Igroup}, the NFS {@link ExportPolicy}, the hosts that must
+ * be able to reach the store, and the store's scope.
+ *
+ * NOTE(review): getter/setter for the export policy are named getPolicy/setPolicy
+ * (not getExportPolicy) — kept as-is since callers may already depend on them.
+ */
+public class AccessGroup {
+
+    private Igroup igroup;
+    private ExportPolicy exportPolicy;
+
+    // Generified from the raw List in the original; HostVO was imported but unused.
+    private List<HostVO> hostsToConnect;
+    private PrimaryDataStoreInfo primaryDataStoreInfo;
+    private Scope scope;
+
+    public Igroup getIgroup() {
+        return igroup;
+    }
+
+    public void setIgroup(Igroup igroup) {
+        this.igroup = igroup;
+    }
+
+    public ExportPolicy getPolicy() {
+        return exportPolicy;
+    }
+
+    public void setPolicy(ExportPolicy policy) {
+        this.exportPolicy = policy;
+    }
+
+    public List<HostVO> getHostsToConnect() {
+        return hostsToConnect;
+    }
+
+    public void setHostsToConnect(List<HostVO> hostsToConnect) {
+        this.hostsToConnect = hostsToConnect;
+    }
+
+    public PrimaryDataStoreInfo getPrimaryDataStoreInfo() {
+        return primaryDataStoreInfo;
+    }
+
+    public void setPrimaryDataStoreInfo(PrimaryDataStoreInfo primaryDataStoreInfo) {
+        this.primaryDataStoreInfo = primaryDataStoreInfo;
+    }
+
+    public Scope getScope() {
+        return scope;
+    }
+
+    public void setScope(Scope scope) {
+        this.scope = scope;
+    }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java
new file mode 100644
index 000000000000..6c51e4630800
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/CloudStackVolume.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.service.model;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.storage.feign.model.FileInfo;
+import org.apache.cloudstack.storage.feign.model.Lun;
+
+/**
+ * Transport object pairing a CloudStack volume with its ONTAP backing object:
+ * a {@link FileInfo} when the store is NFS-backed, or a {@link Lun} when it is
+ * iSCSI-backed. Only one of the two is expected to be set for a given volume.
+ */
+public class CloudStackVolume {
+
+    // NFS-backed volumes: the ONTAP file that backs the volume.
+    private FileInfo file;
+    // iSCSI-backed volumes: the ONTAP LUN that backs the volume.
+    private Lun lun;
+    // Id of the CloudStack storage pool (datastore) this volume belongs to.
+    private String datastoreId;
+    private DataObject volumeInfo; // This is needed as we need DataObject to be passed to agent to create volume
+    public FileInfo getFile() {
+        return file;
+    }
+
+    public void setFile(FileInfo file) {
+        this.file = file;
+    }
+
+    public Lun getLun() {
+        return lun;
+    }
+
+    public void setLun(Lun lun) {
+        this.lun = lun;
+    }
+    public String getDatastoreId() {
+        return datastoreId;
+    }
+    public void setDatastoreId(String datastoreId) {
+        this.datastoreId = datastoreId;
+    }
+    public DataObject getVolumeInfo() {
+        return volumeInfo;
+    }
+    public void setVolumeInfo(DataObject volumeInfo) {
+        this.volumeInfo = volumeInfo;
+    }
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/ProtocolType.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/ProtocolType.java
new file mode 100644
index 000000000000..00dca62480dc
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/service/model/ProtocolType.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.service.model;
+
+/**
+ * Storage protocols supported by the ONTAP primary-storage plugin.
+ * Values are matched by name against the pool detail {@code protocol}
+ * (see Utility.createCloudStackVolumeRequestByProtocol).
+ */
+public enum ProtocolType {
+    NFS3,
+    ISCSI
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java
new file mode 100644
index 000000000000..5e8729ad1917
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Constants.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.utils;
+
+
+public class Constants {
+
+ public static final String ONTAP_PLUGIN_NAME = "NetApp ONTAP";
+ public static final int NFS3_PORT = 2049;
+ public static final int ISCSI_PORT = 3260;
+
+ public static final String NFS = "nfs";
+ public static final String ISCSI = "iscsi";
+ public static final String SIZE = "size";
+ public static final String PROTOCOL = "protocol";
+ public static final String SVM_NAME = "svmName";
+ public static final String USERNAME = "username";
+ public static final String PASSWORD = "password";
+ public static final String DATA_LIF = "dataLIF";
+ public static final String STORAGE_IP = "storageIP";
+ public static final String VOLUME_NAME = "volumeName";
+ public static final String VOLUME_UUID = "volumeUUID";
+ public static final String EXPORT_POLICY_ID = "exportPolicyId";
+ public static final String EXPORT_POLICY_NAME = "exportPolicyName";
+ public static final String IS_DISAGGREGATED = "isDisaggregated";
+ public static final String RUNNING = "running";
+ public static final String EXPORT = "export";
+
+ public static final int ONTAP_PORT = 443;
+
+ public static final String JOB_RUNNING = "running";
+ public static final String JOB_QUEUE = "queued";
+ public static final String JOB_PAUSED = "paused";
+ public static final String JOB_FAILURE = "failure";
+ public static final String JOB_SUCCESS = "success";
+
+ public static final String TRUE = "true";
+ public static final String FALSE = "false";
+
+ // Query params
+ public static final String NAME = "name";
+ public static final String FIELDS = "fields";
+ public static final String INITIATORS = "initiators";
+ public static final String AGGREGATES = "aggregates";
+ public static final String STATE = "state";
+ public static final String DATA_NFS = "data_nfs";
+ public static final String DATA_ISCSI = "data_iscsi";
+ public static final String IP_ADDRESS = "ip.address";
+ public static final String SERVICES = "services";
+ public static final String RETURN_RECORDS = "return_records";
+
+ public static final int JOB_MAX_RETRIES = 100;
+ public static final int CREATE_VOLUME_CHECK_SLEEP_TIME = 2000;
+
+ public static final String SLASH = "/";
+ public static final String EQUALS = "=";
+ public static final String SEMICOLON = ";";
+ public static final String COMMA = ",";
+ public static final String HYPHEN = "-";
+
+ public static final String VOLUME_PATH_PREFIX = "/vol/";
+
+ public static final String ONTAP_NAME_REGEX = "^[a-zA-Z][a-zA-Z0-9_]*$";
+ public static final String KVM = "KVM";
+
+ public static final String HTTPS = "https://";
+ public static final String SVM_DOT_NAME = "svm.name";
+ public static final String LUN_DOT_NAME = "lun.name";
+ public static final String IQN = "iqn";
+ public static final String LUN_DOT_UUID = "lun.uuid";
+ public static final String LOGICAL_UNIT_NUMBER = "logical_unit_number";
+ public static final String IGROUP_DOT_NAME = "igroup.name";
+ public static final String IGROUP_DOT_UUID = "igroup.uuid";
+ public static final String UNDERSCORE = "_";
+ public static final String CS = "cs";
+}
diff --git a/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java
new file mode 100644
index 000000000000..c8f3b924ae22
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/java/org/apache/cloudstack/storage/utils/Utility.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.cloudstack.storage.utils;
+
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.utils.StringUtils;
+import com.cloud.utils.exception.CloudRuntimeException;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataObject;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.feign.model.Lun;
+import org.apache.cloudstack.storage.feign.model.LunSpace;
+import org.apache.cloudstack.storage.feign.model.OntapStorage;
+import org.apache.cloudstack.storage.feign.model.Svm;
+import org.apache.cloudstack.storage.provider.StorageProviderFactory;
+import org.apache.cloudstack.storage.service.StorageStrategy;
+import org.apache.cloudstack.storage.service.model.CloudStackVolume;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.springframework.util.Base64Utils;
+
+import java.nio.charset.StandardCharsets;
+import java.util.Base64;
+import java.util.Map;
+
+public class Utility {
+
+ private static final Logger s_logger = LogManager.getLogger(Utility.class);
+
+ private static final String BASIC = "Basic";
+ private static final String AUTH_HEADER_COLON = ":";
+
+ /**
+ * Method generates authentication headers using storage backend credentials passed as normal string
+ *
+ * @param username -->> username of the storage backend
+ * @param password -->> normal decoded password of the storage backend
+ * @return
+ */
+ public static String generateAuthHeader (String username, String password) {
+ byte[] encodedBytes = Base64Utils.encode((username + AUTH_HEADER_COLON + password).getBytes());
+ return BASIC + StringUtils.SPACE + new String(encodedBytes);
+ }
+
+ public static CloudStackVolume createCloudStackVolumeRequestByProtocol(StoragePoolVO storagePool, Map details, DataObject volumeObject) {
+ CloudStackVolume cloudStackVolumeRequest = null;
+
+ String protocol = details.get(Constants.PROTOCOL);
+ ProtocolType protocolType = ProtocolType.valueOf(protocol);
+ switch (protocolType) {
+ case NFS3:
+ cloudStackVolumeRequest = new CloudStackVolume();
+ cloudStackVolumeRequest.setDatastoreId(String.valueOf(storagePool.getId()));
+ cloudStackVolumeRequest.setVolumeInfo(volumeObject);
+ break;
+ case ISCSI:
+ Svm svm = new Svm();
+ svm.setName(details.get(Constants.SVM_NAME));
+ cloudStackVolumeRequest = new CloudStackVolume();
+ Lun lunRequest = new Lun();
+ lunRequest.setSvm(svm);
+
+ LunSpace lunSpace = new LunSpace();
+ lunSpace.setSize(volumeObject.getSize());
+ lunRequest.setSpace(lunSpace);
+ //Lun name is full path like in unified "/vol/VolumeName/LunName"
+ String lunName = volumeObject.getName().replace(Constants.HYPHEN, Constants.UNDERSCORE);
+ if(!isValidName(lunName)) {
+ String errMsg = "createAsync: Invalid dataObject name [" + lunName + "]. It must start with a letter and can only contain letters, digits, and underscores, and be up to 200 characters long.";
+ throw new InvalidParameterValueException(errMsg);
+ }
+ String lunFullName = getLunName(storagePool.getName(), lunName);
+ lunRequest.setName(lunFullName);
+
+ String osType = getOSTypeFromHypervisor(storagePool.getHypervisor().name());
+ lunRequest.setOsType(Lun.OsTypeEnum.valueOf(osType));
+
+ cloudStackVolumeRequest.setLun(lunRequest);
+ break;
+ default:
+ throw new CloudRuntimeException("createCloudStackVolumeRequestByProtocol: Unsupported protocol " + protocol);
+
+ }
+ return cloudStackVolumeRequest;
+ }
+
+ public static boolean isValidName(String name) {
+ // Check for null and length constraint first
+ if (name == null || name.length() > 200) {
+ return false;
+ }
+ // Regex: Starts with a letter, followed by letters, digits, or underscores
+ return name.matches(Constants.ONTAP_NAME_REGEX);
+ }
+
+ public static String getOSTypeFromHypervisor(String hypervisorType){
+ switch (hypervisorType) {
+ case Constants.KVM:
+ return Lun.OsTypeEnum.LINUX.name();
+ default:
+ String errMsg = "getOSTypeFromHypervisor : Unsupported hypervisor type " + hypervisorType + " for ONTAP storage";
+ s_logger.error(errMsg);
+ throw new CloudRuntimeException(errMsg);
+ }
+ }
+
+ public static StorageStrategy getStrategyByStoragePoolDetails(Map details) {
+ if (details == null || details.isEmpty()) {
+ s_logger.error("getStrategyByStoragePoolDetails: Storage pool details are null or empty");
+ throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Storage pool details are null or empty");
+ }
+ String protocol = details.get(Constants.PROTOCOL);
+ OntapStorage ontapStorage = new OntapStorage(details.get(Constants.USERNAME), details.get(Constants.PASSWORD),
+ details.get(Constants.STORAGE_IP), details.get(Constants.SVM_NAME), Long.parseLong(details.get(Constants.SIZE)),
+ ProtocolType.valueOf(protocol));
+ StorageStrategy storageStrategy = StorageProviderFactory.getStrategy(ontapStorage);
+ boolean isValid = storageStrategy.connect();
+ if (isValid) {
+ s_logger.info("Connection to Ontap SVM [{}] successful", details.get(Constants.SVM_NAME));
+ return storageStrategy;
+ } else {
+ s_logger.error("getStrategyByStoragePoolDetails: Connection to Ontap SVM [" + details.get(Constants.SVM_NAME) + "] failed");
+ throw new CloudRuntimeException("getStrategyByStoragePoolDetails: Connection to Ontap SVM [" + details.get(Constants.SVM_NAME) + "] failed");
+ }
+ }
+
+ public static String getIgroupName(String svmName, String poolUuid) {
+ //Igroup name format: cs_svmName_poolUuid
+ return Constants.CS + Constants.UNDERSCORE + svmName + Constants.UNDERSCORE + poolUuid;
+ }
+
+ public static String generateExportPolicyName(String svmName, String volumeName){
+ return Constants.EXPORT + Constants.HYPHEN + svmName + Constants.HYPHEN + volumeName;
+ }
+
+ public static String getLunName(String volName, String lunName) {
+ //LUN name in ONTAP unified format: "/vol/VolumeName/LunName"
+ return Constants.VOLUME_PATH_PREFIX + volName + Constants.SLASH + lunName;
+ }
+}
diff --git a/plugins/storage/volume/ontap/src/main/resources/META-INF/cloudstack/storage-volume-ontap/logback-spring.xml b/plugins/storage/volume/ontap/src/main/resources/META-INF/cloudstack/storage-volume-ontap/logback-spring.xml
new file mode 100644
index 000000000000..15872c82a64e
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/resources/META-INF/cloudstack/storage-volume-ontap/logback-spring.xml
@@ -0,0 +1,45 @@
+
+
+
+
+
+
+ logs/feign-requests.log
+
+ logs/feign-requests.%d{yyyy-MM-dd}.log
+ 30
+
+
+ %d{yyyy-MM-dd HH:mm:ss} %-5level %logger{36} - %msg%n
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/plugins/storage/volume/ontap/src/main/resources/META-INF/cloudstack/storage-volume-ontap/module.properties b/plugins/storage/volume/ontap/src/main/resources/META-INF/cloudstack/storage-volume-ontap/module.properties
new file mode 100644
index 000000000000..67fd086eba10
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/resources/META-INF/cloudstack/storage-volume-ontap/module.properties
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+name=storage-volume-ontap
+parent=storage
diff --git a/plugins/storage/volume/ontap/src/main/resources/META-INF/cloudstack/storage-volume-ontap/spring-storage-volume-ontap-context.xml b/plugins/storage/volume/ontap/src/main/resources/META-INF/cloudstack/storage-volume-ontap/spring-storage-volume-ontap-context.xml
new file mode 100644
index 000000000000..6ab9c46fcf9d
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/main/resources/META-INF/cloudstack/storage-volume-ontap/spring-storage-volume-ontap-context.xml
@@ -0,0 +1,33 @@
+
+
+
+
+
+
diff --git a/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriverTest.java b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriverTest.java
new file mode 100644
index 000000000000..d050d379563c
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/driver/OntapPrimaryDatastoreDriverTest.java
@@ -0,0 +1,549 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.driver;
+
+import com.cloud.exception.InvalidParameterValueException;
+import com.cloud.host.Host;
+import com.cloud.storage.ScopeType;
+import com.cloud.storage.Storage;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.VolumeDetailVO;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.storage.dao.VolumeDetailsDao;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.dao.VMInstanceDao;
+import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
+import org.apache.cloudstack.framework.async.AsyncCompletionCallback;
+import org.apache.cloudstack.storage.command.CommandResult;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.cloudstack.storage.feign.model.Lun;
+import org.apache.cloudstack.storage.service.UnifiedSANStrategy;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import org.apache.cloudstack.storage.service.model.CloudStackVolume;
+import org.apache.cloudstack.storage.service.model.ProtocolType;
+import org.apache.cloudstack.storage.utils.Constants;
+import org.apache.cloudstack.storage.utils.Utility;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.ArgumentCaptor;
+import org.mockito.InjectMocks;
+import org.mockito.Mock;
+import org.mockito.MockedStatic;
+import org.mockito.junit.jupiter.MockitoExtension;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static com.cloud.agent.api.to.DataObjectType.VOLUME;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.ArgumentMatchers.argThat;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.mockStatic;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+@ExtendWith(MockitoExtension.class)
+class OntapPrimaryDatastoreDriverTest {
+
+ @Mock
+ private StoragePoolDetailsDao storagePoolDetailsDao;
+
+ @Mock
+ private PrimaryDataStoreDao storagePoolDao;
+
+ @Mock
+ private VMInstanceDao vmDao;
+
+ @Mock
+ private VolumeDao volumeDao;
+
+ @Mock
+ private VolumeDetailsDao volumeDetailsDao;
+
+ @Mock
+ private DataStore dataStore;
+
+ @Mock
+ private VolumeInfo volumeInfo;
+
+ @Mock
+ private StoragePoolVO storagePool;
+
+ @Mock
+ private VolumeVO volumeVO;
+
+ @Mock
+ private Host host;
+
+ @Mock
+ private UnifiedSANStrategy sanStrategy;
+
+ @Mock
+ private AsyncCompletionCallback createCallback;
+
+ @Mock
+ private AsyncCompletionCallback commandCallback;
+
+ @InjectMocks
+ private OntapPrimaryDatastoreDriver driver;
+
+ private Map storagePoolDetails;
+
+ @BeforeEach
+ void setUp() {
+ storagePoolDetails = new HashMap<>();
+ storagePoolDetails.put(Constants.PROTOCOL, ProtocolType.ISCSI.name());
+ storagePoolDetails.put(Constants.SVM_NAME, "svm1");
+ }
+
+ @Test
+ void testGetCapabilities() {
+ Map capabilities = driver.getCapabilities();
+
+ assertNotNull(capabilities);
+ assertEquals(Boolean.FALSE.toString(), capabilities.get("STORAGE_SYSTEM_SNAPSHOT"));
+ assertEquals(Boolean.FALSE.toString(), capabilities.get("CAN_CREATE_VOLUME_FROM_SNAPSHOT"));
+ }
+
+ @Test
+ void testCreateAsync_NullDataObject_ThrowsException() {
+ assertThrows(InvalidParameterValueException.class,
+ () -> driver.createAsync(dataStore, null, createCallback));
+ }
+
+ @Test
+ void testCreateAsync_NullDataStore_ThrowsException() {
+ assertThrows(InvalidParameterValueException.class,
+ () -> driver.createAsync(null, volumeInfo, createCallback));
+ }
+
+ @Test
+ void testCreateAsync_NullCallback_ThrowsException() {
+ assertThrows(InvalidParameterValueException.class,
+ () -> driver.createAsync(dataStore, volumeInfo, null));
+ }
+
+ @Test
+ void testCreateAsync_VolumeWithISCSI_Success() {
+ // Setup
+ when(dataStore.getId()).thenReturn(1L);
+ when(dataStore.getUuid()).thenReturn("pool-uuid-123");
+ when(dataStore.getName()).thenReturn("ontap-pool");
+ when(volumeInfo.getType()).thenReturn(VOLUME);
+ when(volumeInfo.getId()).thenReturn(100L);
+ when(volumeInfo.getName()).thenReturn("test-volume");
+
+ when(storagePoolDao.findById(1L)).thenReturn(storagePool);
+ when(storagePool.getId()).thenReturn(1L);
+ when(storagePool.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
+ when(storagePool.getPath()).thenReturn("iqn.1992-08.com.netapp:sn.123456");
+
+ when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails);
+ when(volumeDao.findById(100L)).thenReturn(volumeVO);
+ when(volumeVO.getId()).thenReturn(100L);
+
+ Lun mockLun = new Lun();
+ mockLun.setName("/vol/vol1/lun1");
+ mockLun.setUuid("lun-uuid-123");
+ CloudStackVolume mockCloudStackVolume = new CloudStackVolume();
+ mockCloudStackVolume.setLun(mockLun);
+
+ try (MockedStatic utilityMock = mockStatic(Utility.class)) {
+ utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(storagePoolDetails))
+ .thenReturn(sanStrategy);
+ utilityMock.when(() -> Utility.createCloudStackVolumeRequestByProtocol(
+ any(), any(), any())).thenReturn(mockCloudStackVolume);
+ utilityMock.when(() -> Utility.getIgroupName(anyString(), anyString()))
+ .thenReturn("igroup1");
+
+ when(sanStrategy.createCloudStackVolume(any())).thenReturn(mockCloudStackVolume);
+ when(sanStrategy.ensureLunMapped(anyString(), anyString(), anyString())).thenReturn("0");
+
+ // Execute
+ driver.createAsync(dataStore, volumeInfo, createCallback);
+
+ // Verify
+ ArgumentCaptor resultCaptor = ArgumentCaptor.forClass(CreateCmdResult.class);
+ verify(createCallback).complete(resultCaptor.capture());
+
+ CreateCmdResult result = resultCaptor.getValue();
+ assertNotNull(result);
+ assertTrue(result.isSuccess());
+
+ verify(volumeDetailsDao).addDetail(eq(100L), eq(Constants.LUN_DOT_UUID), eq("lun-uuid-123"), eq(false));
+ verify(volumeDetailsDao).addDetail(eq(100L), eq(Constants.LUN_DOT_NAME), eq("/vol/vol1/lun1"), eq(false));
+ verify(volumeDao).update(eq(100L), any(VolumeVO.class));
+ }
+ }
+
+ @Test
+ void testCreateAsync_VolumeWithNFS_Success() {
+ // Setup
+ storagePoolDetails.put(Constants.PROTOCOL, ProtocolType.NFS3.name());
+
+ when(dataStore.getId()).thenReturn(1L);
+ when(dataStore.getUuid()).thenReturn("pool-uuid-123");
+ when(dataStore.getName()).thenReturn("ontap-pool");
+ when(volumeInfo.getType()).thenReturn(VOLUME);
+ when(volumeInfo.getId()).thenReturn(100L);
+ when(volumeInfo.getName()).thenReturn("test-volume");
+
+ when(storagePoolDao.findById(1L)).thenReturn(storagePool);
+ when(storagePool.getId()).thenReturn(1L);
+ when(storagePool.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
+ when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails);
+ when(volumeDao.findById(100L)).thenReturn(volumeVO);
+ when(volumeVO.getId()).thenReturn(100L);
+
+ CloudStackVolume mockCloudStackVolume = new CloudStackVolume();
+
+ try (MockedStatic utilityMock = mockStatic(Utility.class)) {
+ utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(storagePoolDetails))
+ .thenReturn(sanStrategy);
+ utilityMock.when(() -> Utility.createCloudStackVolumeRequestByProtocol(
+ any(), any(), any())).thenReturn(mockCloudStackVolume);
+
+ when(sanStrategy.createCloudStackVolume(any())).thenReturn(mockCloudStackVolume);
+
+ // Execute
+ driver.createAsync(dataStore, volumeInfo, createCallback);
+
+ // Verify
+ ArgumentCaptor resultCaptor = ArgumentCaptor.forClass(CreateCmdResult.class);
+ verify(createCallback).complete(resultCaptor.capture());
+
+ CreateCmdResult result = resultCaptor.getValue();
+ assertNotNull(result);
+ assertTrue(result.isSuccess());
+ verify(volumeDao).update(eq(100L), any(VolumeVO.class));
+ }
+ }
+
+ @Test
+ void testDeleteAsync_NullStore_ThrowsException() {
+ ArgumentCaptor resultCaptor = ArgumentCaptor.forClass(CommandResult.class);
+
+ driver.deleteAsync(null, volumeInfo, commandCallback);
+
+ verify(commandCallback).complete(resultCaptor.capture());
+ CommandResult result = resultCaptor.getValue();
+ assertFalse(result.isSuccess());
+ assertTrue(result.getResult().contains("store or data is null"));
+ }
+
+ @Test
+ void testDeleteAsync_ISCSIVolume_Success() {
+ // Setup
+ when(dataStore.getId()).thenReturn(1L);
+ when(volumeInfo.getType()).thenReturn(VOLUME);
+ when(volumeInfo.getId()).thenReturn(100L);
+
+ when(storagePoolDao.findById(1L)).thenReturn(storagePool);
+ when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails);
+
+ VolumeDetailVO lunNameDetail = new VolumeDetailVO(100L, Constants.LUN_DOT_NAME, "/vol/vol1/lun1", false);
+ VolumeDetailVO lunUuidDetail = new VolumeDetailVO(100L, Constants.LUN_DOT_UUID, "lun-uuid-123", false);
+
+ when(volumeDetailsDao.findDetail(100L, Constants.LUN_DOT_NAME)).thenReturn(lunNameDetail);
+ when(volumeDetailsDao.findDetail(100L, Constants.LUN_DOT_UUID)).thenReturn(lunUuidDetail);
+
+ try (MockedStatic utilityMock = mockStatic(Utility.class)) {
+ utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(storagePoolDetails))
+ .thenReturn(sanStrategy);
+
+ doNothing().when(sanStrategy).deleteCloudStackVolume(any());
+
+ // Execute
+ driver.deleteAsync(dataStore, volumeInfo, commandCallback);
+
+ // Verify
+ ArgumentCaptor resultCaptor = ArgumentCaptor.forClass(CommandResult.class);
+ verify(commandCallback).complete(resultCaptor.capture());
+
+ CommandResult result = resultCaptor.getValue();
+ assertNotNull(result);
+ assertTrue(result.isSuccess());
+ verify(sanStrategy).deleteCloudStackVolume(any(CloudStackVolume.class));
+ }
+ }
+
+ @Test
+ void testDeleteAsync_NFSVolume_Success() {
+ // Setup
+ storagePoolDetails.put(Constants.PROTOCOL, ProtocolType.NFS3.name());
+
+ when(dataStore.getId()).thenReturn(1L);
+ when(volumeInfo.getType()).thenReturn(VOLUME);
+
+ when(storagePoolDao.findById(1L)).thenReturn(storagePool);
+ when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails);
+
+ // Execute
+ driver.deleteAsync(dataStore, volumeInfo, commandCallback);
+
+ // Verify
+ ArgumentCaptor resultCaptor = ArgumentCaptor.forClass(CommandResult.class);
+ verify(commandCallback).complete(resultCaptor.capture());
+
+ CommandResult result = resultCaptor.getValue();
+ assertNotNull(result);
+ // NFS deletion doesn't fail, handled by hypervisor
+ }
+
+ @Test
+ void testGrantAccess_NullParameters_ThrowsException() {
+ assertThrows(CloudRuntimeException.class,
+ () -> driver.grantAccess(null, host, dataStore));
+
+ assertThrows(CloudRuntimeException.class,
+ () -> driver.grantAccess(volumeInfo, null, dataStore));
+
+ assertThrows(CloudRuntimeException.class,
+ () -> driver.grantAccess(volumeInfo, host, null));
+ }
+
+ @Test
+ void testGrantAccess_ClusterScope_Success() {
+ // Setup
+ when(dataStore.getId()).thenReturn(1L);
+ when(dataStore.getUuid()).thenReturn("pool-uuid-123");
+ when(volumeInfo.getType()).thenReturn(VOLUME);
+ when(volumeInfo.getId()).thenReturn(100L);
+
+ when(storagePoolDao.findById(1L)).thenReturn(storagePool);
+ when(storagePool.getId()).thenReturn(1L);
+ when(storagePool.getScope()).thenReturn(ScopeType.CLUSTER);
+ when(storagePool.getPath()).thenReturn("iqn.1992-08.com.netapp:sn.123456");
+ when(storagePool.getPoolType()).thenReturn(Storage.StoragePoolType.NetworkFilesystem);
+
+ when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails);
+ when(volumeDao.findById(100L)).thenReturn(volumeVO);
+ when(volumeVO.getId()).thenReturn(100L);
+
+ when(host.getStorageUrl()).thenReturn("iqn.1993-08.org.debian:01:host1");
+
+ VolumeDetailVO lunNameDetail = new VolumeDetailVO(100L, Constants.LUN_DOT_NAME, "/vol/vol1/lun1", false);
+ when(volumeDetailsDao.findDetail(100L, Constants.LUN_DOT_NAME)).thenReturn(lunNameDetail);
+
+ try (MockedStatic utilityMock = mockStatic(Utility.class)) {
+ utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(storagePoolDetails))
+ .thenReturn(sanStrategy);
+ utilityMock.when(() -> Utility.getIgroupName(anyString(), anyString()))
+ .thenReturn("igroup1");
+
+ when(sanStrategy.validateInitiatorInAccessGroup(anyString(), anyString(), anyString()))
+ .thenReturn(true);
+ when(sanStrategy.ensureLunMapped(anyString(), anyString(), anyString())).thenReturn("0");
+
+ // Execute
+ boolean result = driver.grantAccess(volumeInfo, host, dataStore);
+
+ // Verify
+ assertTrue(result);
+ verify(volumeDao).update(eq(100L), any(VolumeVO.class));
+ verify(sanStrategy).validateInitiatorInAccessGroup(anyString(), anyString(), anyString());
+ verify(sanStrategy).ensureLunMapped(anyString(), anyString(), anyString());
+ }
+ }
+
+ @Test
+ void testGrantAccess_InitiatorNotInIgroup_ThrowsException() {
+ // Setup
+ when(dataStore.getId()).thenReturn(1L);
+ when(dataStore.getUuid()).thenReturn("pool-uuid-123");
+ when(volumeInfo.getType()).thenReturn(VOLUME);
+ when(volumeInfo.getId()).thenReturn(100L);
+
+ when(storagePoolDao.findById(1L)).thenReturn(storagePool);
+ when(storagePool.getId()).thenReturn(1L);
+ when(storagePool.getScope()).thenReturn(ScopeType.CLUSTER);
+ when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails);
+ when(volumeDao.findById(100L)).thenReturn(volumeVO);
+ when(volumeVO.getId()).thenReturn(100L);
+
+ when(host.getStorageUrl()).thenReturn("iqn.1993-08.org.debian:01:host1");
+
+ VolumeDetailVO lunNameDetail = new VolumeDetailVO(100L, Constants.LUN_DOT_NAME, "/vol/vol1/lun1", false);
+ when(volumeDetailsDao.findDetail(100L, Constants.LUN_DOT_NAME)).thenReturn(lunNameDetail);
+
+ try (MockedStatic utilityMock = mockStatic(Utility.class)) {
+ utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(storagePoolDetails))
+ .thenReturn(sanStrategy);
+ utilityMock.when(() -> Utility.getIgroupName(anyString(), anyString()))
+ .thenReturn("igroup1");
+
+ when(sanStrategy.validateInitiatorInAccessGroup(anyString(), anyString(), anyString()))
+ .thenReturn(false);
+
+ // Execute & Verify
+ CloudRuntimeException exception = assertThrows(CloudRuntimeException.class,
+ () -> driver.grantAccess(volumeInfo, host, dataStore));
+
+ assertTrue(exception.getMessage().contains("is not present in iGroup"));
+ }
+ }
+
+ @Test
+ void testRevokeAccess_VolumeAttachedToRunningVM_SkipsRevoke() {
+ // Setup
+ when(volumeInfo.getType()).thenReturn(VOLUME);
+ when(volumeInfo.getId()).thenReturn(100L);
+
+ VolumeVO mockVolume = mock(VolumeVO.class);
+ when(mockVolume.getInstanceId()).thenReturn(200L);
+ when(volumeDao.findById(100L)).thenReturn(mockVolume);
+
+ VMInstanceVO vm = mock(VMInstanceVO.class);
+ when(vm.getState()).thenReturn(VirtualMachine.State.Running);
+ when(vm.getInstanceName()).thenReturn("i-2-100-VM");
+ when(vmDao.findById(200L)).thenReturn(vm);
+
+ // Execute
+ driver.revokeAccess(volumeInfo, host, dataStore);
+
+ // Verify - should skip revoke for running VM
+ verify(storagePoolDao, never()).findById(anyLong());
+ }
+
+ @Test
+ void testRevokeAccess_ISCSIVolume_Success() {
+ // Setup
+ when(dataStore.getId()).thenReturn(1L);
+ when(volumeInfo.getType()).thenReturn(VOLUME);
+ when(volumeInfo.getId()).thenReturn(100L);
+
+ when(volumeDao.findById(100L)).thenReturn(volumeVO);
+ when(volumeVO.getId()).thenReturn(100L);
+ when(volumeVO.getInstanceId()).thenReturn(null);
+ when(volumeVO.getName()).thenReturn("test-volume");
+
+ when(storagePoolDao.findById(1L)).thenReturn(storagePool);
+ when(storagePool.getId()).thenReturn(1L);
+ when(storagePool.getScope()).thenReturn(ScopeType.CLUSTER);
+ when(storagePool.getUuid()).thenReturn("pool-uuid-123");
+ when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(storagePoolDetails);
+
+ when(host.getStorageUrl()).thenReturn("iqn.1993-08.org.debian:01:host1");
+ when(host.getName()).thenReturn("host1");
+
+ VolumeDetailVO lunNameDetail = new VolumeDetailVO(100L, Constants.LUN_DOT_NAME, "/vol/vol1/lun1", false);
+ when(volumeDetailsDao.findDetail(100L, Constants.LUN_DOT_NAME)).thenReturn(lunNameDetail);
+
+ Lun mockLun = new Lun();
+ mockLun.setName("/vol/vol1/lun1");
+ mockLun.setUuid("lun-uuid-123");
+ CloudStackVolume mockCloudStackVolume = new CloudStackVolume();
+ mockCloudStackVolume.setLun(mockLun);
+
+ org.apache.cloudstack.storage.feign.model.Igroup mockIgroup = mock(org.apache.cloudstack.storage.feign.model.Igroup.class);
+ when(mockIgroup.getName()).thenReturn("igroup1");
+ when(mockIgroup.getUuid()).thenReturn("igroup-uuid-123");
+ AccessGroup mockAccessGroup = new AccessGroup();
+ mockAccessGroup.setIgroup(mockIgroup);
+
+ try (MockedStatic utilityMock = mockStatic(Utility.class)) {
+ utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(storagePoolDetails))
+ .thenReturn(sanStrategy);
+ utilityMock.when(() -> Utility.getIgroupName(anyString(), anyString()))
+ .thenReturn("igroup1");
+
+ // Mock the methods called by getCloudStackVolumeByName and getAccessGroupByName
+ when(sanStrategy.getCloudStackVolume(argThat(map ->
+ map != null &&
+ "/vol/vol1/lun1".equals(map.get("name")) &&
+ "svm1".equals(map.get("svm.name"))
+ ))).thenReturn(mockCloudStackVolume);
+
+ when(sanStrategy.getAccessGroup(argThat(map ->
+ map != null &&
+ "igroup1".equals(map.get("name")) &&
+ "svm1".equals(map.get("svm.name"))
+ ))).thenReturn(mockAccessGroup);
+
+ when(sanStrategy.validateInitiatorInAccessGroup(
+ eq("iqn.1993-08.org.debian:01:host1"),
+ eq("svm1"),
+ eq("igroup1")
+ )).thenReturn(true);
+
+ doNothing().when(sanStrategy).disableLogicalAccess(argThat(map ->
+ map != null &&
+ "lun-uuid-123".equals(map.get("lun.uuid")) &&
+ "igroup-uuid-123".equals(map.get("igroup.uuid"))
+ ));
+
+ // Execute
+ driver.revokeAccess(volumeInfo, host, dataStore);
+
+ // Verify
+ verify(sanStrategy).getCloudStackVolume(any());
+ verify(sanStrategy).getAccessGroup(any());
+ verify(sanStrategy).validateInitiatorInAccessGroup(anyString(), anyString(), anyString());
+ verify(sanStrategy).disableLogicalAccess(any());
+ }
+ }
+
    // The remaining tests pin the driver's constant capability answers so a
    // future change to any of these contracts is caught immediately.

    @Test
    void testCanHostAccessStoragePool_ReturnsTrue() {
        // Any host may access the pool; access control happens per-volume.
        assertTrue(driver.canHostAccessStoragePool(host, storagePool));
    }

    @Test
    void testIsVmInfoNeeded_ReturnsTrue() {
        assertTrue(driver.isVmInfoNeeded());
    }

    @Test
    void testIsStorageSupportHA_ReturnsTrue() {
        assertTrue(driver.isStorageSupportHA(Storage.StoragePoolType.NetworkFilesystem));
    }

    @Test
    void testGetChapInfo_ReturnsNull() {
        // CHAP authentication is not used by this driver.
        assertNull(driver.getChapInfo(volumeInfo));
    }

    @Test
    void testCanProvideStorageStats_ReturnsFalse() {
        assertFalse(driver.canProvideStorageStats());
    }

    @Test
    void testCanProvideVolumeStats_ReturnsFalse() {
        assertFalse(driver.canProvideVolumeStats());
    }
+}
diff --git a/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycleTest.java b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycleTest.java
new file mode 100644
index 000000000000..0f6a7c6b77dc
--- /dev/null
+++ b/plugins/storage/volume/ontap/src/test/java/org/apache/cloudstack/storage/lifecycle/OntapPrimaryDatastoreLifecycleTest.java
@@ -0,0 +1,805 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.cloudstack.storage.lifecycle;
+
+import org.apache.cloudstack.storage.utils.Constants;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.InjectMocks;
+import org.mockito.Mock;
+import org.mockito.MockedStatic;
+import org.mockito.Mockito;
+import org.mockito.junit.jupiter.MockitoExtension;
+import org.mockito.junit.jupiter.MockitoSettings;
+import org.mockito.quality.Strictness;
+import org.apache.cloudstack.storage.feign.model.Volume;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.utils.exception.CloudRuntimeException;
+import com.cloud.dc.ClusterVO;
+import com.cloud.host.HostVO;
+import com.cloud.resource.ResourceManager;
+import com.cloud.storage.StorageManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
+import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao;
+import org.apache.cloudstack.storage.service.model.AccessGroup;
+import com.cloud.hypervisor.Hypervisor;
+import java.util.Map;
+import java.util.List;
+import java.util.ArrayList;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.withSettings;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import java.util.HashMap;
+import org.apache.cloudstack.storage.provider.StorageProviderFactory;
+import org.apache.cloudstack.storage.service.StorageStrategy;
+import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper;
+import org.apache.cloudstack.storage.utils.Utility;
+import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+
+
+@ExtendWith(MockitoExtension.class)
+@MockitoSettings(strictness = Strictness.LENIENT)
+public class OntapPrimaryDatastoreLifecycleTest {
+ @InjectMocks
+ private OntapPrimaryDatastoreLifecycle ontapPrimaryDatastoreLifecycle;
+
+ @Mock
+ private ClusterDao _clusterDao;
+
+ @Mock
+ private StorageStrategy storageStrategy;
+
+ @Mock
+ private PrimaryDataStoreHelper _dataStoreHelper;
+
+ @Mock
+ private ResourceManager _resourceMgr;
+
+ @Mock
+ private StorageManager _storageMgr;
+
+ @Mock
+ private StoragePoolDetailsDao storagePoolDetailsDao;
+
+ @Mock
+ private PrimaryDataStoreDao storagePoolDao;
+
+ // Mock object that implements both DataStore and PrimaryDataStoreInfo
+ // This is needed because attachCluster(DataStore) casts DataStore to PrimaryDataStoreInfo internally
+ private DataStore dataStore;
+
+ @Mock
+ private ClusterScope clusterScope;
+
+ @Mock
+ private ZoneScope zoneScope;
+
+ private List mockHosts;
+ private Map poolDetails;
+
    /**
     * Common fixture: a dual-interface DataStore mock, a KVM cluster, a
     * connectable storage strategy, a persisted pool row, two eligible hosts,
     * and a baseline NFS3 pool-detail map.
     */
    @BeforeEach
    void setUp() {
        // Create a mock that implements both DataStore and PrimaryDataStoreInfo interfaces
        dataStore = Mockito.mock(DataStore.class, withSettings()
            .extraInterfaces(PrimaryDataStoreInfo.class));

        // Cluster 1 runs KVM — the only hypervisor the ONTAP plugin supports.
        ClusterVO clusterVO = new ClusterVO(1L, 1L, "clusterName");
        clusterVO.setHypervisorType("KVM");
        when(_clusterDao.findById(1L)).thenReturn(clusterVO);

        // The storage backend connects successfully and reports a data LIF.
        when(storageStrategy.connect()).thenReturn(true);
        when(storageStrategy.getNetworkInterface()).thenReturn("testNetworkInterface");

        // Backend volume returned by createStorageVolume during initialize().
        Volume volume = new Volume();
        volume.setUuid("test-volume-uuid");
        volume.setName("testVolume");
        when(storageStrategy.createStorageVolume(any(), any())).thenReturn(volume);

        // Setup for attachCluster tests
        // Configure dataStore mock with necessary methods (works for both DataStore and PrimaryDataStoreInfo)
        when(dataStore.getId()).thenReturn(1L);
        when(((PrimaryDataStoreInfo) dataStore).getClusterId()).thenReturn(1L);

        // Mock the setDetails method to prevent NullPointerException
        Mockito.doNothing().when(((PrimaryDataStoreInfo) dataStore)).setDetails(any());

        // Mock storagePoolDao to return a valid StoragePoolVO
        StoragePoolVO mockStoragePoolVO = new StoragePoolVO();
        mockStoragePoolVO.setId(1L);
        when(storagePoolDao.findById(1L)).thenReturn(mockStoragePoolVO);

        // Two up-and-enabled hosts in cluster 1.
        mockHosts = new ArrayList<>();
        HostVO host1 = new HostVO("host1-guid");
        host1.setPrivateIpAddress("192.168.1.10");
        host1.setStorageIpAddress("192.168.1.10");
        host1.setClusterId(1L);
        HostVO host2 = new HostVO("host2-guid");
        host2.setPrivateIpAddress("192.168.1.11");
        host2.setStorageIpAddress("192.168.1.11");
        host2.setClusterId(1L);
        mockHosts.add(host1);
        mockHosts.add(host2);

        // Baseline pool details: NFS3 over 192.168.1.100, svm1 credentials.
        poolDetails = new HashMap<>();
        poolDetails.put("username", "admin");
        poolDetails.put("password", "password");
        poolDetails.put("svmName", "svm1");
        poolDetails.put("protocol", "NFS3");
        poolDetails.put("storageIP", "192.168.1.100");
    }
+
+ @Test
+ public void testInitialize_positive() {
+
+ HashMap detailsMap = new HashMap();
+ detailsMap.put(Constants.USERNAME, "testUser");
+ detailsMap.put(Constants.PASSWORD, "testPassword");
+ detailsMap.put(Constants.STORAGE_IP, "10.10.10.10");
+ detailsMap.put(Constants.SVM_NAME, "vs0");
+ detailsMap.put(Constants.PROTOCOL, "NFS3");
+
+ Map dsInfos = new HashMap<>();
+ dsInfos.put("zoneId",1L);
+ dsInfos.put("podId",1L);
+ dsInfos.put("clusterId", 1L);
+ dsInfos.put("name", "testStoragePool");
+ dsInfos.put("providerName", "testProvider");
+ dsInfos.put("capacityBytes",200000L);
+ dsInfos.put("managed",true);
+ dsInfos.put("tags", "testTag");
+ dsInfos.put("isTagARule", false);
+ dsInfos.put("details", detailsMap);
+
+ try(MockedStatic storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) {
+ storageProviderFactory.when(() -> StorageProviderFactory.getStrategy(any())).thenReturn(storageStrategy);
+ ontapPrimaryDatastoreLifecycle.initialize(dsInfos);
+ }
+ }
+
+ @Test
+ public void testInitialize_null_Arg() {
+ Exception ex = assertThrows(CloudRuntimeException.class,() ->
+ ontapPrimaryDatastoreLifecycle.initialize(null));
+ assertTrue(ex.getMessage().contains("Datastore info map is null, cannot create primary storage"));
+ }
+
+ @Test
+ public void testInitialize_missingRequiredDetailKey() {
+
+ HashMap detailsMap = new HashMap();
+ detailsMap.put(Constants.USERNAME, "testUser");
+ detailsMap.put(Constants.PASSWORD, "testPassword");
+ detailsMap.put(Constants.STORAGE_IP, "10.10.10.10");
+ detailsMap.put(Constants.SVM_NAME, "vs0");
+
+ Map dsInfos = new HashMap<>();
+ dsInfos.put("zoneId",1L);
+ dsInfos.put("podId",1L);
+ dsInfos.put("clusterId", 1L);
+ dsInfos.put("name", "testStoragePool");
+ dsInfos.put("providerName", "testProvider");
+ dsInfos.put("capacityBytes",200000L);
+ dsInfos.put("managed",true);
+ dsInfos.put("tags", "testTag");
+ dsInfos.put("isTagARule", false);
+ dsInfos.put("details", detailsMap);
+
+ try (MockedStatic storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) {
+ storageProviderFactory.when(() -> StorageProviderFactory.getStrategy(any())).thenReturn(storageStrategy);
+ Exception ex = assertThrows(CloudRuntimeException.class, () -> ontapPrimaryDatastoreLifecycle.initialize(dsInfos));
+ assertTrue(ex.getMessage().contains("missing detail"));
+ }
+ }
+
+ @Test
+ public void testInitialize_invalidCapacityBytes() {
+
+ HashMap detailsMap = new HashMap();
+ detailsMap.put(Constants.USERNAME, "testUser");
+ detailsMap.put(Constants.PASSWORD, "testPassword");
+ detailsMap.put(Constants.STORAGE_IP, "10.10.10.10");
+ detailsMap.put(Constants.SVM_NAME, "vs0");
+ detailsMap.put(Constants.PROTOCOL, "NFS3");
+
+ Map dsInfos = new HashMap<>();
+ dsInfos.put("zoneId",1L);
+ dsInfos.put("podId",1L);
+ dsInfos.put("clusterId", 1L);
+ dsInfos.put("name", "testStoragePool");
+ dsInfos.put("providerName", "testProvider");
+ dsInfos.put("capacityBytes",-1L);
+ dsInfos.put("managed",true);
+ dsInfos.put("tags", "testTag");
+ dsInfos.put("isTagARule", false);
+ dsInfos.put("details", detailsMap);
+
+ try (MockedStatic storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) {
+ storageProviderFactory.when(() -> StorageProviderFactory.getStrategy(any())).thenReturn(storageStrategy);
+ ontapPrimaryDatastoreLifecycle.initialize(dsInfos);
+ }
+ }
+
+ @Test
+ public void testInitialize_unmanagedStorage() {
+ Map dsInfos = new HashMap<>();
+ dsInfos.put("zoneId",1L);
+ dsInfos.put("podId",1L);
+ dsInfos.put("clusterId", 1L);
+ dsInfos.put("name", "testStoragePool");
+ dsInfos.put("providerName", "testProvider");
+ dsInfos.put("capacityBytes",200000L);
+ dsInfos.put("managed",false);
+ dsInfos.put("tags", "testTag");
+ dsInfos.put("isTagARule", false);
+ dsInfos.put("details", new HashMap());
+
+ Exception ex = assertThrows(CloudRuntimeException.class, () -> {
+ try (MockedStatic storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) {
+ storageProviderFactory.when(() -> StorageProviderFactory.getStrategy(any())).thenReturn(storageStrategy);
+ ontapPrimaryDatastoreLifecycle.initialize(dsInfos);
+ }
+ });
+ assertTrue(ex.getMessage().contains("must be managed"));
+ }
+
+ @Test
+ public void testInitialize_nullStoragePoolName() {
+ Map dsInfos = new HashMap<>();
+ dsInfos.put("zoneId",1L);
+ dsInfos.put("podId",1L);
+ dsInfos.put("clusterId", 1L);
+ dsInfos.put("name", null);
+ dsInfos.put("providerName", "testProvider");
+ dsInfos.put("capacityBytes",200000L);
+ dsInfos.put("managed",true);
+ dsInfos.put("tags", "testTag");
+ dsInfos.put("isTagARule", false);
+ dsInfos.put("details", new HashMap());
+
+ Exception ex = assertThrows(CloudRuntimeException.class, () -> {
+ try (MockedStatic storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) {
+ storageProviderFactory.when(() -> StorageProviderFactory.getStrategy(any())).thenReturn(storageStrategy);
+ ontapPrimaryDatastoreLifecycle.initialize(dsInfos);
+ }
+ });
+ assertTrue(ex.getMessage().contains("Storage pool name is null or empty"));
+ }
+
+ @Test
+ public void testInitialize_nullProviderName() {
+ Map dsInfos = new HashMap<>();
+ dsInfos.put("zoneId",1L);
+ dsInfos.put("podId",1L);
+ dsInfos.put("clusterId", 1L);
+ dsInfos.put("name", "testStoragePool");
+ dsInfos.put("providerName", null);
+ dsInfos.put("capacityBytes",200000L);
+ dsInfos.put("managed",true);
+ dsInfos.put("tags", "testTag");
+ dsInfos.put("isTagARule", false);
+ dsInfos.put("details", new HashMap());
+
+ Exception ex = assertThrows(CloudRuntimeException.class, () -> {
+ try (MockedStatic storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) {
+ storageProviderFactory.when(() -> StorageProviderFactory.getStrategy(any())).thenReturn(storageStrategy);
+ ontapPrimaryDatastoreLifecycle.initialize(dsInfos);
+ }
+ });
+ assertTrue(ex.getMessage().contains("Provider name is null or empty"));
+ }
+
+ @Test
+ public void testInitialize_nullPodAndClusterAndZone() {
+ Map dsInfos = new HashMap<>();
+ dsInfos.put("zoneId",null);
+ dsInfos.put("podId",null);
+ dsInfos.put("clusterId", null);
+ dsInfos.put("name", "testStoragePool");
+ dsInfos.put("providerName", "testProvider");
+ dsInfos.put("capacityBytes",200000L);
+ dsInfos.put("managed",true);
+ dsInfos.put("tags", "testTag");
+ dsInfos.put("isTagARule", false);
+ dsInfos.put("details", new HashMap());
+
+ Exception ex = assertThrows(CloudRuntimeException.class, () -> {
+ try (MockedStatic storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) {
+ storageProviderFactory.when(() -> StorageProviderFactory.getStrategy(any())).thenReturn(storageStrategy);
+ ontapPrimaryDatastoreLifecycle.initialize(dsInfos);
+ }
+ });
+ assertTrue(ex.getMessage().contains("Pod Id, Cluster Id and Zone Id are all null"));
+ }
+
+ @Test
+ public void testInitialize_clusterNotKVM() {
+ ClusterVO clusterVO = new ClusterVO(2L, 1L, "clusterName");
+ clusterVO.setHypervisorType("XenServer");
+ when(_clusterDao.findById(2L)).thenReturn(clusterVO);
+
+ Map dsInfos = new HashMap<>();
+ dsInfos.put("zoneId",1L);
+ dsInfos.put("podId",1L);
+ dsInfos.put("clusterId", 2L);
+ dsInfos.put("name", "testStoragePool");
+ dsInfos.put("providerName", "testProvider");
+ dsInfos.put("capacityBytes",200000L);
+ dsInfos.put("managed",true);
+ dsInfos.put("tags", "testTag");
+ dsInfos.put("isTagARule", false);
+ dsInfos.put("details", new HashMap());
+
+ Exception ex = assertThrows(CloudRuntimeException.class, () -> {
+ try (MockedStatic storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) {
+ storageProviderFactory.when(() -> StorageProviderFactory.getStrategy(any())).thenReturn(storageStrategy);
+ ontapPrimaryDatastoreLifecycle.initialize(dsInfos);
+ }
+ });
+ assertTrue(ex.getMessage().contains("ONTAP primary storage is supported only for KVM hypervisor"));
+ }
+
+ @Test
+ public void testInitialize_unexpectedDetailKey() {
+
+ HashMap detailsMap = new HashMap();
+ detailsMap.put(Constants.USERNAME, "testUser");
+ detailsMap.put(Constants.PASSWORD, "testPassword");
+ detailsMap.put(Constants.STORAGE_IP, "10.10.10.10");
+ detailsMap.put(Constants.SVM_NAME, "vs0");
+ detailsMap.put(Constants.PROTOCOL, "NFS3");
+ detailsMap.put("unexpectedKey", "unexpectedValue");
+
+ Map dsInfos = new HashMap<>();
+ dsInfos.put("zoneId",1L);
+ dsInfos.put("podId",1L);
+ dsInfos.put("clusterId", 1L);
+ dsInfos.put("name", "testStoragePool");
+ dsInfos.put("providerName", "testProvider");
+ dsInfos.put("capacityBytes",200000L);
+ dsInfos.put("managed",true);
+ dsInfos.put("tags", "testTag");
+ dsInfos.put("isTagARule", false);
+ dsInfos.put("details", detailsMap);
+
+ Exception ex = assertThrows(CloudRuntimeException.class, () -> {
+ try (MockedStatic storageProviderFactory = Mockito.mockStatic(StorageProviderFactory.class)) {
+ storageProviderFactory.when(() -> StorageProviderFactory.getStrategy(any())).thenReturn(storageStrategy);
+ ontapPrimaryDatastoreLifecycle.initialize(dsInfos);
+ }
+ });
+ assertTrue(ex.getMessage().contains("Unexpected ONTAP detail key in URL"));
+ }
+
+ // ========== attachCluster Tests ==========
+
+ @Test
+ public void testAttachCluster_positive() throws Exception {
+ // Setup
+ when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any()))
+ .thenReturn(mockHosts);
+ when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
+ when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore);
+
+ try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) {
+ utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any()))
+ .thenReturn(storageStrategy);
+ when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
+
+ // Mock successful host connections
+ when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true);
+
+ // Execute
+ boolean result = ontapPrimaryDatastoreLifecycle.attachCluster(
+ dataStore, clusterScope);
+
+ // Verify
+ assertTrue(result, "attachCluster should return true on success");
+ verify(_resourceMgr, times(1))
+ .getEligibleUpAndEnabledHostsInClusterForStorageConnection(any());
+ verify(storagePoolDetailsDao, times(1)).listDetailsKeyPairs(1L);
+ verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class));
+ verify(_storageMgr, times(2)).connectHostToSharedPool(any(HostVO.class), eq(1L));
+ verify(_dataStoreHelper, times(1)).attachCluster(any(DataStore.class));
+ }
+ }
+
+ @Test
+ public void testAttachCluster_withSingleHost() throws Exception {
+ // Setup - only one host in cluster
+ List singleHost = new ArrayList<>();
+ singleHost.add(mockHosts.get(0));
+
+ when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any()))
+ .thenReturn(singleHost);
+ when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
+ when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore);
+
+ try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) {
+ utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any()))
+ .thenReturn(storageStrategy);
+ when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
+ when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true);
+
+ // Execute
+ boolean result = ontapPrimaryDatastoreLifecycle.attachCluster(
+ dataStore, clusterScope);
+
+ // Verify
+ assertTrue(result, "attachCluster should return true with single host");
+ verify(_storageMgr, times(1)).connectHostToSharedPool(any(HostVO.class), eq(1L));
+ verify(_dataStoreHelper, times(1)).attachCluster(any(DataStore.class));
+ }
+ }
+
+ @Test
+ public void testAttachCluster_withMultipleHosts() throws Exception {
+ // Setup - add more hosts
+ HostVO host3 = new HostVO("host3-guid");
+ host3.setPrivateIpAddress("192.168.1.12");
+ host3.setStorageIpAddress("192.168.1.12");
+ host3.setClusterId(1L);
+ mockHosts.add(host3);
+
+ when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any()))
+ .thenReturn(mockHosts);
+ when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
+ when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore);
+
+ try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) {
+ utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any()))
+ .thenReturn(storageStrategy);
+ when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
+ when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true);
+
+ // Execute
+ boolean result = ontapPrimaryDatastoreLifecycle.attachCluster(
+ dataStore, clusterScope);
+
+ // Verify
+ assertTrue(result, "attachCluster should return true with multiple hosts");
+ verify(_storageMgr, times(3)).connectHostToSharedPool(any(HostVO.class), eq(1L));
+ verify(_dataStoreHelper, times(1)).attachCluster(any(DataStore.class));
+ }
+ }
+
+ @Test
+ public void testAttachCluster_hostConnectionFailure() throws Exception {
+ // Setup
+ when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any()))
+ .thenReturn(mockHosts);
+ when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
+
+ try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) {
+ utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any()))
+ .thenReturn(storageStrategy);
+ when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
+
+ // Mock host connection failure for first host
+ when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong()))
+ .thenThrow(new CloudRuntimeException("Connection failed"));
+
+ // Execute
+ boolean result = ontapPrimaryDatastoreLifecycle.attachCluster(
+ dataStore, clusterScope);
+
+ // Verify
+ assertFalse(result, "attachCluster should return false on host connection failure");
+ verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class));
+ verify(_storageMgr, times(1)).connectHostToSharedPool(any(HostVO.class), eq(1L));
+ // _dataStoreHelper.attachCluster should NOT be called due to early return
+ verify(_dataStoreHelper, times(0)).attachCluster(any(DataStore.class));
+ }
+ }
+
+ @Test
+ public void testAttachCluster_emptyHostList() throws Exception {
+ // Setup - no hosts in cluster
+ List emptyHosts = new ArrayList<>();
+
+ when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any()))
+ .thenReturn(emptyHosts);
+ when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
+ when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore);
+
+ try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) {
+ utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any()))
+ .thenReturn(storageStrategy);
+ when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
+
+ // Execute
+ boolean result = ontapPrimaryDatastoreLifecycle.attachCluster(
+ dataStore, clusterScope);
+
+ // Verify
+ assertTrue(result, "attachCluster should return true even with no hosts");
+ verify(_storageMgr, times(0)).connectHostToSharedPool(any(HostVO.class), anyLong());
+ verify(_dataStoreHelper, times(1)).attachCluster(any(DataStore.class));
+ }
+ }
+
+ @Test
+ public void testAttachCluster_secondHostConnectionFails() throws Exception {
+ // Setup
+ when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any()))
+ .thenReturn(mockHosts);
+ when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
+
+ try (MockedStatic utilityMock = Mockito.mockStatic(Utility.class)) {
+ utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any()))
+ .thenReturn(storageStrategy);
+ when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
+
+ // Mock: first host succeeds, second host fails
+ when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong()))
+ .thenReturn(true)
+ .thenThrow(new CloudRuntimeException("Connection failed"));
+
+ // Execute
+ boolean result = ontapPrimaryDatastoreLifecycle.attachCluster(
+ dataStore, clusterScope);
+
+ // Verify
+ assertFalse(result, "attachCluster should return false when any host connection fails");
+ verify(_storageMgr, times(2)).connectHostToSharedPool(any(HostVO.class), eq(1L));
+ verify(_dataStoreHelper, times(0)).attachCluster(any(DataStore.class));
+ }
+ }
+
+    @Test
+    public void testAttachCluster_createAccessGroupCalled() throws Exception {
+        // Verifies that a successful attachCluster invokes createAccessGroup exactly once
+        // on the resolved storage strategy.
+        when(_resourceMgr.getEligibleUpAndEnabledHostsInClusterForStorageConnection(any()))
+                .thenReturn(mockHosts);
+        when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
+        when(_dataStoreHelper.attachCluster(any(DataStore.class))).thenReturn(dataStore);
+
+        // Parameterize the static mock to avoid a raw-type unchecked warning.
+        try (MockedStatic<Utility> utilityMock = Mockito.mockStatic(Utility.class)) {
+            utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any()))
+                    .thenReturn(storageStrategy);
+            when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
+            when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true);
+
+            // Execute
+            boolean result = ontapPrimaryDatastoreLifecycle.attachCluster(
+                    dataStore, clusterScope);
+
+            // Verify - createAccessGroup is called with correct AccessGroup structure
+            assertTrue(result);
+            verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class));
+        }
+    }
+
+ // ========== attachZone Tests ==========
+
+    @Test
+    public void testAttachZone_positive() throws Exception {
+        // Happy path for attachZone: all eligible zone hosts connect successfully and
+        // the datastore helper is asked to attach the zone exactly once.
+        when(zoneScope.getScopeId()).thenReturn(1L);
+        when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM)))
+                .thenReturn(mockHosts);
+        when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
+        when(_dataStoreHelper.attachZone(any(DataStore.class))).thenReturn(dataStore);
+
+        // Parameterize the static mock to avoid a raw-type unchecked warning.
+        try (MockedStatic<Utility> utilityMock = Mockito.mockStatic(Utility.class)) {
+            utilityMock.when(() -> Utility.getStrategyByStoragePoolDetails(any()))
+                    .thenReturn(storageStrategy);
+            when(storageStrategy.createAccessGroup(any(AccessGroup.class))).thenReturn(null);
+
+            // Mock successful host connections
+            when(_storageMgr.connectHostToSharedPool(any(HostVO.class), anyLong())).thenReturn(true);
+
+            // Execute
+            boolean result = ontapPrimaryDatastoreLifecycle.attachZone(
+                    dataStore, zoneScope, Hypervisor.HypervisorType.KVM);
+
+            // Verify: lookup, access-group creation, one connection per host, and zone attach.
+            assertTrue(result, "attachZone should return true on success");
+            verify(_resourceMgr, times(1))
+                    .getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM));
+            verify(storagePoolDetailsDao, times(1)).listDetailsKeyPairs(1L);
+            verify(storageStrategy, times(1)).createAccessGroup(any(AccessGroup.class));
+            verify(_storageMgr, times(2)).connectHostToSharedPool(any(HostVO.class), eq(1L));
+            verify(_dataStoreHelper, times(1)).attachZone(any(DataStore.class));
+        }
+    }
+
+ @Test
+ public void testAttachZone_withSingleHost() throws Exception {
+ // Setup - only one host in zone
+ List singleHost = new ArrayList<>();
+ singleHost.add(mockHosts.get(0));
+
+ when(zoneScope.getScopeId()).thenReturn(1L);
+ when(_resourceMgr.getEligibleUpAndEnabledHostsInZoneForStorageConnection(any(), eq(1L), eq(Hypervisor.HypervisorType.KVM)))
+ .thenReturn(singleHost);
+ when(storagePoolDetailsDao.listDetailsKeyPairs(1L)).thenReturn(poolDetails);
+ when(_dataStoreHelper.attachZone(any(DataStore.class))).thenReturn(dataStore);
+
+ try (MockedStatic