diff --git a/.github/boring-cyborg.yml b/.github/boring-cyborg.yml
index dfab81d0f79..90fd24c8180 100644
--- a/.github/boring-cyborg.yml
+++ b/.github/boring-cyborg.yml
@@ -46,8 +46,6 @@ labelPRBasedOnFilePath:
"component:dpdk":
- server/src/main/java/com/cloud/hypervisor/kvm/dpdk/*
- plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/dpdk/*
- "component:hyperv":
- - plugins/hypervisors/hyperv/*
"component:integration-test":
- test/integration/*
"component:ipv6":
diff --git a/api/src/main/java/com/cloud/hypervisor/Hypervisor.java b/api/src/main/java/com/cloud/hypervisor/Hypervisor.java
index 1f8741d3b7b..13ef7a6871d 100644
--- a/api/src/main/java/com/cloud/hypervisor/Hypervisor.java
+++ b/api/src/main/java/com/cloud/hypervisor/Hypervisor.java
@@ -47,7 +47,6 @@ public class Hypervisor {
public static final HypervisorType XenServer = new HypervisorType("XenServer", ImageFormat.VHD, EnumSet.of(RootDiskSizeOverride, VmStorageMigration));
public static final HypervisorType KVM = new HypervisorType("KVM", ImageFormat.QCOW2, EnumSet.of(DirectDownloadTemplate, RootDiskSizeOverride, VmStorageMigration));
public static final HypervisorType VMware = new HypervisorType("VMware", ImageFormat.OVA, EnumSet.of(RootDiskSizeOverride, VmStorageMigration, VmStorageMigrationWithSnapshots));
- public static final HypervisorType Hyperv = new HypervisorType("Hyperv");
public static final HypervisorType VirtualBox = new HypervisorType("VirtualBox");
public static final HypervisorType Parralels = new HypervisorType("Parralels");
public static final HypervisorType BareMetal = new HypervisorType("BareMetal");
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java
index d8fa2123d22..68de3836321 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java
@@ -65,7 +65,7 @@ public class AddClusterCmd extends BaseCmd {
@Parameter(name = ApiConstants.HYPERVISOR,
type = CommandType.STRING,
required = true,
- description = "Hypervisor type of the cluster: XenServer,KVM,VMware,Hyperv,BareMetal,Simulator,Ovm3,External")
+ description = "Hypervisor type of the cluster: XenServer,KVM,VMware,BareMetal,Simulator,External")
private String hypervisor;
@Parameter(name = ApiConstants.ARCH, type = CommandType.STRING,
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java
index e202dfad77b..d0b9049a3d4 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java
@@ -104,7 +104,7 @@ public class ListHostsCmd extends BaseListCmd {
@Parameter(name = ApiConstants.HA_HOST, type = CommandType.BOOLEAN, description = "If true, list only hosts dedicated to HA")
private Boolean haHost;
- @Parameter(name = ApiConstants.HYPERVISOR, type = CommandType.STRING, description = "Hypervisor type of host: XenServer,KVM,VMware,Hyperv,BareMetal,Simulator")
+ @Parameter(name = ApiConstants.HYPERVISOR, type = CommandType.STRING, description = "Hypervisor type of host: XenServer,KVM,VMware,BareMetal,Simulator")
private String hypervisor;
@Parameter(name = ApiConstants.MANAGEMENT_SERVER_ID, type = CommandType.UUID, entityType = ManagementServerResponse.class, description = "the id of the management server", since="4.21.0")
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/AddTrafficTypeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/AddTrafficTypeCmd.java
index 2ba3b321887..ff8dfdd14f2 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/AddTrafficTypeCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/AddTrafficTypeCmd.java
@@ -67,11 +67,13 @@ public class AddTrafficTypeCmd extends BaseAsyncCreateCmd {
description = "The network name label of the physical device dedicated to this traffic on a VMware host")
private String vmwareLabel;
+ @Deprecated
@Parameter(name = ApiConstants.HYPERV_NETWORK_LABEL,
type = CommandType.STRING,
description = "The network name label of the physical device dedicated to this traffic on a Hyperv host")
private String hypervLabel;
+ @Deprecated
@Parameter(name = ApiConstants.OVM3_NETWORK_LABEL,
type = CommandType.STRING,
description = "The network name of the physical device dedicated to this traffic on an OVM3 host")
@@ -108,19 +110,11 @@ public class AddTrafficTypeCmd extends BaseAsyncCreateCmd {
return vmwareLabel;
}
- public String getHypervLabel() {
- return hypervLabel;
- }
-
public String getSimulatorLabel() {
//simulators will have no labels
return null;
}
- public String getOvm3Label() {
- return ovm3Label;
- }
-
public void setVlan(String vlan) {
this.vlan = vlan;
}
@@ -163,7 +157,7 @@ public class AddTrafficTypeCmd extends BaseAsyncCreateCmd {
public void create() throws ResourceAllocationException {
PhysicalNetworkTrafficType result =
_networkService.addTrafficTypeToPhysicalNetwork(getPhysicalNetworkId(), getTrafficType(), getIsolationMethod(), getXenLabel(), getKvmLabel(), getVmwareLabel(),
- getSimulatorLabel(), getVlan(), getHypervLabel(), getOvm3Label());
+ getSimulatorLabel(), getVlan(), null, null);
if (result != null) {
setEntityId(result.getId());
setEntityUuid(result.getUuid());
diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/UpdateTrafficTypeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/UpdateTrafficTypeCmd.java
index 0de4cfb7edd..a8778255149 100644
--- a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/UpdateTrafficTypeCmd.java
+++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/UpdateTrafficTypeCmd.java
@@ -57,11 +57,13 @@ public class UpdateTrafficTypeCmd extends BaseAsyncCmd {
description = "The network name label of the physical device dedicated to this traffic on a VMware host")
private String vmwareLabel;
+ @Deprecated
@Parameter(name = ApiConstants.HYPERV_NETWORK_LABEL,
type = CommandType.STRING,
description = "The network name label of the physical device dedicated to this traffic on a Hyperv host")
private String hypervLabel;
+ @Deprecated
@Parameter(name = ApiConstants.OVM3_NETWORK_LABEL,
type = CommandType.STRING,
description = "The network name of the physical device dedicated to this traffic on an OVM3 host")
@@ -87,14 +89,6 @@ public class UpdateTrafficTypeCmd extends BaseAsyncCmd {
return vmwareLabel;
}
- public String getHypervLabel() {
- return hypervLabel;
- }
-
- public String getOvm3Label() {
- return ovm3Label;
- }
-
/////////////////////////////////////////////////////
/////////////// API Implementation///////////////////
/////////////////////////////////////////////////////
@@ -106,7 +100,7 @@ public class UpdateTrafficTypeCmd extends BaseAsyncCmd {
@Override
public void execute() {
- PhysicalNetworkTrafficType result = _networkService.updatePhysicalNetworkTrafficType(getId(), getXenLabel(), getKvmLabel(), getVmwareLabel(), getHypervLabel(), getOvm3Label());
+ PhysicalNetworkTrafficType result = _networkService.updatePhysicalNetworkTrafficType(getId(), getXenLabel(), getKvmLabel(), getVmwareLabel(), null, null);
if (result != null) {
TrafficTypeResponse response = _responseGenerator.createTrafficTypeResponse(result);
response.setResponseName(getCommandName());
diff --git a/api/src/main/java/org/apache/cloudstack/api/response/TrafficTypeResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/TrafficTypeResponse.java
index 2b8af97f160..9d00d01eeb8 100644
--- a/api/src/main/java/org/apache/cloudstack/api/response/TrafficTypeResponse.java
+++ b/api/src/main/java/org/apache/cloudstack/api/response/TrafficTypeResponse.java
@@ -52,10 +52,6 @@ public class TrafficTypeResponse extends BaseResponse {
@Param(description = "The Network name label of the physical device dedicated to this traffic on a VMware host")
private String vmwareNetworkLabel;
- @SerializedName(ApiConstants.HYPERV_NETWORK_LABEL)
- @Param(description = "The Network name label of the physical device dedicated to this traffic on a HyperV host")
- private String hypervNetworkLabel;
-
@SerializedName(ApiConstants.VLAN)
@Param(description = "The VLAN id to be used for Management traffic by VMware host")
private String vlan;
@@ -64,10 +60,6 @@ public class TrafficTypeResponse extends BaseResponse {
@Param(description = "isolation methods for the physical network traffic")
private String isolationMethods;
- @SerializedName(ApiConstants.OVM3_NETWORK_LABEL)
- @Param(description = "The Network name of the physical device dedicated to this traffic on an OVM3 host")
- private String ovm3NetworkLabel;
-
@Override
public String getObjectId() {
return this.id;
@@ -105,18 +97,10 @@ public class TrafficTypeResponse extends BaseResponse {
return kvmNetworkLabel;
}
- public String getHypervLabel() {
- return hypervNetworkLabel;
- }
-
public void setXenLabel(String xenLabel) {
this.xenNetworkLabel = xenLabel;
}
- public void setHypervLabel(String hypervLabel) {
- this.hypervNetworkLabel = hypervLabel;
- }
-
public void setKvmLabel(String kvmLabel) {
this.kvmNetworkLabel = kvmLabel;
}
@@ -129,14 +113,6 @@ public class TrafficTypeResponse extends BaseResponse {
return vmwareNetworkLabel;
}
- public String getOvm3Label() {
- return ovm3NetworkLabel;
- }
-
- public void setOvm3Label(String ovm3Label) {
- this.ovm3NetworkLabel = ovm3Label;
- }
-
public String getIsolationMethods() {
return isolationMethods;
}
diff --git a/engine/orchestration/pom.xml b/engine/orchestration/pom.xml
index fda63d2558b..a8e7001baf8 100755
--- a/engine/orchestration/pom.xml
+++ b/engine/orchestration/pom.xml
@@ -78,6 +78,12 @@
      <artifactId>cloud-plugin-hypervisor-external</artifactId>
      <version>${project.version}</version>
    </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-api</artifactId>
+      <version>4.23.0.0-SNAPSHOT</version>
+      <scope>compile</scope>
+    </dependency>
diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
index 86f45630611..20b7521d8dc 100755
--- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
+++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java
@@ -2010,7 +2010,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
return ExecuteInSequence.value();
}
- if (Set.of(HypervisorType.KVM, HypervisorType.XenServer, HypervisorType.Hyperv, HypervisorType.LXC).contains(hypervisorType)) {
+ if (Set.of(HypervisorType.KVM, HypervisorType.XenServer, HypervisorType.LXC).contains(hypervisorType)) {
return false;
} else if (hypervisorType.equals(HypervisorType.VMware)) {
return StorageManager.shouldExecuteInSequenceOnVmware();
@@ -5406,8 +5406,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
, vm, vm.getState(), vm.getPowerState());
if((HighAvailabilityManager.ForceHA.value() || vm.isHaEnabled()) && vm.getState() == State.Running
&& HaVmRestartHostUp.value()
- && vm.getHypervisorType() != HypervisorType.VMware
- && vm.getHypervisorType() != HypervisorType.Hyperv) {
+ && vm.getHypervisorType() != HypervisorType.VMware) {
logger.info("Detected out-of-band stop of a HA enabled VM {}, will schedule restart.", vm);
if (!_haMgr.hasPendingHaWork(vm.getId())) {
_haMgr.scheduleRestart(vm, true);
diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
index a07fd13e1da..c3c480693db 100644
--- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
+++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
@@ -1156,8 +1156,6 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
return ImageFormat.OVA;
} else if (hyperType == HypervisorType.Ovm) {
return ImageFormat.RAW;
- } else if (hyperType == HypervisorType.Hyperv) {
- return ImageFormat.VHDX;
} else {
return null;
}
@@ -1165,15 +1163,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati
private boolean isSupportedImageFormatForCluster(VolumeInfo volume, HypervisorType rootDiskHyperType) {
ImageFormat volumeFormat = volume.getFormat();
- if (rootDiskHyperType == HypervisorType.Hyperv) {
- if (volumeFormat.equals(ImageFormat.VHDX) || volumeFormat.equals(ImageFormat.VHD)) {
- return true;
- } else {
- return false;
- }
- } else {
- return volume.getFormat().equals(getSupportedImageFormatForCluster(rootDiskHyperType));
- }
+ return volume.getFormat().equals(getSupportedImageFormatForCluster(rootDiskHyperType));
}
private VolumeInfo copyVolume(StoragePool rootDiskPool, VolumeInfo volumeInfo, VirtualMachine vm, VirtualMachineTemplate rootDiskTmplt, DataCenter dcVO, Pod pod, DiskOffering diskVO,
diff --git a/engine/schema/pom.xml b/engine/schema/pom.xml
index 654cd14a25d..ec12346a8b0 100644
--- a/engine/schema/pom.xml
+++ b/engine/schema/pom.xml
@@ -57,6 +57,12 @@
      <artifactId>ini4j</artifactId>
      <version>${cs.ini.version}</version>
    </dependency>
+    <dependency>
+      <groupId>org.apache.cloudstack</groupId>
+      <artifactId>cloud-api</artifactId>
+      <version>4.23.0.0-SNAPSHOT</version>
+      <scope>compile</scope>
+    </dependency>
@@ -105,7 +111,6 @@
templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-vmware")
templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-xen")
templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-ovm")
- templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-hyperv")
File file = new File("./engine/schema/dist/systemvm-templates/sha512sum.txt")
def lines = file.readLines()
for (template in templateList) {
@@ -321,41 +326,5 @@
-    <profile>
-      <id>download-hyperv-systemvm-template</id>
-      <activation>
-        <property>
-          <name>systemvm-hyperv</name>
-        </property>
-      </activation>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-resources-plugin</artifactId>
-            <version>${cs.resources-plugin.version}</version>
-          </plugin>
-          <plugin>
-            <groupId>com.googlecode.maven-download-plugin</groupId>
-            <artifactId>download-maven-plugin</artifactId>
-            <version>1.6.3</version>
-            <executions>
-              <execution>
-                <id>download-hyperv-template</id>
-                <goals>
-                  <goal>wget</goal>
-                </goals>
-                <configuration>
-                  <skipCache>true</skipCache>
-                  <url>${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-hyperv.vhd.zip</url>
-                  <outputDirectory>${basedir}/dist/systemvm-templates/</outputDirectory>
-                  <sha512>${hyperv.checksum}</sha512>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-    </profile>
diff --git a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java
index 09d9f1d7fbf..90416d11770 100644
--- a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java
+++ b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java
@@ -123,14 +123,8 @@ public class PhysicalNetworkTrafficTypeDaoImpl extends GenericDaoBase<PhysicalNetworkTrafficTypeVO, Long> implements PhysicalNetworkTrafficTypeDao {
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java
--- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java
+++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java
new Pair<>(Hypervisor.HypervisorType.KVM, CPU.CPUArch.arm64),
new Pair<>(Hypervisor.HypervisorType.VMware, CPU.CPUArch.amd64),
new Pair<>(Hypervisor.HypervisorType.XenServer, CPU.CPUArch.amd64),
- new Pair<>(Hypervisor.HypervisorType.Hyperv, CPU.CPUArch.amd64),
- new Pair<>(Hypervisor.HypervisorType.LXC, CPU.CPUArch.amd64),
- new Pair<>(Hypervisor.HypervisorType.Ovm3, CPU.CPUArch.amd64)
+ new Pair<>(Hypervisor.HypervisorType.LXC, CPU.CPUArch.amd64)
);
protected static final List METADATA_TEMPLATE_LIST = new ArrayList<>();
@@ -321,9 +319,7 @@ public class SystemVmTemplateRegistration {
put(Hypervisor.HypervisorType.KVM, "router.template.kvm");
put(Hypervisor.HypervisorType.VMware, "router.template.vmware");
put(Hypervisor.HypervisorType.XenServer, "router.template.xenserver");
- put(Hypervisor.HypervisorType.Hyperv, "router.template.hyperv");
put(Hypervisor.HypervisorType.LXC, "router.template.lxc");
- put(Hypervisor.HypervisorType.Ovm3, "router.template.ovm3");
}
};
@@ -332,20 +328,16 @@ public class SystemVmTemplateRegistration {
put(Hypervisor.HypervisorType.KVM, ImageFormat.QCOW2);
put(Hypervisor.HypervisorType.XenServer, ImageFormat.VHD);
put(Hypervisor.HypervisorType.VMware, ImageFormat.OVA);
- put(Hypervisor.HypervisorType.Hyperv, ImageFormat.VHD);
put(Hypervisor.HypervisorType.LXC, ImageFormat.QCOW2);
- put(Hypervisor.HypervisorType.Ovm3, ImageFormat.RAW);
}
};
- protected static Map<Hypervisor.HypervisorType, Integer> hypervisorGuestOsMap = new HashMap<>() {
+ public static Map<Hypervisor.HypervisorType, Integer> hypervisorGuestOsMap = new HashMap<>() {
{
put(Hypervisor.HypervisorType.KVM, LINUX_12_ID);
put(Hypervisor.HypervisorType.XenServer, OTHER_LINUX_ID);
put(Hypervisor.HypervisorType.VMware, OTHER_LINUX_ID);
- put(Hypervisor.HypervisorType.Hyperv, LINUX_12_ID);
put(Hypervisor.HypervisorType.LXC, LINUX_12_ID);
- put(Hypervisor.HypervisorType.Ovm3, LINUX_12_ID);
}
};
@@ -400,7 +392,7 @@ public class SystemVmTemplateRegistration {
}
protected static void cleanupStore(Long templateId, String filePath) {
- String destTempFolder = filePath + PARTIAL_TEMPLATE_FOLDER + String.valueOf(templateId);
+ String destTempFolder = filePath + PARTIAL_TEMPLATE_FOLDER + templateId;
try {
Files.deleteIfExists(Paths.get(destTempFolder));
} catch (IOException e) {
@@ -411,8 +403,8 @@ public class SystemVmTemplateRegistration {
protected static Pair<Long, Long> readTemplatePropertiesSizes(String path) {
File tmpFile = new File(path);
Long size = null;
- Long physicalSize = 0L;
- try (FileReader fr = new FileReader(tmpFile); BufferedReader brf = new BufferedReader(fr);) {
+ long physicalSize = 0L;
+ try (FileReader fr = new FileReader(tmpFile); BufferedReader brf = new BufferedReader(fr)) {
String line = null;
while ((line = brf.readLine()) != null) {
if (line.startsWith("size=")) {
@@ -543,7 +535,7 @@ public class SystemVmTemplateRegistration {
try {
Files.deleteIfExists(Paths.get(filePath));
} catch (IOException e) {
- LOGGER.error(String.format("Failed to cleanup mounted store at: %s", filePath), e);
+ LOGGER.error("Failed to cleanup mounted store at: {}", filePath, e);
}
} catch (Exception e) {
String msg = String.format("Failed to unmount store mounted at %s", filePath);
@@ -597,7 +589,7 @@ public class SystemVmTemplateRegistration {
Long templateId = vmTemplateDao.getNextInSequence(Long.class, "id");
VMTemplateVO template = new VMTemplateVO();
template.setUuid(details.getUuid());
- template.setUniqueName(String.format("routing-%s" , String.valueOf(templateId)));
+ template.setUniqueName(String.format("routing-%s" , templateId));
template.setName(details.getName());
template.setPublicTemplate(false);
template.setFeatured(false);
@@ -707,9 +699,7 @@ public class SystemVmTemplateRegistration {
LOGGER.debug("Updating system VM Template guest OS [{}] ID", DEFAULT_SYSTEM_VM_GUEST_OS_NAME);
SystemVmTemplateRegistration.LINUX_12_ID = Math.toIntExact(guestOS.getId());
hypervisorGuestOsMap.put(Hypervisor.HypervisorType.KVM, LINUX_12_ID);
- hypervisorGuestOsMap.put(Hypervisor.HypervisorType.Hyperv, LINUX_12_ID);
hypervisorGuestOsMap.put(Hypervisor.HypervisorType.LXC, LINUX_12_ID);
- hypervisorGuestOsMap.put(Hypervisor.HypervisorType.Ovm3, LINUX_12_ID);
} catch (Exception e) {
LOGGER.warn("Couldn't update System VM template guest OS ID, due to {}", e.getMessage());
}
@@ -932,7 +922,6 @@ public class SystemVmTemplateRegistration {
/**
* Validate that templates for the provided hypervisor/architecture pairs which are in use and are valid.
- *
* If a template is missing or validation fails for any required pair, a
* {@link CloudRuntimeException} is thrown to abort the upgrade. If system VM Template for a hypervisor/arch is
* not considered available then validation is skipped for that pair.
@@ -965,7 +954,6 @@ public class SystemVmTemplateRegistration {
/**
* Register or ensure system VM Templates are present on the NFS store for a given zone.
- *
* Mounts the zone image store, enumerates hypervisors and architectures in the zone,
* and for each template either adds an existing template to the store or registers
* a new template as required.
@@ -1263,7 +1251,6 @@ public class SystemVmTemplateRegistration {
/**
* Update or register system VM Templates based on metadata.
- *
* Runs the registration logic inside a database transaction: obtains the
* set of hypervisors/architectures in use, iterates over metadata entries
* and attempts to register or update each template.
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade410to420.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade410to420.java
index a78f93fbdd4..a66aa69798a 100644
--- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade410to420.java
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade410to420.java
@@ -45,6 +45,10 @@ import com.cloud.utils.exception.CloudRuntimeException;
public class Upgrade410to420 extends DbUpgradeAbstractImpl {
+ public static final String UNABLE_TO_PERSIST_VSWITCH_CONFIGURATION_OF_VMWARE_CLUSTERS = "Unable to persist vswitch configuration of VMware clusters.";
+ public static final String INSERT_MODIFIED_ROWS = "Insert modified {} rows";
+ public static final String UPDATE_MODIFIED_ROWS = "Update modified {} rows";
+
@Override
public String[] getUpgradableVersionRange() {
return new String[] {"4.1.0", "4.2.0"};
@@ -55,11 +59,6 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
return "4.2.0";
}
- @Override
- public boolean supportsRollingUpgrade() {
- return false;
- }
-
@Override
public InputStream[] getPrepareScripts() {
final String scriptFile = "META-INF/db/schema-410to420.sql";
@@ -117,12 +116,12 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void createFullCloneFlag(Connection conn) {
String update_sql;
int numRows = 0;
- try (PreparedStatement delete = conn.prepareStatement("delete from `cloud`.`configuration` where name='vmware.create.full.clone';");)
+ try (PreparedStatement delete = conn.prepareStatement("delete from `cloud`.`configuration` where name='vmware.create.full.clone';"))
{
delete.executeUpdate();
- try(PreparedStatement query = conn.prepareStatement("select count(*) from `cloud`.`data_center`");)
+ try(PreparedStatement query = conn.prepareStatement("select count(*) from `cloud`.`data_center`"))
{
- try(ResultSet rs = query.executeQuery();) {
+ try(ResultSet rs = query.executeQuery()) {
if (rs.next()) {
numRows = rs.getInt(1);
}
@@ -131,7 +130,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
} else {
update_sql = "insert into `cloud`.`configuration` (`category`, `instance`, `component`, `name`, `value`, `description`) VALUES ('Advanced', 'DEFAULT', 'UserVmManager', 'vmware.create.full.clone' , 'true', 'If set to true, creates VMs as full clones on ESX hypervisor');";
}
- try(PreparedStatement update_pstmt = conn.prepareStatement(update_sql);) {
+ try(PreparedStatement update_pstmt = conn.prepareStatement(update_sql)) {
update_pstmt.executeUpdate();
}catch (SQLException e) {
throw new CloudRuntimeException("Failed to set global flag vmware.create.full.clone: ", e);
@@ -148,7 +147,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
private void migrateVolumeOnSecondaryStorage(Connection conn) {
- try (PreparedStatement sql = conn.prepareStatement("update `cloud`.`volumes` set state='Uploaded' where state='UploadOp'");){
+ try (PreparedStatement sql = conn.prepareStatement("update `cloud`.`volumes` set state='Uploaded' where state='UploadOp'")){
sql.executeUpdate();
} catch (SQLException e) {
throw new CloudRuntimeException("Failed to upgrade volume state: ", e);
@@ -156,7 +155,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
private void persistVswitchConfiguration(Connection conn) {
- Long clusterId;
+ long clusterId;
String clusterHypervisorType;
final String NEXUS_GLOBAL_CONFIG_PARAM_NAME = "vmware.use.nexus.vswitch";
final String DVS_GLOBAL_CONFIG_PARAM_NAME = "vmware.use.dvswitch";
@@ -168,10 +167,10 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
boolean nexusEnabled = false;
String publicVswitchType = VMWARE_STANDARD_VSWITCH;
String guestVswitchType = VMWARE_STANDARD_VSWITCH;
- Map<Long, List<Pair<String, String>>> detailsMap = new HashMap<Long, List<Pair<String, String>>>();
+ Map<Long, List<Pair<String, String>>> detailsMap = new HashMap<>();
List<Pair<String, String>> detailsList;
- try (PreparedStatement clustersQuery = conn.prepareStatement("select id, hypervisor_type from `cloud`.`cluster` where removed is NULL");){
- try(ResultSet clusters = clustersQuery.executeQuery();) {
+ try (PreparedStatement clustersQuery = conn.prepareStatement("select id, hypervisor_type from `cloud`.`cluster` where removed is NULL")){
+ try(ResultSet clusters = clustersQuery.executeQuery()) {
while (clusters.next()) {
clusterHypervisorType = clusters.getString("hypervisor_type");
clusterId = clusters.getLong("id");
@@ -186,20 +185,19 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
publicVswitchType = NEXUS_1000V_DVSWITCH;
guestVswitchType = NEXUS_1000V_DVSWITCH;
}
- detailsList = new ArrayList<Pair<String, String>>();
- detailsList.add(new Pair<String, String>(ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC, guestVswitchType));
- detailsList.add(new Pair<String, String>(ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC, publicVswitchType));
+ detailsList = new ArrayList<>();
+ detailsList.add(new Pair<>(ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC, guestVswitchType));
+ detailsList.add(new Pair<>(ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC, publicVswitchType));
detailsMap.put(clusterId, detailsList);
updateClusterDetails(conn, detailsMap);
- logger.debug("Persist vSwitch Configuration: Successfully persisted vswitch configuration for cluster " + clusterId);
+ logger.debug("Persist vSwitch Configuration: Successfully persisted vswitch configuration for cluster {}", clusterId);
} else {
- logger.debug("Persist vSwitch Configuration: Ignoring cluster " + clusterId + " with hypervisor type " + clusterHypervisorType);
- continue;
+ logger.debug("Persist vSwitch Configuration: Ignoring cluster {} with hypervisor type {}", clusterId, clusterHypervisorType);
}
} // End cluster iteration
- }catch (SQLException e) {
- String msg = "Unable to persist vswitch configuration of VMware clusters." + e.getMessage();
+ } catch (SQLException e) {
+ String msg = UNABLE_TO_PERSIST_VSWITCH_CONFIGURATION_OF_VMWARE_CLUSTERS + e.getMessage();
logger.error(msg);
throw new CloudRuntimeException(msg, e);
}
@@ -209,10 +207,11 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
setConfigurationParameter(conn, VSWITCH_GLOBAL_CONFIG_PARAM_CATEGORY, DVS_GLOBAL_CONFIG_PARAM_NAME, "true");
}
} catch (SQLException e) {
- String msg = "Unable to persist vswitch configuration of VMware clusters." + e.getMessage();
+ String msg = UNABLE_TO_PERSIST_VSWITCH_CONFIGURATION_OF_VMWARE_CLUSTERS + e.getMessage();
logger.error(msg);
throw new CloudRuntimeException(msg, e);
}
+
}
private void updateClusterDetails(Connection conn, Map<Long, List<Pair<String, String>>> detailsMap) {
@@ -227,7 +226,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
while (clusterIt.hasNext()) {
clusterId = clusterIt.next();
keyValues = detailsMap.get(clusterId);
- try( PreparedStatement clusterDetailsInsert = conn.prepareStatement("INSERT INTO `cloud`.`cluster_details` (cluster_id, name, value) VALUES (?, ?, ?)");) {
+ try( PreparedStatement clusterDetailsInsert = conn.prepareStatement("INSERT INTO `cloud`.`cluster_details` (cluster_id, name, value) VALUES (?, ?, ?)")) {
for (Pair<String, String> keyValuePair : keyValues) {
key = keyValuePair.first();
val = keyValuePair.second();
@@ -236,7 +235,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
clusterDetailsInsert.setString(3, val);
clusterDetailsInsert.executeUpdate();
}
- logger.debug("Inserted vswitch configuration details into cloud.cluster_details for cluster with id " + clusterId + ".");
+ logger.debug("Inserted vswitch configuration details into cloud.cluster_details for cluster with id {}.", clusterId);
}catch (SQLException e) {
throw new CloudRuntimeException("Unable insert cluster details into cloud.cluster_details table.", e);
}
@@ -248,12 +247,12 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private String getConfigurationParameter(Connection conn, String category, String paramName) {
try (PreparedStatement pstmt =
- conn.prepareStatement("select value from `cloud`.`configuration` where category=? and value is not NULL and name = ?;");)
+ conn.prepareStatement("select value from `cloud`.`configuration` where category=? and value is not NULL and name = ?;"))
{
pstmt.setString(1, category);
pstmt.setString(2, paramName);
- try(ResultSet rs = pstmt.executeQuery();) {
- while (rs.next()) {
+ try(ResultSet rs = pstmt.executeQuery()) {
+ if (rs.next()) {
return rs.getString("value");
}
}catch (SQLException e) {
@@ -266,11 +265,11 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
private void setConfigurationParameter(Connection conn, String category, String paramName, String paramVal) {
- try (PreparedStatement pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? WHERE name = ?;");)
+ try (PreparedStatement pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? WHERE name = ?;"))
{
pstmt.setString(1, paramVal);
pstmt.setString(2, paramName);
- logger.debug("Updating global configuration parameter " + paramName + " with value " + paramVal + ". Update SQL statement is " + pstmt);
+ logger.debug("Updating global configuration parameter {} with value {}. Update SQL statement is {}", paramName, paramVal, pstmt);
pstmt.executeUpdate();
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to set global configuration parameter " + paramName + " to " + paramVal + ". ", e);
@@ -279,25 +278,25 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void movePrivateZoneToDedicatedResource(Connection conn) {
String domainName = "";
- try (PreparedStatement sel_dc_dom_id = conn.prepareStatement("SELECT distinct(`domain_id`) FROM `cloud`.`data_center` WHERE `domain_id` IS NOT NULL AND removed IS NULL");) {
- try (ResultSet rs3 = sel_dc_dom_id.executeQuery();) {
+ try (PreparedStatement sel_dc_dom_id = conn.prepareStatement("SELECT distinct(`domain_id`) FROM `cloud`.`data_center` WHERE `domain_id` IS NOT NULL AND removed IS NULL")) {
+ try (ResultSet rs3 = sel_dc_dom_id.executeQuery()) {
while (rs3.next()) {
long domainId = rs3.getLong(1);
long affinityGroupId = 0;
// create or find an affinity group for this domain of type
// 'ExplicitDedication'
try (PreparedStatement sel_aff_grp_pstmt =
- conn.prepareStatement("SELECT affinity_group.id FROM `cloud`.`affinity_group` INNER JOIN `cloud`.`affinity_group_domain_map` ON affinity_group.id=affinity_group_domain_map.affinity_group_id WHERE affinity_group.type = 'ExplicitDedication' AND affinity_group.acl_type = 'Domain' AND (affinity_group_domain_map.domain_id = ?)");) {
+ conn.prepareStatement("SELECT affinity_group.id FROM `cloud`.`affinity_group` INNER JOIN `cloud`.`affinity_group_domain_map` ON affinity_group.id=affinity_group_domain_map.affinity_group_id WHERE affinity_group.type = 'ExplicitDedication' AND affinity_group.acl_type = 'Domain' AND (affinity_group_domain_map.domain_id = ?)")) {
sel_aff_grp_pstmt.setLong(1, domainId);
- try (ResultSet rs2 = sel_aff_grp_pstmt.executeQuery();) {
+ try (ResultSet rs2 = sel_aff_grp_pstmt.executeQuery()) {
if (rs2.next()) {
// group exists, use it
affinityGroupId = rs2.getLong(1);
} else {
// create new group
- try (PreparedStatement sel_dom_id_pstmt = conn.prepareStatement("SELECT name FROM `cloud`.`domain` where id = ?");) {
+ try (PreparedStatement sel_dom_id_pstmt = conn.prepareStatement("SELECT name FROM `cloud`.`domain` where id = ?")) {
sel_dom_id_pstmt.setLong(1, domainId);
- try (ResultSet sel_dom_id_res = sel_dom_id_pstmt.executeQuery();) {
+ try (ResultSet sel_dom_id_res = sel_dom_id_pstmt.executeQuery()) {
if (sel_dom_id_res.next()) {
domainName = sel_dom_id_res.getString(1);
}
@@ -309,18 +308,18 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
String type = "ExplicitDedication";
String uuid = UUID.randomUUID().toString();
String groupName = "DedicatedGrp-domain-" + domainName;
- logger.debug("Adding AffinityGroup of type " + type + " for domain id " + domainId);
+ logger.debug("Adding AffinityGroup of type {} for domain id {}", type, domainId);
String sql =
"INSERT INTO `cloud`.`affinity_group` (`name`, `type`, `uuid`, `description`, `domain_id`, `account_id`, `acl_type`) VALUES (?, ?, ?, ?, 1, 1, 'Domain')";
- try (PreparedStatement insert_pstmt = conn.prepareStatement(sql);) {
+ try (PreparedStatement insert_pstmt = conn.prepareStatement(sql)) {
insert_pstmt.setString(1, groupName);
insert_pstmt.setString(2, type);
insert_pstmt.setString(3, uuid);
insert_pstmt.setString(4, "dedicated resources group");
insert_pstmt.executeUpdate();
- try (PreparedStatement sel_aff_pstmt = conn.prepareStatement("SELECT affinity_group.id FROM `cloud`.`affinity_group` where uuid = ?");) {
+ try (PreparedStatement sel_aff_pstmt = conn.prepareStatement("SELECT affinity_group.id FROM `cloud`.`affinity_group` where uuid = ?")) {
sel_aff_pstmt.setString(1, uuid);
- try (ResultSet sel_aff_res = sel_aff_pstmt.executeQuery();) {
+ try (ResultSet sel_aff_res = sel_aff_pstmt.executeQuery()) {
if (sel_aff_res.next()) {
affinityGroupId = sel_aff_res.getLong(1);
}
@@ -335,7 +334,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
// add the domain map
String sqlMap = "INSERT INTO `cloud`.`affinity_group_domain_map` (`domain_id`, `affinity_group_id`) VALUES (?, ?)";
- try (PreparedStatement pstmtUpdate = conn.prepareStatement(sqlMap);) {
+ try (PreparedStatement pstmtUpdate = conn.prepareStatement(sqlMap)) {
pstmtUpdate.setLong(1, domainId);
pstmtUpdate.setLong(2, affinityGroupId);
pstmtUpdate.executeUpdate();
@@ -349,9 +348,9 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
} catch (SQLException e) {
throw new CloudRuntimeException("Exception while Moving private zone information to dedicated resources", e);
}
- try (PreparedStatement sel_pstmt = conn.prepareStatement("SELECT `id` FROM `cloud`.`data_center` WHERE `domain_id` = ? AND removed IS NULL");) {
+ try (PreparedStatement sel_pstmt = conn.prepareStatement("SELECT `id` FROM `cloud`.`data_center` WHERE `domain_id` = ? AND removed IS NULL")) {
sel_pstmt.setLong(1, domainId);
- try (ResultSet sel_pstmt_rs = sel_pstmt.executeQuery();) {
+ try (ResultSet sel_pstmt_rs = sel_pstmt.executeQuery()) {
while (sel_pstmt_rs.next()) {
long zoneId = sel_pstmt_rs.getLong(1);
dedicateZone(conn, zoneId, domainId, affinityGroupId);
@@ -371,21 +370,20 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
}
private void dedicateZone(Connection conn, long zoneId, long domainId, long affinityGroupId) {
- try( PreparedStatement pstmtUpdate2 = conn.prepareStatement("INSERT INTO `cloud`.`dedicated_resources` (`uuid`,`data_center_id`, `domain_id`, `affinity_group_id`) VALUES (?, ?, ?, ?)");) {
+ try( PreparedStatement pstmtUpdate2 = conn.prepareStatement("INSERT INTO `cloud`.`dedicated_resources` (`uuid`,`data_center_id`, `domain_id`, `affinity_group_id`) VALUES (?, ?, ?, ?)")) {
// create the dedicated resources entry
pstmtUpdate2.setString(1, UUID.randomUUID().toString());
pstmtUpdate2.setLong(2, zoneId);
pstmtUpdate2.setLong(3, domainId);
pstmtUpdate2.setLong(4, affinityGroupId);
pstmtUpdate2.executeUpdate();
- pstmtUpdate2.close();
} catch (SQLException e) {
throw new CloudRuntimeException("Exception while saving zone to dedicated resources", e);
}
}
private void fixBaremetalForeignKeys(Connection conn) {
- List<String> keys = new ArrayList<String>();
+ List<String> keys = new ArrayList<>();
keys.add("fk_external_dhcp_devices_nsp_id");
keys.add("fk_external_dhcp_devices_host_id");
keys.add("fk_external_dhcp_devices_pod_id");
@@ -397,15 +395,15 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
keys.add("fk_external_pxe_devices_physical_network_id");
DbUpgradeUtils.dropKeysIfExist(conn, "baremetal_pxe_devices", keys, true);
- try (PreparedStatement alter_pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_dhcp_devices` ADD CONSTRAINT `fk_external_dhcp_devices_nsp_id` FOREIGN KEY (`nsp_id`) REFERENCES `physical_network_service_providers` (`id`) ON DELETE CASCADE");)
+ try (PreparedStatement alter_pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_dhcp_devices` ADD CONSTRAINT `fk_external_dhcp_devices_nsp_id` FOREIGN KEY (`nsp_id`) REFERENCES `physical_network_service_providers` (`id`) ON DELETE CASCADE"))
{
alter_pstmt.executeUpdate();
try(PreparedStatement alter_pstmt_id =
- conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_dhcp_devices` ADD CONSTRAINT `fk_external_dhcp_devices_host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE");
+ conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_dhcp_devices` ADD CONSTRAINT `fk_external_dhcp_devices_host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE")
) {
alter_pstmt_id.executeUpdate();
try(PreparedStatement alter_pstmt_phy_net =
- conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_dhcp_devices` ADD CONSTRAINT `fk_external_dhcp_devices_physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE");)
+ conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_dhcp_devices` ADD CONSTRAINT `fk_external_dhcp_devices_physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE"))
{
alter_pstmt_phy_net.executeUpdate();
}catch (SQLException e) {
@@ -419,14 +417,14 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
throw new CloudRuntimeException("Unable to add foreign keys to baremetal_dhcp_devices table", e);
}
try (PreparedStatement alter_pxe_pstmt =
- conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_pxe_devices` ADD CONSTRAINT `fk_external_pxe_devices_nsp_id` FOREIGN KEY (`nsp_id`) REFERENCES `physical_network_service_providers` (`id`) ON DELETE CASCADE");)
+ conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_pxe_devices` ADD CONSTRAINT `fk_external_pxe_devices_nsp_id` FOREIGN KEY (`nsp_id`) REFERENCES `physical_network_service_providers` (`id`) ON DELETE CASCADE"))
{
alter_pxe_pstmt.executeUpdate();
try(PreparedStatement alter_pxe_id_pstmt =
- conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_pxe_devices` ADD CONSTRAINT `fk_external_pxe_devices_host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE");) {
+ conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_pxe_devices` ADD CONSTRAINT `fk_external_pxe_devices_host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE")) {
alter_pxe_id_pstmt.executeUpdate();
try(PreparedStatement alter_pxe_phy_net_pstmt =
- conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_pxe_devices` ADD CONSTRAINT `fk_external_pxe_devices_physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE");) {
+ conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_pxe_devices` ADD CONSTRAINT `fk_external_pxe_devices_physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE")) {
alter_pxe_phy_net_pstmt.executeUpdate();
}catch (SQLException e) {
throw new CloudRuntimeException("Unable to add foreign keys to baremetal_pxe_devices table", e);
@@ -442,13 +440,13 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void addIndexForAlert(Connection conn) {
//First drop if it exists. (Due to patches shipped to customers some will have the index and some won't.)
- List<String> indexList = new ArrayList<String>();
+ List<String> indexList = new ArrayList<>();
logger.debug("Dropping index i_alert__last_sent if it exists");
indexList.add("last_sent"); // in 4.1, we created this index that is not in convention.
indexList.add("i_alert__last_sent");
DbUpgradeUtils.dropKeysIfExist(conn, "alert", indexList, false);
//Now add index.
- try(PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`alert` ADD INDEX `i_alert__last_sent`(`last_sent`)");)
+ try(PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`alert` ADD INDEX `i_alert__last_sent`(`last_sent`)"))
{
pstmt.executeUpdate();
logger.debug("Added index i_alert__last_sent for table alert");
@@ -457,76 +455,19 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
}
- private void dropUploadTable(Connection conn) {
- try(PreparedStatement pstmt0 = conn.prepareStatement("SELECT url, created, type_id, host_id from upload where type=?");) {
- // Read upload table - Templates
- logger.debug("Populating template_store_ref table");
- pstmt0.setString(1, "TEMPLATE");
- try(ResultSet rs0 = pstmt0.executeQuery();)
- {
- try(PreparedStatement pstmt1 = conn.prepareStatement("UPDATE template_store_ref SET download_url=?, download_url_created=? where template_id=? and store_id=?");) {
- //Update template_store_ref
- while (rs0.next()) {
- pstmt1.setString(1, rs0.getString("url"));
- pstmt1.setDate(2, rs0.getDate("created"));
- pstmt1.setLong(3, rs0.getLong("type_id"));
- pstmt1.setLong(4, rs0.getLong("host_id"));
- pstmt1.executeUpdate();
- }
- // Read upload table - Volumes
- logger.debug("Populating volume store ref table");
- try(PreparedStatement pstmt2 = conn.prepareStatement("SELECT url, created, type_id, host_id, install_path from upload where type=?");) {
- pstmt2.setString(1, "VOLUME");
- try(ResultSet rs2 = pstmt2.executeQuery();) {
-
- try(PreparedStatement pstmt3 =
- conn.prepareStatement("INSERT IGNORE INTO volume_store_ref (volume_id, store_id, zone_id, created, state, download_url, download_url_created, install_path) VALUES (?,?,?,?,?,?,?,?)");) {
- //insert into template_store_ref
- while (rs2.next()) {
- pstmt3.setLong(1, rs2.getLong("type_id"));
- pstmt3.setLong(2, rs2.getLong("host_id"));
- pstmt3.setLong(3, 1l);// ???
- pstmt3.setDate(4, rs2.getDate("created"));
- pstmt3.setString(5, "Ready");
- pstmt3.setString(6, rs2.getString("url"));
- pstmt3.setDate(7, rs2.getDate("created"));
- pstmt3.setString(8, rs2.getString("install_path"));
- pstmt3.executeUpdate();
- }
- }catch (SQLException e) {
- throw new CloudRuntimeException("Unable add date into template/volume store ref from upload table.", e);
- }
- }catch (SQLException e) {
- throw new CloudRuntimeException("Unable add date into template/volume store ref from upload table.", e);
- }
- }catch (SQLException e) {
- throw new CloudRuntimeException("Unable add date into template/volume store ref from upload table.", e);
- }
- }catch (SQLException e) {
- throw new CloudRuntimeException("Unable add date into template/volume store ref from upload table.", e);
- }
- }catch (SQLException e) {
- throw new CloudRuntimeException("Unable add date into template/volume store ref from upload table.", e);
- }
-
- } catch (SQLException e) {
- throw new CloudRuntimeException("Unable add date into template/volume store ref from upload table.", e);
- }
- }
-
//KVM snapshot flag: only turn on if Customers is using snapshot;
private void setKVMSnapshotFlag(Connection conn) {
logger.debug("Verify and set the KVM snapshot flag if snapshot was used. ");
- try(PreparedStatement pstmt = conn.prepareStatement("select count(*) from `cloud`.`snapshots` where hypervisor_type = 'KVM'");)
+ try(PreparedStatement pstmt = conn.prepareStatement("select count(*) from `cloud`.`snapshots` where hypervisor_type = 'KVM'"))
{
int numRows = 0;
- try(ResultSet rs = pstmt.executeQuery();) {
+ try(ResultSet rs = pstmt.executeQuery()) {
if (rs.next()) {
numRows = rs.getInt(1);
}
if (numRows > 0) {
//Add the configuration flag
- try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? WHERE name = 'kvm.snapshot.enabled'");) {
+ try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? WHERE name = 'kvm.snapshot.enabled'")) {
update_pstmt.setString(1, "true");
update_pstmt.executeUpdate();
}catch (SQLException e) {
@@ -543,19 +484,19 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
private void updatePrimaryStore(Connection conn) {
- try(PreparedStatement sql = conn.prepareStatement("update storage_pool set storage_provider_name = ? , scope = ? where pool_type = 'Filesystem' or pool_type = 'LVM'");) {
+ try(PreparedStatement sql = conn.prepareStatement("update storage_pool set storage_provider_name = ? , scope = ? where pool_type = 'Filesystem' or pool_type = 'LVM'")) {
sql.setString(1, DataStoreProvider.DEFAULT_PRIMARY);
sql.setString(2, "HOST");
sql.executeUpdate();
- try(PreparedStatement sql2 = conn.prepareStatement("update storage_pool set storage_provider_name = ? , scope = ? where pool_type != 'Filesystem' and pool_type != 'LVM'");) {
+ try(PreparedStatement sql2 = conn.prepareStatement("update storage_pool set storage_provider_name = ? , scope = ? where pool_type != 'Filesystem' and pool_type != 'LVM'")) {
sql2.setString(1, DataStoreProvider.DEFAULT_PRIMARY);
sql2.setString(2, "CLUSTER");
sql2.executeUpdate();
}catch (SQLException e) {
- throw new CloudRuntimeException("Failed to upgrade vm template data store uuid: " + e.toString());
+ throw new CloudRuntimeException("Failed to upgrade vm template data store uuid: " + e, e);
}
} catch (SQLException e) {
- throw new CloudRuntimeException("Failed to upgrade vm template data store uuid: " + e.toString());
+ throw new CloudRuntimeException("Failed to upgrade vm template data store uuid: " + e, e);
}
}
@@ -565,20 +506,20 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
PreparedStatement pstmt = conn.prepareStatement("select id, hypervisor_type from `cloud`.`cluster` WHERE removed IS NULL");
PreparedStatement pstmt1 = conn.prepareStatement("INSERT INTO `cloud`.`cluster_details` (cluster_id, name, value) VALUES(?, 'cpuOvercommitRatio', ?)");
PreparedStatement pstmt2 = conn.prepareStatement("INSERT INTO `cloud`.`cluster_details` (cluster_id, name, value) VALUES(?, 'memoryOvercommitRatio', ?)");
- PreparedStatement pstmt3 = conn.prepareStatement("select value from `cloud`.`configuration` where name=?");) {
+ PreparedStatement pstmt3 = conn.prepareStatement("select value from `cloud`.`configuration` where name=?")) {
String global_cpu_overprovisioning_factor = "1";
String global_mem_overprovisioning_factor = "1";
pstmt3.setString(1, "cpu.overprovisioning.factor");
- try (ResultSet rscpu_global = pstmt3.executeQuery();) {
+ try (ResultSet rscpu_global = pstmt3.executeQuery()) {
if (rscpu_global.next())
global_cpu_overprovisioning_factor = rscpu_global.getString(1);
}
pstmt3.setString(1, "mem.overprovisioning.factor");
- try (ResultSet rsmem_global = pstmt3.executeQuery();) {
+ try (ResultSet rsmem_global = pstmt3.executeQuery()) {
if (rsmem_global.next())
global_mem_overprovisioning_factor = rsmem_global.getString(1);
}
- try (ResultSet rs1 = pstmt.executeQuery();) {
+ try (ResultSet rs1 = pstmt.executeQuery()) {
while (rs1.next()) {
long id = rs1.getLong(1);
String hypervisor_type = rs1.getString(2);
@@ -643,29 +584,34 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
String trafficTypeVswitchParamValue;
try (PreparedStatement pstmt =
- conn.prepareStatement("select name,value from `cloud`.`configuration` where category='Hidden' and value is not NULL and name REGEXP 'vmware*.vswitch';");)
+ conn.prepareStatement("select name,value from `cloud`.`configuration` where category='Hidden' and value is not NULL and name REGEXP 'vmware*.vswitch';"))
{
// update the existing vmware traffic labels
- try(ResultSet rsParams = pstmt.executeQuery();) {
+ try(ResultSet rsParams = pstmt.executeQuery()) {
while (rsParams.next()) {
trafficTypeVswitchParam = rsParams.getString("name");
trafficTypeVswitchParamValue = rsParams.getString("value");
// When upgraded from 4.0 to 4.1 update physical network traffic label with trafficTypeVswitchParam
- if (trafficTypeVswitchParam.equals("vmware.private.vswitch")) {
- trafficType = "Management"; //TODO(sateesh): Ignore storage traffic, as required physical network already implemented, anything else tobe done?
- } else if (trafficTypeVswitchParam.equals("vmware.public.vswitch")) {
- trafficType = "Public";
- } else if (trafficTypeVswitchParam.equals("vmware.guest.vswitch")) {
- trafficType = "Guest";
+ switch (trafficTypeVswitchParam) {
+ case "vmware.private.vswitch":
+ trafficType = "Management"; //TODO(sateesh): Ignore storage traffic, as required physical network already implemented, anything else tobe done?
+
+ break;
+ case "vmware.public.vswitch":
+ trafficType = "Public";
+ break;
+ case "vmware.guest.vswitch":
+ trafficType = "Guest";
+ break;
}
try(PreparedStatement sel_pstmt =
- conn.prepareStatement("select physical_network_id, traffic_type, vmware_network_label from physical_network_traffic_types where vmware_network_label is not NULL and traffic_type=?;");) {
+ conn.prepareStatement("select physical_network_id, traffic_type, vmware_network_label from physical_network_traffic_types where vmware_network_label is not NULL and traffic_type=?;")) {
pstmt.setString(1, trafficType);
- try(ResultSet rsLabel = sel_pstmt.executeQuery();) {
+ try(ResultSet rsLabel = sel_pstmt.executeQuery()) {
newLabel = getNewLabel(rsLabel, trafficTypeVswitchParamValue);
try(PreparedStatement update_pstmt =
- conn.prepareStatement("update physical_network_traffic_types set vmware_network_label = ? where traffic_type = ? and vmware_network_label is not NULL;");) {
- logger.debug("Updating VMware label for " + trafficType + " traffic. Update SQL statement is " + pstmt);
+ conn.prepareStatement("update physical_network_traffic_types set vmware_network_label = ? where traffic_type = ? and vmware_network_label is not NULL;")) {
+ logger.debug("Updating VMware label for {} traffic. Update SQL statement is {}", trafficType, pstmt);
pstmt.setString(1, newLabel);
pstmt.setString(2, trafficType);
update_pstmt.executeUpdate();
@@ -688,17 +634,17 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
private void persistLegacyZones(Connection conn) {
- List<Long> listOfLegacyZones = new ArrayList<Long>();
- List<Long> listOfNonLegacyZones = new ArrayList<Long>();
- Map<String, ArrayList<Long>> dcToZoneMap = new HashMap<String, ArrayList<Long>>();
+ List<Long> listOfLegacyZones = new ArrayList<>();
+ List<Long> listOfNonLegacyZones = new ArrayList<>();
+ Map<String, ArrayList<Long>> dcToZoneMap = new HashMap<>();
ResultSet clusters = null;
Long zoneId;
- Long clusterId;
+ long clusterId;
ArrayList<String> dcList = null;
String clusterHypervisorType;
boolean legacyZone;
boolean ignoreZone;
- Long count;
+ long count;
String dcOfPreviousCluster = null;
String dcOfCurrentCluster = null;
String[] tokens;
@@ -706,15 +652,15 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
String vc = "";
String dcName = "";
- try (PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`data_center` where removed is NULL");) {
- try (ResultSet rs = pstmt.executeQuery();) {
+ try (PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`data_center` where removed is NULL")) {
+ try (ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
zoneId = rs.getLong("id");
- try (PreparedStatement clustersQuery = conn.prepareStatement("select id, hypervisor_type from `cloud`.`cluster` where removed is NULL AND data_center_id=?");) {
+ try (PreparedStatement clustersQuery = conn.prepareStatement("select id, hypervisor_type from `cloud`.`cluster` where removed is NULL AND data_center_id=?")) {
clustersQuery.setLong(1, zoneId);
legacyZone = false;
ignoreZone = true;
- dcList = new ArrayList<String>();
+ dcList = new ArrayList<>();
count = 0L;
// Legacy zone term is meant only for VMware
// Legacy zone is a zone with at least 2 clusters & with multiple DCs or VCs
@@ -730,9 +676,9 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
if (clusterHypervisorType.equalsIgnoreCase("VMware")) {
ignoreZone = false;
try (PreparedStatement clusterDetailsQuery = conn
- .prepareStatement("select value from `cloud`.`cluster_details` where name='url' and cluster_id=?");) {
+ .prepareStatement("select value from `cloud`.`cluster_details` where name='url' and cluster_id=?")) {
clusterDetailsQuery.setLong(1, clusterId);
- try (ResultSet clusterDetails = clusterDetailsQuery.executeQuery();) {
+ try (ResultSet clusterDetails = clusterDetailsQuery.executeQuery()) {
clusterDetails.next();
url = clusterDetails.getString("value");
tokens = url.split("/"); // url format - http://vcenter/dc/cluster
@@ -746,7 +692,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
if (count > 0) {
if (!dcOfPreviousCluster.equalsIgnoreCase(dcOfCurrentCluster)) {
legacyZone = true;
- logger.debug("Marking the zone " + zoneId + " as legacy zone.");
+ logger.debug("Marking the zone {} as legacy zone.", zoneId);
}
}
} catch (SQLException e) {
@@ -756,7 +702,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
throw new CloudRuntimeException("Unable add zones to cloud.legacyzones table.", e);
}
} else {
- logger.debug("Ignoring zone " + zoneId + " with hypervisor type " + clusterHypervisorType);
+ logger.debug("Ignoring zone {} with hypervisor type {}", zoneId, clusterHypervisorType);
break;
}
count++;
@@ -774,7 +720,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
listOfNonLegacyZones.add(zoneId);
}
for (String dc : dcList) {
- ArrayList<Long> dcZones = new ArrayList<Long>();
+ ArrayList<Long> dcZones = new ArrayList<>();
if (dcToZoneMap.get(dc) != null) {
dcZones = dcToZoneMap.get(dc);
}
@@ -796,22 +742,22 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
updateLegacyZones(conn, listOfLegacyZones);
updateNonLegacyZones(conn, listOfNonLegacyZones);
} catch (SQLException e) {
- logger.error("Unable to discover legacy zones." + e.getMessage(),e);
+ logger.error("Unable to discover legacy zones.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to discover legacy zones." + e.getMessage(), e);
}
}catch (SQLException e) {
- logger.error("Unable to discover legacy zones." + e.getMessage(),e);
+ logger.error("Unable to discover legacy zones.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to discover legacy zones." + e.getMessage(), e);
}
}
private void updateLegacyZones(Connection conn, List<Long> zones) {
//Insert legacy zones into table for legacy zones.
- try (PreparedStatement legacyZonesQuery = conn.prepareStatement("INSERT INTO `cloud`.`legacy_zones` (zone_id) VALUES (?)");){
+ try (PreparedStatement legacyZonesQuery = conn.prepareStatement("INSERT INTO `cloud`.`legacy_zones` (zone_id) VALUES (?)")){
for (Long zoneId : zones) {
legacyZonesQuery.setLong(1, zoneId);
legacyZonesQuery.executeUpdate();
- logger.debug("Inserted zone " + zoneId + " into cloud.legacyzones table");
+ logger.debug("Inserted zone {} into cloud.legacyzones table", zoneId);
}
} catch (SQLException e) {
throw new CloudRuntimeException("Unable add zones to cloud.legacyzones table.", e);
@@ -821,22 +767,22 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void updateNonLegacyZones(Connection conn, List<Long> zones) {
try {
for (Long zoneId : zones) {
- logger.debug("Discovered non-legacy zone " + zoneId + ". Processing the zone to associate with VMware datacenter.");
+ logger.debug("Discovered non-legacy zone {}. Processing the zone to associate with VMware datacenter.", zoneId);
// All clusters in a non legacy zone will belong to the same VMware DC, hence pick the first cluster
- try (PreparedStatement clustersQuery = conn.prepareStatement("select id from `cloud`.`cluster` where removed is NULL AND data_center_id=?");) {
+ try (PreparedStatement clustersQuery = conn.prepareStatement("select id from `cloud`.`cluster` where removed is NULL AND data_center_id=?")) {
clustersQuery.setLong(1, zoneId);
- try (ResultSet clusters = clustersQuery.executeQuery();) {
+ try (ResultSet clusters = clustersQuery.executeQuery()) {
clusters.next();
- Long clusterId = clusters.getLong("id");
+ long clusterId = clusters.getLong("id");
// Get VMware datacenter details from cluster_details table
String user = null;
String password = null;
String url = null;
- try (PreparedStatement clusterDetailsQuery = conn.prepareStatement("select name, value from `cloud`.`cluster_details` where cluster_id=?");) {
+ try (PreparedStatement clusterDetailsQuery = conn.prepareStatement("select name, value from `cloud`.`cluster_details` where cluster_id=?")) {
clusterDetailsQuery.setLong(1, clusterId);
- try (ResultSet clusterDetails = clusterDetailsQuery.executeQuery();) {
+ try (ResultSet clusterDetails = clusterDetailsQuery.executeQuery()) {
while (clusterDetails.next()) {
String key = clusterDetails.getString(1);
String value = clusterDetails.getString(2);
@@ -854,7 +800,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
String guid = dcName + "@" + vc;
try (PreparedStatement insertVmWareDC = conn
- .prepareStatement("INSERT INTO `cloud`.`vmware_data_center` (uuid, name, guid, vcenter_host, username, password) values(?, ?, ?, ?, ?, ?)");) {
+ .prepareStatement("INSERT INTO `cloud`.`vmware_data_center` (uuid, name, guid, vcenter_host, username, password) values(?, ?, ?, ?, ?, ?)")) {
insertVmWareDC.setString(1, UUID.randomUUID().toString());
insertVmWareDC.setString(2, dcName);
insertVmWareDC.setString(3, guid);
@@ -863,16 +809,16 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
insertVmWareDC.setString(6, password);
insertVmWareDC.executeUpdate();
}
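+ // look up the id of the vmware_data_center row just inserted so it can be mapped to this zone below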
- try (PreparedStatement selectVmWareDC = conn.prepareStatement("SELECT id FROM `cloud`.`vmware_data_center` where guid=?");) {
+ try (PreparedStatement selectVmWareDC = conn.prepareStatement("SELECT id FROM `cloud`.`vmware_data_center` where guid=?")) {
selectVmWareDC.setString(1, guid);
- try (ResultSet vmWareDcInfo = selectVmWareDC.executeQuery();) {
- Long vmwareDcId = -1L;
+ try (ResultSet vmWareDcInfo = selectVmWareDC.executeQuery()) {
+ long vmwareDcId = -1L;
if (vmWareDcInfo.next()) {
vmwareDcId = vmWareDcInfo.getLong("id");
}
try (PreparedStatement insertMapping = conn
- .prepareStatement("INSERT INTO `cloud`.`vmware_data_center_zone_map` (zone_id, vmware_data_center_id) values(?, ?)");) {
+ .prepareStatement("INSERT INTO `cloud`.`vmware_data_center_zone_map` (zone_id, vmware_data_center_id) values(?, ?)")) {
insertMapping.setLong(1, zoneId);
insertMapping.setLong(2, vmwareDcId);
insertMapping.executeUpdate();
@@ -893,17 +839,17 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void createPlaceHolderNics(Connection conn) {
try (PreparedStatement pstmt =
- conn.prepareStatement("SELECT network_id, gateway, ip4_address FROM `cloud`.`nics` WHERE reserver_name IN ('DirectNetworkGuru','DirectPodBasedNetworkGuru') and vm_type='DomainRouter' AND removed IS null");)
+ conn.prepareStatement("SELECT network_id, gateway, ip4_address FROM `cloud`.`nics` WHERE reserver_name IN ('DirectNetworkGuru','DirectPodBasedNetworkGuru') and vm_type='DomainRouter' AND removed IS null"))
{
- try(ResultSet rs = pstmt.executeQuery();) {
+ try(ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
- Long networkId = rs.getLong(1);
+ long networkId = rs.getLong(1);
String gateway = rs.getString(2);
String ip = rs.getString(3);
String uuid = UUID.randomUUID().toString();
//Insert placeholder nic for each Domain router nic in Shared network
try(PreparedStatement insert_pstmt =
- conn.prepareStatement("INSERT INTO `cloud`.`nics` (uuid, ip4_address, gateway, network_id, state, strategy, vm_type, default_nic, created) VALUES (?, ?, ?, ?, 'Reserved', 'PlaceHolder', 'DomainRouter', 0, now())");) {
+ conn.prepareStatement("INSERT INTO `cloud`.`nics` (uuid, ip4_address, gateway, network_id, state, strategy, vm_type, default_nic, created) VALUES (?, ?, ?, ?, 'Reserved', 'PlaceHolder', 'DomainRouter', 0, now())")) {
insert_pstmt.setString(1, uuid);
insert_pstmt.setString(2, ip);
insert_pstmt.setString(3, gateway);
@@ -912,7 +858,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}catch (SQLException e) {
throw new CloudRuntimeException("Unable to create placeholder nics", e);
}
- logger.debug("Created placeholder nic for the ipAddress " + ip + " and network " + networkId);
+ logger.debug("Created placeholder nic for the ipAddress {} and network {}", ip, networkId);
}
}catch (SQLException e) {
throw new CloudRuntimeException("Unable to create placeholder nics", e);
@@ -923,13 +869,13 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
private void updateRemoteAccessVpn(Connection conn) {
- try(PreparedStatement pstmt = conn.prepareStatement("SELECT vpn_server_addr_id FROM `cloud`.`remote_access_vpn`");) {
- try(ResultSet rs = pstmt.executeQuery();) {
+ try(PreparedStatement pstmt = conn.prepareStatement("SELECT vpn_server_addr_id FROM `cloud`.`remote_access_vpn`")) {
+ try(ResultSet rs = pstmt.executeQuery()) {
long id = 1;
while (rs.next()) {
String uuid = UUID.randomUUID().toString();
- Long ipId = rs.getLong(1);
- try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`remote_access_vpn` set uuid=?, id=? where vpn_server_addr_id=?");) {
+ long ipId = rs.getLong(1);
+ try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`remote_access_vpn` set uuid=?, id=? where vpn_server_addr_id=?")) {
update_pstmt.setString(1, uuid);
update_pstmt.setLong(2, id);
update_pstmt.setLong(3, ipId);
@@ -949,44 +895,44 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void addEgressFwRulesForSRXGuestNw(Connection conn) {
ResultSet rs = null;
- try(PreparedStatement pstmt = conn.prepareStatement("select network_id FROM `cloud`.`ntwk_service_map` where service='Firewall' and provider='JuniperSRX' ");) {
+ try(PreparedStatement pstmt = conn.prepareStatement("select network_id FROM `cloud`.`ntwk_service_map` where service='Firewall' and provider='JuniperSRX' ")) {
rs = pstmt.executeQuery();
while (rs.next()) {
long netId = rs.getLong(1);
//checking for Isolated OR Virtual
try(PreparedStatement sel_net_pstmt =
- conn.prepareStatement("select account_id, domain_id FROM `cloud`.`networks` where (guest_type='Isolated' OR guest_type='Virtual') and traffic_type='Guest' and vpc_id is NULL and (state='implemented' OR state='Shutdown') and id=? ");) {
+ conn.prepareStatement("select account_id, domain_id FROM `cloud`.`networks` where (guest_type='Isolated' OR guest_type='Virtual') and traffic_type='Guest' and vpc_id is NULL and (state='implemented' OR state='Shutdown') and id=? ")) {
sel_net_pstmt.setLong(1, netId);
logger.debug("Getting account_id, domain_id from networks table: ");
- try(ResultSet rsNw = pstmt.executeQuery();)
+ try(ResultSet rsNw = sel_net_pstmt.executeQuery())
{
if (rsNw.next()) {
long accountId = rsNw.getLong(1);
long domainId = rsNw.getLong(2);
//Add new rule for the existing networks
- logger.debug("Adding default egress firewall rule for network " + netId);
+ logger.debug("Adding default egress firewall rule for network {}", netId);
try (PreparedStatement insert_pstmt =
- conn.prepareStatement("INSERT INTO firewall_rules (uuid, state, protocol, purpose, account_id, domain_id, network_id, xid, created, traffic_type) VALUES (?, 'Active', 'all', 'Firewall', ?, ?, ?, ?, now(), 'Egress')");) {
+ conn.prepareStatement("INSERT INTO firewall_rules (uuid, state, protocol, purpose, account_id, domain_id, network_id, xid, created, traffic_type) VALUES (?, 'Active', 'all', 'Firewall', ?, ?, ?, ?, now(), 'Egress')")) {
insert_pstmt.setString(1, UUID.randomUUID().toString());
insert_pstmt.setLong(2, accountId);
insert_pstmt.setLong(3, domainId);
insert_pstmt.setLong(4, netId);
insert_pstmt.setString(5, UUID.randomUUID().toString());
- logger.debug("Inserting default egress firewall rule " + insert_pstmt);
+ logger.debug("Inserting default egress firewall rule {}", insert_pstmt);
insert_pstmt.executeUpdate();
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to set egress firewall rules ", e);
}
- try (PreparedStatement sel_firewall_pstmt = conn.prepareStatement("select id from firewall_rules where protocol='all' and network_id=?");) {
+ try (PreparedStatement sel_firewall_pstmt = conn.prepareStatement("select id from firewall_rules where protocol='all' and network_id=?")) {
sel_firewall_pstmt.setLong(1, netId);
- try (ResultSet rsId = sel_firewall_pstmt.executeQuery();) {
+ try (ResultSet rsId = sel_firewall_pstmt.executeQuery()) {
long firewallRuleId;
if (rsId.next()) {
firewallRuleId = rsId.getLong(1);
- try (PreparedStatement insert_pstmt = conn.prepareStatement("insert into firewall_rules_cidrs (firewall_rule_id,source_cidr) values (?, '0.0.0.0/0')");) {
+ try (PreparedStatement insert_pstmt = conn.prepareStatement("insert into firewall_rules_cidrs (firewall_rule_id,source_cidr) values (?, '0.0.0.0/0')")) {
insert_pstmt.setLong(1, firewallRuleId);
- logger.debug("Inserting rule for cidr 0.0.0.0/0 for the new Firewall rule id=" + firewallRuleId + " with statement " + insert_pstmt);
+ logger.debug("Inserting rule for cidr 0.0.0.0/0 for the new Firewall rule id={} with statement {}", firewallRuleId, insert_pstmt);
insert_pstmt.executeUpdate();
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to set egress firewall rules ", e);
@@ -1008,15 +954,15 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
private void upgradeEIPNetworkOfferings(Connection conn) {
- try (PreparedStatement pstmt = conn.prepareStatement("select id, elastic_ip_service from `cloud`.`network_offerings` where traffic_type='Guest'");)
+ try (PreparedStatement pstmt = conn.prepareStatement("select id, elastic_ip_service from `cloud`.`network_offerings` where traffic_type='Guest'"))
{
- try(ResultSet rs = pstmt.executeQuery();) {
+ try(ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
long id = rs.getLong(1);
// check if elastic IP service is enabled for network offering
if (rs.getLong(2) != 0) {
//update network offering with eip_associate_public_ip set to true
- try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`network_offerings` set eip_associate_public_ip=? where id=?");) {
+ try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`network_offerings` set eip_associate_public_ip=? where id=?")) {
update_pstmt.setBoolean(1, true);
update_pstmt.setLong(2, id);
update_pstmt.executeUpdate();
@@ -1060,24 +1006,24 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
PreparedStatement pstmtSelectFirewallCidrs = conn.prepareStatement(sqlSelectFirewallCidrs);
PreparedStatement pstmtDeleteFirewallCidr = conn.prepareStatement(sqlDeleteFirewallCidr);
PreparedStatement pstmtDeleteFirewallRules = conn.prepareStatement(sqlDeleteFirewallRules);
- ResultSet rsNetworkIds = pstmtSelectNetworkIds.executeQuery();) {
+ ResultSet rsNetworkIds = pstmtSelectNetworkIds.executeQuery()) {
//Get all VPC tiers
while (rsNetworkIds.next()) {
- Long networkId = rsNetworkIds.getLong(1);
- logger.debug("Updating network ACLs for network: " + networkId);
- Long vpcId = rsNetworkIds.getLong(2);
+ long networkId = rsNetworkIds.getLong(1);
+ logger.debug("Updating network ACLs for network: {}", networkId);
+ long vpcId = rsNetworkIds.getLong(2);
String tierUuid = rsNetworkIds.getString(3);
pstmtSelectFirewallRules.setLong(1, networkId);
boolean hasAcls = false;
Long aclId = null;
int number = 1;
- try (ResultSet rsAcls = pstmtSelectFirewallRules.executeQuery();) {
+ try (ResultSet rsAcls = pstmtSelectFirewallRules.executeQuery()) {
while (rsAcls.next()) {
if (!hasAcls) {
hasAcls = true;
aclId = nextAclId++;
//create ACL for the tier
- logger.debug("Creating network ACL for tier: " + tierUuid);
+ logger.debug("Creating network ACL for tier: {}", tierUuid);
pstmtInsertNetworkAcl.setLong(1, aclId);
pstmtInsertNetworkAcl.setLong(2, vpcId);
pstmtInsertNetworkAcl.setString(3, "ACL for tier " + tierUuid);
@@ -1085,13 +1031,13 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
pstmtInsertNetworkAcl.executeUpdate();
}
- Long fwRuleId = rsAcls.getLong(1);
+ long fwRuleId = rsAcls.getLong(1);
String cidr = null;
//get cidr from firewall_rules_cidrs
pstmtSelectFirewallCidrs.setLong(1, fwRuleId);
- try (ResultSet rsCidr = pstmtSelectFirewallCidrs.executeQuery();) {
+ try (ResultSet rsCidr = pstmtSelectFirewallCidrs.executeQuery()) {
while (rsCidr.next()) {
- Long cidrId = rsCidr.getLong(1);
+ long cidrId = rsCidr.getLong(1);
String sourceCidr = rsCidr.getString(2);
if (cidr == null) {
cidr = sourceCidr;
@@ -1105,20 +1051,20 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
String aclItemUuid = rsAcls.getString(2);
//Move acl to network_acl_item table
- logger.debug("Moving firewall rule: " + aclItemUuid);
+ logger.debug("Moving firewall rule: {}", aclItemUuid);
//uuid
pstmtInsertNetworkAclItem.setString(1, aclItemUuid);
//aclId
pstmtInsertNetworkAclItem.setLong(2, aclId);
//Start port
- Integer startPort = rsAcls.getInt(3);
+ int startPort = rsAcls.getInt(3);
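+ // ResultSet.getInt() returns 0 for SQL NULL, so wasNull() is checked before binding to preserve NULL ports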
if (rsAcls.wasNull()) {
pstmtInsertNetworkAclItem.setNull(3, Types.INTEGER);
} else {
pstmtInsertNetworkAclItem.setLong(3, startPort);
}
//End port
- Integer endPort = rsAcls.getInt(4);
+ int endPort = rsAcls.getInt(4);
if (rsAcls.wasNull()) {
pstmtInsertNetworkAclItem.setNull(4, Types.INTEGER);
} else {
@@ -1131,7 +1077,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
String protocol = rsAcls.getString(6);
pstmtInsertNetworkAclItem.setString(6, protocol);
//icmp_code
- Integer icmpCode = rsAcls.getInt(7);
+ int icmpCode = rsAcls.getInt(7);
if (rsAcls.wasNull()) {
pstmtInsertNetworkAclItem.setNull(7, Types.INTEGER);
} else {
@@ -1139,7 +1085,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
//icmp_type
- Integer icmpType = rsAcls.getInt(8);
+ int icmpType = rsAcls.getInt(8);
if (rsAcls.wasNull()) {
pstmtInsertNetworkAclItem.setNull(8, Types.INTEGER);
} else {
@@ -1183,8 +1129,8 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
private void updateGlobalDeploymentPlanner(Connection conn) {
- try (PreparedStatement pstmt = conn.prepareStatement("select value from `cloud`.`configuration` where name = 'vm.allocation.algorithm'");){
- try(ResultSet rs = pstmt.executeQuery();)
+ try (PreparedStatement pstmt = conn.prepareStatement("select value from `cloud`.`configuration` where name = 'vm.allocation.algorithm'")){
+ try(ResultSet rs = pstmt.executeQuery())
{
while (rs.next()) {
String globalValue = rs.getString(1);
@@ -1195,16 +1141,12 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
plannerName = "FirstFitPlanner";
} else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.firstfit.toString())) {
plannerName = "FirstFitPlanner";
- } else if (globalValue.equals("userconcentratedpod_firstfit")) {
- plannerName = "UserConcentratedPodPlanner";
- } else if (globalValue.equals("userconcentratedpod_random")) {
- plannerName = "UserConcentratedPodPlanner";
} else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.userdispersing.toString())) {
plannerName = "UserDispersingPlanner";
}
}
// update vm.deployment.planner global config
- try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` set value=? where name = 'vm.deployment.planner'");) {
+ try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` set value=? where name = 'vm.deployment.planner'")) {
update_pstmt.setString(1, plannerName);
update_pstmt.executeUpdate();
} catch (SQLException e) {
@@ -1221,13 +1163,13 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void upgradeDefaultVpcOffering(Connection conn) {
try(PreparedStatement pstmt =
- conn.prepareStatement("select distinct map.vpc_offering_id from `cloud`.`vpc_offering_service_map` map, `cloud`.`vpc_offerings` off where off.id=map.vpc_offering_id AND service='Lb'");)
+ conn.prepareStatement("select distinct map.vpc_offering_id from `cloud`.`vpc_offering_service_map` map, `cloud`.`vpc_offerings` off where off.id=map.vpc_offering_id AND service='Lb'"))
{
- try(ResultSet rs = pstmt.executeQuery();) {
+ try(ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
long id = rs.getLong(1);
//Add internal LB vm as a supported provider for the load balancer service
- try(PreparedStatement insert_pstmt = conn.prepareStatement("INSERT INTO `cloud`.`vpc_offering_service_map` (vpc_offering_id, service, provider) VALUES (?,?,?)");) {
+ try(PreparedStatement insert_pstmt = conn.prepareStatement("INSERT INTO `cloud`.`vpc_offering_service_map` (vpc_offering_id, service, provider) VALUES (?,?,?)")) {
insert_pstmt.setLong(1, id);
insert_pstmt.setString(2, "Lb");
insert_pstmt.setString(3, "InternalLbVm");
@@ -1245,27 +1187,27 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
private void upgradePhysicalNtwksWithInternalLbProvider(Connection conn) {
- try (PreparedStatement pstmt = conn.prepareStatement("SELECT id FROM `cloud`.`physical_network` where removed is null");){
- try(ResultSet rs = pstmt.executeQuery();) {
+ try (PreparedStatement pstmt = conn.prepareStatement("SELECT id FROM `cloud`.`physical_network` where removed is null")){
+ try(ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
long pNtwkId = rs.getLong(1);
String uuid = UUID.randomUUID().toString();
//Add internal LB VM to the list of physical network service providers
try(PreparedStatement insert_pstmt = conn.prepareStatement("INSERT INTO `cloud`.`physical_network_service_providers` "
+ "(uuid, physical_network_id, provider_name, state, load_balance_service_provided, destination_physical_network_id)"
- + " VALUES (?, ?, 'InternalLbVm', 'Enabled', 1, 0)");) {
+ + " VALUES (?, ?, 'InternalLbVm', 'Enabled', 1, 0)")) {
insert_pstmt.setString(1, uuid);
insert_pstmt.setLong(2, pNtwkId);
insert_pstmt.executeUpdate();
//Add internal lb vm to the list of physical network elements
try (PreparedStatement pstmt1 =
- conn.prepareStatement("SELECT id FROM `cloud`.`physical_network_service_providers`" + " WHERE physical_network_id=? AND provider_name='InternalLbVm'");) {
+ conn.prepareStatement("SELECT id FROM `cloud`.`physical_network_service_providers`" + " WHERE physical_network_id=? AND provider_name='InternalLbVm'")) {
pstmt1.setLong(1, pNtwkId);
- try (ResultSet rs1 = pstmt1.executeQuery();) {
+ try (ResultSet rs1 = pstmt1.executeQuery()) {
while (rs1.next()) {
long providerId = rs1.getLong(1);
uuid = UUID.randomUUID().toString();
- try(PreparedStatement insert_cloud_pstmt = conn.prepareStatement("INSERT INTO `cloud`.`virtual_router_providers` (nsp_id, uuid, type, enabled) VALUES (?, ?, 'InternalLbVm', 1)");) {
+ try(PreparedStatement insert_cloud_pstmt = conn.prepareStatement("INSERT INTO `cloud`.`virtual_router_providers` (nsp_id, uuid, type, enabled) VALUES (?, ?, 'InternalLbVm', 1)")) {
insert_cloud_pstmt.setLong(1, providerId);
insert_cloud_pstmt.setString(2, uuid);
insert_cloud_pstmt.executeUpdate();
@@ -1291,14 +1233,14 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void addHostDetailsIndex(Connection conn) {
logger.debug("Checking if host_details index exists, if not we will add it");
- try(PreparedStatement pstmt = conn.prepareStatement("SHOW INDEX FROM `cloud`.`host_details` where KEY_NAME = 'fk_host_details__host_id'");)
+ try(PreparedStatement pstmt = conn.prepareStatement("SHOW INDEX FROM `cloud`.`host_details` where KEY_NAME = 'fk_host_details__host_id'"))
{
- try(ResultSet rs = pstmt.executeQuery();) {
+ try(ResultSet rs = pstmt.executeQuery()) {
if (rs.next()) {
logger.debug("Index already exists on host_details - not adding new one");
} else {
// add the index
- try(PreparedStatement pstmtUpdate = conn.prepareStatement("ALTER TABLE `cloud`.`host_details` ADD INDEX `fk_host_details__host_id` (`host_id`)");) {
+ try(PreparedStatement pstmtUpdate = conn.prepareStatement("ALTER TABLE `cloud`.`host_details` ADD INDEX `fk_host_details__host_id` (`host_id`)")) {
pstmtUpdate.executeUpdate();
logger.debug("Index did not exist on host_details - added new one");
}catch (SQLException e) {
@@ -1314,15 +1256,15 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
private void updateNetworksForPrivateGateways(Connection conn) {
- try(PreparedStatement pstmt = conn.prepareStatement("SELECT network_id, vpc_id FROM `cloud`.`vpc_gateways` WHERE type='Private' AND removed IS null");)
+ try(PreparedStatement pstmt = conn.prepareStatement("SELECT network_id, vpc_id FROM `cloud`.`vpc_gateways` WHERE type='Private' AND removed IS null"))
{
//1) get all non removed gateways
- try(ResultSet rs = pstmt.executeQuery();) {
+ try(ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
- Long networkId = rs.getLong(1);
- Long vpcId = rs.getLong(2);
+ long networkId = rs.getLong(1);
+ long vpcId = rs.getLong(2);
//2) Update networks with vpc_id if its set to NULL
- try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`networks` set vpc_id=? where id=? and vpc_id is NULL and removed is NULL");) {
+ try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`networks` set vpc_id=? where id=? and vpc_id is NULL and removed is NULL")) {
update_pstmt.setLong(1, vpcId);
update_pstmt.setLong(2, networkId);
update_pstmt.executeUpdate();
@@ -1339,13 +1281,13 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
private void removeFirewallServiceFromSharedNetworkOfferingWithSGService(Connection conn) {
- try(PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`network_offerings` where unique_name='DefaultSharedNetworkOfferingWithSGService'");)
+ try(PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`network_offerings` where unique_name='DefaultSharedNetworkOfferingWithSGService'"))
{
- try(ResultSet rs = pstmt.executeQuery();) {
+ try(ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
long id = rs.getLong(1);
// remove Firewall service for SG shared network offering
- try(PreparedStatement del_pstmt = conn.prepareStatement("DELETE from `cloud`.`ntwk_offering_service_map` where network_offering_id=? and service='Firewall'");) {
+ try(PreparedStatement del_pstmt = conn.prepareStatement("DELETE from `cloud`.`ntwk_offering_service_map` where network_offering_id=? and service='Firewall'")) {
del_pstmt.setLong(1, id);
del_pstmt.executeUpdate();
}catch (SQLException e) {
@@ -1362,9 +1304,9 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void fix22xKVMSnapshots(Connection conn) {
logger.debug("Updating KVM snapshots");
- try (PreparedStatement pstmt = conn.prepareStatement("select id, backup_snap_id from `cloud`.`snapshots` where hypervisor_type='KVM' and removed is null and backup_snap_id is not null");)
+ try (PreparedStatement pstmt = conn.prepareStatement("select id, backup_snap_id from `cloud`.`snapshots` where hypervisor_type='KVM' and removed is null and backup_snap_id is not null"))
{
- try(ResultSet rs = pstmt.executeQuery();) {
+ try(ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
long id = rs.getLong(1);
String backUpPath = rs.getString(2);
@@ -1374,8 +1316,8 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
int index = backUpPath.indexOf("snapshots" + File.separator);
if (index > 1) {
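+ // drop the absolute prefix so only the path starting at "snapshots/" is kept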
String correctedPath = backUpPath.substring(index);
- logger.debug("Updating Snapshot with id: " + id + " original backup path: " + backUpPath + " updated backup path: " + correctedPath);
- try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`snapshots` set backup_snap_id=? where id = ?");) {
+ logger.debug("Updating Snapshot with id: {} original backup path: {} updated backup path: {}", id, backUpPath, correctedPath);
+ try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`snapshots` set backup_snap_id=? where id = ?")) {
update_pstmt.setString(1, correctedPath);
update_pstmt.setLong(2, id);
update_pstmt.executeUpdate();
@@ -1401,8 +1343,8 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
try (
PreparedStatement zoneSearchStmt = conn.prepareStatement("SELECT id, networktype FROM `cloud`.`data_center`");
- ResultSet zoneResults = zoneSearchStmt.executeQuery();
- ){
+ ResultSet zoneResults = zoneSearchStmt.executeQuery()
+ ){
while (zoneResults.next()) {
long zoneId = zoneResults.getLong(1);
String networkType = zoneResults.getString(2);
@@ -1433,7 +1375,6 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
// balancers added in the zone
while (f5DevicesResult.next()) {
long f5HostId = f5DevicesResult.getLong(1);
- ;
addF5ServiceProvider(conn, physicalNetworkId, zoneId);
addF5LoadBalancer(conn, f5HostId, physicalNetworkId);
}
@@ -1443,7 +1384,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
try (PreparedStatement fetchSRXNspStmt =
conn.prepareStatement("SELECT id from `cloud`.`physical_network_service_providers` where physical_network_id=" + physicalNetworkId +
" and provider_name = 'JuniperSRX'");
- ResultSet rsSRXNSP = fetchSRXNspStmt.executeQuery();) {
+ ResultSet rsSRXNSP = fetchSRXNspStmt.executeQuery()) {
hasSrxNsp = rsSRXNSP.next();
}
@@ -1477,8 +1418,8 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
String insertF5 =
"INSERT INTO `cloud`.`external_load_balancer_devices` (physical_network_id, host_id, provider_name, "
+ "device_name, capacity, is_dedicated, device_state, allocation_state, is_managed, uuid) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
- try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertF5);) {
- logger.debug("Adding F5 Big IP load balancer with host id " + hostId + " in to physical network" + physicalNetworkId);
+ try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertF5)) {
+ logger.debug("Adding F5 Big IP load balancer with host id {} in to physical network{}", hostId, physicalNetworkId);
pstmtUpdate.setLong(1, physicalNetworkId);
pstmtUpdate.setLong(2, hostId);
pstmtUpdate.setString(3, "F5BigIp");
@@ -1499,8 +1440,8 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
String insertSrx =
"INSERT INTO `cloud`.`external_firewall_devices` (physical_network_id, host_id, provider_name, "
+ "device_name, capacity, is_dedicated, device_state, allocation_state, uuid) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?)";
- try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertSrx);) {
- logger.debug("Adding SRX firewall device with host id " + hostId + " in to physical network" + physicalNetworkId);
+ try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertSrx)) {
+ logger.debug("Adding SRX firewall device with host id {} in to physical network{}", hostId, physicalNetworkId);
pstmtUpdate.setLong(1, physicalNetworkId);
pstmtUpdate.setLong(2, hostId);
pstmtUpdate.setString(3, "JuniperSRX");
@@ -1522,9 +1463,9 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
+ "`destination_physical_network_id`, `vpn_service_provided`, `dhcp_service_provided`, `dns_service_provided`, `gateway_service_provided`,"
+ "`firewall_service_provided`, `source_nat_service_provided`, `load_balance_service_provided`, `static_nat_service_provided`,"
+ "`port_forwarding_service_provided`, `user_data_service_provided`, `security_group_service_provided`) VALUES (?,?,?,?,0,0,0,0,0,0,0,1,0,0,0,0)";
- try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertPNSP);) {
+ try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertPNSP)) {
// add physical network service provider - F5BigIp
- logger.debug("Adding PhysicalNetworkServiceProvider F5BigIp" + " in to physical network" + physicalNetworkId);
+ logger.debug("Adding PhysicalNetworkServiceProvider F5BigIp in to physical network{}", physicalNetworkId);
pstmtUpdate.setString(1, UUID.randomUUID().toString());
pstmtUpdate.setLong(2, physicalNetworkId);
pstmtUpdate.setString(3, "F5BigIp");
@@ -1541,7 +1482,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
+ "`destination_physical_network_id`, `vpn_service_provided`, `dhcp_service_provided`, `dns_service_provided`, `gateway_service_provided`,"
+ "`firewall_service_provided`, `source_nat_service_provided`, `load_balance_service_provided`, `static_nat_service_provided`,"
+ "`port_forwarding_service_provided`, `user_data_service_provided`, `security_group_service_provided`) VALUES (?,?,?,?,0,0,0,0,1,1,1,0,1,1,0,0)";
- try( PreparedStatement pstmtUpdate = conn.prepareStatement(insertPNSP);) {
+ try( PreparedStatement pstmtUpdate = conn.prepareStatement(insertPNSP)) {
// add physical network service provider - JuniperSRX
logger.debug("Adding PhysicalNetworkServiceProvider JuniperSRX");
pstmtUpdate.setString(1, UUID.randomUUID().toString());
@@ -1563,16 +1504,15 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
// they are made in lowercase. On upgrade change the host details name to lower case
private void fixZoneUsingExternalDevices(Connection conn) {
//Get zones to upgrade
- List<Long> zoneIds = new ArrayList<Long>();
- ResultSet rs = null;
+ List<Long> zoneIds = new ArrayList<>();
long networkOfferingId, networkId;
long f5DeviceId, f5HostId;
long srxDevivceId, srxHostId;
try(PreparedStatement sel_id_pstmt =
- conn.prepareStatement("select id from `cloud`.`data_center` where lb_provider='F5BigIp' or firewall_provider='JuniperSRX' or gateway_provider='JuniperSRX'");)
+ conn.prepareStatement("select id from `cloud`.`data_center` where lb_provider='F5BigIp' or firewall_provider='JuniperSRX' or gateway_provider='JuniperSRX'"))
{
- try(ResultSet sel_id_rs = sel_id_pstmt.executeQuery();) {
+ try(ResultSet sel_id_rs = sel_id_pstmt.executeQuery()) {
while (sel_id_rs.next()) {
zoneIds.add(sel_id_rs.getLong(1));
}
@@ -1583,14 +1523,14 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
throw new CloudRuntimeException("fixZoneUsingExternalDevices:Exception:"+e.getMessage(), e);
}
- if (zoneIds.size() == 0) {
+ if (zoneIds.isEmpty()) {
return; // no zones using F5 and SRX devices so return
}
// find the default network offering created for external devices during upgrade from 2.2.14
- try(PreparedStatement sel_id_off_pstmt = conn.prepareStatement("select id from `cloud`.`network_offerings` where unique_name='Isolated with external providers' ");)
+ try(PreparedStatement sel_id_off_pstmt = conn.prepareStatement("select id from `cloud`.`network_offerings` where unique_name='Isolated with external providers' "))
{
- try(ResultSet sel_id_off_rs = sel_id_off_pstmt.executeQuery();) {
+ try(ResultSet sel_id_off_rs = sel_id_off_pstmt.executeQuery()) {
if (sel_id_off_rs.first()) {
networkOfferingId = sel_id_off_rs.getLong(1);
} else {
@@ -1605,9 +1545,9 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
for (Long zoneId : zoneIds) {
try {
// find the F5 device id in the zone
- try(PreparedStatement sel_id_host_pstmt = conn.prepareStatement("SELECT id FROM host WHERE data_center_id=? AND type = 'ExternalLoadBalancer' AND removed IS NULL");) {
+ try(PreparedStatement sel_id_host_pstmt = conn.prepareStatement("SELECT id FROM host WHERE data_center_id=? AND type = 'ExternalLoadBalancer' AND removed IS NULL")) {
sel_id_host_pstmt.setLong(1, zoneId);
- try(ResultSet sel_id_host_pstmt_rs = sel_id_host_pstmt.executeQuery();) {
+ try(ResultSet sel_id_host_pstmt_rs = sel_id_host_pstmt.executeQuery()) {
if (sel_id_host_pstmt_rs.first()) {
f5HostId = sel_id_host_pstmt_rs.getLong(1);
} else {
@@ -1619,9 +1559,9 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}catch (SQLException e) {
throw new CloudRuntimeException("fixZoneUsingExternalDevices:Exception:"+e.getMessage(), e);
}
- try(PreparedStatement sel_id_ext_pstmt = conn.prepareStatement("SELECT id FROM external_load_balancer_devices WHERE host_id=?");) {
+ try(PreparedStatement sel_id_ext_pstmt = conn.prepareStatement("SELECT id FROM external_load_balancer_devices WHERE host_id=?")) {
sel_id_ext_pstmt.setLong(1, f5HostId);
- try(ResultSet sel_id_ext_rs = sel_id_ext_pstmt.executeQuery();) {
+ try(ResultSet sel_id_ext_rs = sel_id_ext_pstmt.executeQuery()) {
if (sel_id_ext_rs.first()) {
f5DeviceId = sel_id_ext_rs.getLong(1);
} else {
@@ -1636,9 +1576,9 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
// find the SRX device id in the zone
- try(PreparedStatement sel_id_hostdc_pstmt = conn.prepareStatement("SELECT id FROM host WHERE data_center_id=? AND type = 'ExternalFirewall' AND removed IS NULL");) {
+ try(PreparedStatement sel_id_hostdc_pstmt = conn.prepareStatement("SELECT id FROM host WHERE data_center_id=? AND type = 'ExternalFirewall' AND removed IS NULL")) {
sel_id_hostdc_pstmt.setLong(1, zoneId);
- try(ResultSet sel_id_hostdc_pstmt_rs = sel_id_hostdc_pstmt.executeQuery();) {
+ try(ResultSet sel_id_hostdc_pstmt_rs = sel_id_hostdc_pstmt.executeQuery()) {
if (sel_id_hostdc_pstmt_rs.first()) {
srxHostId = sel_id_hostdc_pstmt_rs.getLong(1);
} else {
@@ -1651,9 +1591,9 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
throw new CloudRuntimeException("fixZoneUsingExternalDevices:Exception:"+e.getMessage(), e);
}
- try(PreparedStatement sel_id_ext_frwl_pstmt = conn.prepareStatement("SELECT id FROM external_firewall_devices WHERE host_id=?");) {
+ try(PreparedStatement sel_id_ext_frwl_pstmt = conn.prepareStatement("SELECT id FROM external_firewall_devices WHERE host_id=?")) {
sel_id_ext_frwl_pstmt.setLong(1, srxHostId);
- try(ResultSet sel_id_ext_frwl_pstmt_rs = sel_id_ext_frwl_pstmt.executeQuery();) {
+ try(ResultSet sel_id_ext_frwl_pstmt_rs = sel_id_ext_frwl_pstmt.executeQuery()) {
if (sel_id_ext_frwl_pstmt_rs.first()) {
srxDevivceId = sel_id_ext_frwl_pstmt_rs.getLong(1);
} else {
@@ -1669,10 +1609,10 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
// check if network any uses F5 or SRX devices in the zone
try(PreparedStatement sel_id_cloud_pstmt =
- conn.prepareStatement("select id from `cloud`.`networks` where guest_type='Virtual' and data_center_id=? and network_offering_id=? and removed IS NULL");) {
+ conn.prepareStatement("select id from `cloud`.`networks` where guest_type='Virtual' and data_center_id=? and network_offering_id=? and removed IS NULL")) {
sel_id_cloud_pstmt.setLong(1, zoneId);
sel_id_cloud_pstmt.setLong(2, networkOfferingId);
- try(ResultSet sel_id_cloud_pstmt_rs = sel_id_cloud_pstmt.executeQuery();) {
+ try(ResultSet sel_id_cloud_pstmt_rs = sel_id_cloud_pstmt.executeQuery()) {
while (sel_id_cloud_pstmt_rs.next()) {
// get the network Id
networkId = sel_id_cloud_pstmt_rs.getLong(1);
@@ -1680,7 +1620,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
// add mapping for the network in network_external_lb_device_map
String insertLbMapping =
"INSERT INTO `cloud`.`network_external_lb_device_map` (uuid, network_id, external_load_balancer_device_id, created) VALUES ( ?, ?, ?, now())";
- try (PreparedStatement insert_lb_stmt = conn.prepareStatement(insertLbMapping);) {
+ try (PreparedStatement insert_lb_stmt = conn.prepareStatement(insertLbMapping)) {
insert_lb_stmt.setString(1, UUID.randomUUID().toString());
insert_lb_stmt.setLong(2, networkId);
insert_lb_stmt.setLong(3, f5DeviceId);
@@ -1688,12 +1628,12 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
} catch (SQLException e) {
throw new CloudRuntimeException("Unable create a mapping for the networks in network_external_lb_device_map and network_external_firewall_device_map", e);
}
- logger.debug("Successfully added entry in network_external_lb_device_map for network " + networkId + " and F5 device ID " + f5DeviceId);
+ logger.debug("Successfully added entry in network_external_lb_device_map for network {} and F5 device ID {}", networkId, f5DeviceId);
// add mapping for the network in network_external_firewall_device_map
String insertFwMapping =
"INSERT INTO `cloud`.`network_external_firewall_device_map` (uuid, network_id, external_firewall_device_id, created) VALUES ( ?, ?, ?, now())";
- try (PreparedStatement insert_ext_firewall_stmt = conn.prepareStatement(insertFwMapping);) {
+ try (PreparedStatement insert_ext_firewall_stmt = conn.prepareStatement(insertFwMapping)) {
insert_ext_firewall_stmt.setString(1, UUID.randomUUID().toString());
insert_ext_firewall_stmt.setLong(2, networkId);
insert_ext_firewall_stmt.setLong(3, srxDevivceId);
@@ -1701,7 +1641,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
} catch (SQLException e) {
throw new CloudRuntimeException("Unable create a mapping for the networks in network_external_lb_device_map and network_external_firewall_device_map", e);
}
- logger.debug("Successfully added entry in network_external_firewall_device_map for network " + networkId + " and SRX device ID " + srxDevivceId);
+ logger.debug("Successfully added entry in network_external_firewall_device_map for network {} and SRX device ID {}", networkId, srxDevivceId);
}
}catch (SQLException e) {
throw new CloudRuntimeException("Unable create a mapping for the networks in network_external_lb_device_map and network_external_firewall_device_map", e);
@@ -1711,10 +1651,10 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
// update host details for F5 and SRX devices
logger.debug("Updating the host details for F5 and SRX devices");
- try(PreparedStatement sel_pstmt = conn.prepareStatement("SELECT host_id, name FROM `cloud`.`host_details` WHERE host_id=? OR host_id=?");) {
+ try(PreparedStatement sel_pstmt = conn.prepareStatement("SELECT host_id, name FROM `cloud`.`host_details` WHERE host_id=? OR host_id=?")) {
sel_pstmt.setLong(1, f5HostId);
sel_pstmt.setLong(2, srxHostId);
- try(ResultSet sel_rs = sel_pstmt.executeQuery();) {
+ try(ResultSet sel_rs = sel_pstmt.executeQuery()) {
while (sel_rs.next()) {
long hostId = sel_rs.getLong(1);
String camlCaseName = sel_rs.getString(2);
@@ -1723,7 +1663,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
continue;
}
String lowerCaseName = camlCaseName.toLowerCase();
- try (PreparedStatement update_pstmt = conn.prepareStatement("update `cloud`.`host_details` set name=? where host_id=? AND name=?");) {
+ try (PreparedStatement update_pstmt = conn.prepareStatement("update `cloud`.`host_details` set name=? where host_id=? AND name=?")) {
update_pstmt.setString(1, lowerCaseName);
update_pstmt.setLong(2, hostId);
update_pstmt.setString(3, camlCaseName);
@@ -1750,7 +1690,6 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void migrateSecondaryStorageToImageStore(Connection conn) {
String sqlSelectS3Count = "select count(*) from `cloud`.`s3`";
String sqlSelectSwiftCount = "select count(*) from `cloud`.`swift`";
- String sqlInsertStoreDetail = "INSERT INTO `cloud`.`image_store_details` (store_id, name, value) values(?, ?, ?)";
String sqlUpdateHostAsRemoved = "UPDATE `cloud`.`host` SET removed = now() WHERE type = 'SecondaryStorage' and removed is null";
logger.debug("Migrating secondary storage to image store");
@@ -1758,7 +1697,6 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
try (
PreparedStatement pstmtSelectS3Count = conn.prepareStatement(sqlSelectS3Count);
PreparedStatement pstmtSelectSwiftCount = conn.prepareStatement(sqlSelectSwiftCount);
- PreparedStatement storeDetailInsert = conn.prepareStatement(sqlInsertStoreDetail);
PreparedStatement storeInsert =
conn.prepareStatement("INSERT INTO `cloud`.`image_store` (id, uuid, name, image_provider_name, protocol, url, data_center_id, scope, role, parent, total_size, created, removed) values(?, ?, ?, 'NFS', 'nfs', ?, ?, 'ZONE', ?, ?, ?, ?, ?)");
PreparedStatement nfsQuery =
@@ -1766,8 +1704,8 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
PreparedStatement pstmtUpdateHostAsRemoved = conn.prepareStatement(sqlUpdateHostAsRemoved);
ResultSet rsSelectS3Count = pstmtSelectS3Count.executeQuery();
ResultSet rsSelectSwiftCount = pstmtSelectSwiftCount.executeQuery();
- ResultSet rsNfs = nfsQuery.executeQuery();
- ) {
+ ResultSet rsNfs = nfsQuery.executeQuery()
+ ) {
logger.debug("Checking if we need to migrate NFS secondary storage to image store or staging store");
int numRows = 0;
if (rsSelectS3Count.next()) {
@@ -1786,11 +1724,11 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
store_role = "ImageCache";
}
- logger.debug("Migrating NFS secondary storage to " + store_role + " store");
+ logger.debug("Migrating NFS secondary storage to {} store", store_role);
// migrate NFS secondary storage, for nfs, keep previous host_id as the store_id
while (rsNfs.next()) {
- Long nfs_id = rsNfs.getLong("id");
+ long nfs_id = rsNfs.getLong("id");
String nfs_uuid = rsNfs.getString("uuid");
String nfs_url = rsNfs.getString("url");
String nfs_parent = rsNfs.getString("parent");
@@ -1832,19 +1770,19 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void migrateVolumeHostRef(Connection conn) {
logger.debug("Updating volume_store_ref table from volume_host_ref table");
try(PreparedStatement volStoreInsert =
- conn.prepareStatement("INSERT INTO `cloud`.`volume_store_ref` (store_id, volume_id, zone_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, checksum, error_str, local_path, install_path, url, destroyed, update_count, ref_cnt, state) select host_id, volume_id, zone_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, checksum, error_str, local_path, install_path, url, destroyed, 0, 0, 'Allocated' from `cloud`.`volume_host_ref`");)
+ conn.prepareStatement("INSERT INTO `cloud`.`volume_store_ref` (store_id, volume_id, zone_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, checksum, error_str, local_path, install_path, url, destroyed, update_count, ref_cnt, state) select host_id, volume_id, zone_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, checksum, error_str, local_path, install_path, url, destroyed, 0, 0, 'Allocated' from `cloud`.`volume_host_ref`"))
{
int rowCount = volStoreInsert.executeUpdate();
- logger.debug("Insert modified " + rowCount + " rows");
- try(PreparedStatement volStoreUpdate = conn.prepareStatement("update `cloud`.`volume_store_ref` set state = 'Ready' where download_state = 'DOWNLOADED'");) {
+ logger.debug(INSERT_MODIFIED_ROWS, rowCount);
+ try(PreparedStatement volStoreUpdate = conn.prepareStatement("update `cloud`.`volume_store_ref` set state = 'Ready' where download_state = 'DOWNLOADED'")) {
rowCount = volStoreUpdate.executeUpdate();
- logger.debug("Update modified " + rowCount + " rows");
+ logger.debug(UPDATE_MODIFIED_ROWS, rowCount);
}catch (SQLException e) {
- logger.error("Unable to migrate volume_host_ref." + e.getMessage(),e);
+ logger.error("Unable to migrate volume_host_ref.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to migrate volume_host_ref." + e.getMessage(),e);
}
} catch (SQLException e) {
- logger.error("Unable to migrate volume_host_ref." + e.getMessage(),e);
+ logger.error("Unable to migrate volume_host_ref.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to migrate volume_host_ref." + e.getMessage(),e);
}
logger.debug("Completed updating volume_store_ref table from volume_host_ref table");
@@ -1854,20 +1792,20 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void migrateTemplateHostRef(Connection conn) {
logger.debug("Updating template_store_ref table from template_host_ref table");
try (PreparedStatement tmplStoreInsert =
- conn.prepareStatement("INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, error_str, local_path, install_path, url, destroyed, is_copy, update_count, ref_cnt, store_role, state) select host_id, template_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, error_str, local_path, install_path, url, destroyed, is_copy, 0, 0, 'Image', 'Allocated' from `cloud`.`template_host_ref`");)
+ conn.prepareStatement("INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, error_str, local_path, install_path, url, destroyed, is_copy, update_count, ref_cnt, store_role, state) select host_id, template_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, error_str, local_path, install_path, url, destroyed, is_copy, 0, 0, 'Image', 'Allocated' from `cloud`.`template_host_ref`"))
{
int rowCount = tmplStoreInsert.executeUpdate();
- logger.debug("Insert modified " + rowCount + " rows");
+ logger.debug(INSERT_MODIFIED_ROWS, rowCount);
- try(PreparedStatement tmplStoreUpdate = conn.prepareStatement("update `cloud`.`template_store_ref` set state = 'Ready' where download_state = 'DOWNLOADED'");) {
+ try(PreparedStatement tmplStoreUpdate = conn.prepareStatement("update `cloud`.`template_store_ref` set state = 'Ready' where download_state = 'DOWNLOADED'")) {
rowCount = tmplStoreUpdate.executeUpdate();
}catch (SQLException e) {
- logger.error("Unable to migrate template_host_ref." + e.getMessage(),e);
+ logger.error("Unable to migrate template_host_ref.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to migrate template_host_ref." + e.getMessage(), e);
}
- logger.debug("Update modified " + rowCount + " rows");
+ logger.debug(UPDATE_MODIFIED_ROWS, rowCount);
} catch (SQLException e) {
- logger.error("Unable to migrate template_host_ref." + e.getMessage(),e);
+ logger.error("Unable to migrate template_host_ref.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to migrate template_host_ref." + e.getMessage(), e);
}
logger.debug("Completed updating template_store_ref table from template_host_ref table");
@@ -1877,22 +1815,22 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void migrateSnapshotStoreRef(Connection conn) {
logger.debug("Updating snapshot_store_ref table from snapshots table");
try(PreparedStatement snapshotStoreInsert =
- conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) select sechost_id, id, created, size, prev_snap_id, CONCAT('snapshots', '/', account_id, '/', volume_id, '/', backup_snap_id), volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type <> 'KVM' and sechost_id is not null and removed is null");
+ conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) select sechost_id, id, created, size, prev_snap_id, CONCAT('snapshots', '/', account_id, '/', volume_id, '/', backup_snap_id), volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type <> 'KVM' and sechost_id is not null and removed is null")
) {
//Update all snapshots except KVM snapshots
int rowCount = snapshotStoreInsert.executeUpdate();
- logger.debug("Inserted " + rowCount + " snapshots into snapshot_store_ref");
+ logger.debug("Inserted {} snapshots into snapshot_store_ref", rowCount);
//backsnap_id for KVM snapshots is complete path. CONCAT is not required
try(PreparedStatement snapshotStoreInsert_2 =
- conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) select sechost_id, id, created, size, prev_snap_id, backup_snap_id, volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type = 'KVM' and sechost_id is not null and removed is null");) {
+ conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) select sechost_id, id, created, size, prev_snap_id, backup_snap_id, volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type = 'KVM' and sechost_id is not null and removed is null")) {
rowCount = snapshotStoreInsert_2.executeUpdate();
- logger.debug("Inserted " + rowCount + " KVM snapshots into snapshot_store_ref");
+ logger.debug("Inserted {} KVM snapshots into snapshot_store_ref", rowCount);
}catch (SQLException e) {
- logger.error("Unable to migrate snapshot_store_ref." + e.getMessage(),e);
+ logger.error("Unable to migrate snapshot_store_ref.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to migrate snapshot_store_ref." + e.getMessage(),e);
}
} catch (SQLException e) {
- logger.error("Unable to migrate snapshot_store_ref." + e.getMessage(),e);
+ logger.error("Unable to migrate snapshot_store_ref.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to migrate snapshot_store_ref." + e.getMessage(),e);
}
logger.debug("Completed updating snapshot_store_ref table from snapshots table");
@@ -1901,7 +1839,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
// migrate secondary storages S3 from s3 tables to image_store table
private void migrateS3ToImageStore(Connection conn) {
Long storeId = null;
- Map<Long, Long> s3_store_id_map = new HashMap<Long, Long>();
+ Map<Long, Long> s3_store_id_map = new HashMap<>();
logger.debug("Migrating S3 to image store");
try (
@@ -1913,8 +1851,8 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
"values(?, ?, 'S3', ?, 'REGION', 'Image', ?)");
PreparedStatement s3Query = conn.prepareStatement("select id, uuid, access_key, secret_key, end_point, bucket, https, connection_timeout, " +
"max_error_retry, socket_timeout, created from `cloud`.`s3`");
- ResultSet rs = s3Query.executeQuery();
- ) {
+ ResultSet rs = s3Query.executeQuery()
+ ) {
while (rs.next()) {
Long s3_id = rs.getLong("id");
@@ -1923,7 +1861,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
String s3_secretkey = rs.getString("secret_key");
String s3_endpoint = rs.getString("end_point");
String s3_bucket = rs.getString("bucket");
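+ // a NULL https column is treated as false; any non-zero value enables HTTPS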
- boolean s3_https = rs.getObject("https") != null ? (rs.getInt("https") == 0 ? false : true) : false;
+ boolean s3_https = rs.getObject("https") != null && (rs.getInt("https") != 0);
Integer s3_connectiontimeout = rs.getObject("connection_timeout") != null ? rs.getInt("connection_timeout") : null;
Integer s3_retry = rs.getObject("max_error_retry") != null ? rs.getInt("max_error_retry") : null;
Integer s3_sockettimeout = rs.getObject("socket_timeout") != null ? rs.getInt("socket_timeout") : null;
@@ -1939,13 +1877,13 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
storeInsert.executeUpdate();
storeQuery.setString(1, s3_uuid);
- try (ResultSet storeInfo = storeQuery.executeQuery();) {
+ try (ResultSet storeInfo = storeQuery.executeQuery()) {
if (storeInfo.next()) {
storeId = storeInfo.getLong("id");
}
}
- Map<String, String> detailMap = new HashMap<String, String>();
+ Map<String, String> detailMap = new HashMap<>();
detailMap.put(ApiConstants.S3_ACCESS_KEY, s3_accesskey);
detailMap.put(ApiConstants.S3_SECRET_KEY, s3_secretkey);
detailMap.put(ApiConstants.S3_BUCKET_NAME, s3_bucket);
@@ -1961,9 +1899,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
detailMap.put(ApiConstants.S3_SOCKET_TIMEOUT, String.valueOf(s3_sockettimeout));
}
- Iterator<String> keyIt = detailMap.keySet().iterator();
- while (keyIt.hasNext()) {
- String key = keyIt.next();
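+ // persist each S3 setting as a key/value row via storeDetailInsert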
+ for (String key : detailMap.keySet()) {
String val = detailMap.get(key);
storeDetailInsert.setLong(1, storeId);
storeDetailInsert.setString(2, key);
@@ -1991,18 +1927,18 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void migrateTemplateS3Ref(Connection conn, Map<Long, Long> s3StoreMap) {
logger.debug("Updating template_store_ref table from template_s3_ref table");
try(PreparedStatement tmplStoreInsert =
- conn.prepareStatement("INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, download_pct, size, physical_size, download_state, local_path, install_path, update_count, ref_cnt, store_role, state) values(?, ?, ?, 100, ?, ?, 'DOWNLOADED', '?', '?', 0, 0, 'Image', 'Ready')");
+ conn.prepareStatement("INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, download_pct, size, physical_size, download_state, local_path, install_path, update_count, ref_cnt, store_role, state) values(?, ?, ?, 100, ?, ?, 'DOWNLOADED', '?', '?', 0, 0, 'Image', 'Ready')")
) {
try(PreparedStatement s3Query =
- conn.prepareStatement("select template_s3_ref.s3_id, template_s3_ref.template_id, template_s3_ref.created, template_s3_ref.size, template_s3_ref.physical_size, vm_template.account_id from `cloud`.`template_s3_ref`, `cloud`.`vm_template` where vm_template.id = template_s3_ref.template_id");) {
- try(ResultSet rs = s3Query.executeQuery();) {
+ conn.prepareStatement("select template_s3_ref.s3_id, template_s3_ref.template_id, template_s3_ref.created, template_s3_ref.size, template_s3_ref.physical_size, vm_template.account_id from `cloud`.`template_s3_ref`, `cloud`.`vm_template` where vm_template.id = template_s3_ref.template_id")) {
+ try(ResultSet rs = s3Query.executeQuery()) {
while (rs.next()) {
Long s3_id = rs.getLong("s3_id");
- Long s3_tmpl_id = rs.getLong("template_id");
+ long s3_tmpl_id = rs.getLong("template_id");
Date s3_created = rs.getDate("created");
Long s3_size = rs.getObject("size") != null ? rs.getLong("size") : null;
Long s3_psize = rs.getObject("physical_size") != null ? rs.getLong("physical_size") : null;
- Long account_id = rs.getLong("account_id");
+ long account_id = rs.getLong("account_id");
tmplStoreInsert.setLong(1, s3StoreMap.get(s3_id));
tmplStoreInsert.setLong(2, s3_tmpl_id);
tmplStoreInsert.setDate(3, s3_created);
@@ -2022,15 +1958,15 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
tmplStoreInsert.executeUpdate();
}
}catch (SQLException e) {
- logger.error("Unable to migrate template_s3_ref." + e.getMessage(),e);
+ logger.error("Unable to migrate template_s3_ref.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to migrate template_s3_ref." + e.getMessage(),e);
}
}catch (SQLException e) {
- logger.error("Unable to migrate template_s3_ref." + e.getMessage(),e);
+ logger.error("Unable to migrate template_s3_ref.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to migrate template_s3_ref." + e.getMessage(),e);
}
} catch (SQLException e) {
- logger.error("Unable to migrate template_s3_ref." + e.getMessage(),e);
+ logger.error("Unable to migrate template_s3_ref.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to migrate template_s3_ref." + e.getMessage(),e);
}
logger.debug("Completed migrating template_s3_ref table.");
@@ -2040,19 +1976,19 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void migrateSnapshotS3Ref(Connection conn, Map<Long, Long> s3StoreMap) {
logger.debug("Updating snapshot_store_ref table from snapshots table for s3");
try(PreparedStatement snapshotStoreInsert =
- conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) values(?, ?, ?, ?, ?, ?, ?, 0, 0, 'Image', 'Ready')");
+ conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) values(?, ?, ?, ?, ?, ?, ?, 0, 0, 'Image', 'Ready')")
) {
try(PreparedStatement s3Query =
- conn.prepareStatement("select s3_id, id, created, size, prev_snap_id, CONCAT('snapshots', '/', account_id, '/', volume_id, '/', backup_snap_id), volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type <> 'KVM' and s3_id is not null and removed is null");) {
- try(ResultSet rs = s3Query.executeQuery();) {
+ conn.prepareStatement("select s3_id, id, created, size, prev_snap_id, CONCAT('snapshots', '/', account_id, '/', volume_id, '/', backup_snap_id), volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type <> 'KVM' and s3_id is not null and removed is null")) {
+ try(ResultSet rs = s3Query.executeQuery()) {
while (rs.next()) {
Long s3_id = rs.getLong("s3_id");
- Long snapshot_id = rs.getLong("id");
+ long snapshot_id = rs.getLong("id");
Date s3_created = rs.getDate("created");
Long s3_size = rs.getObject("size") != null ? rs.getLong("size") : null;
Long s3_prev_id = rs.getObject("prev_snap_id") != null ? rs.getLong("prev_snap_id") : null;
String install_path = rs.getString(6);
- Long s3_vol_id = rs.getLong("volume_id");
+ long s3_vol_id = rs.getLong("volume_id");
snapshotStoreInsert.setLong(1, s3StoreMap.get(s3_id));
snapshotStoreInsert.setLong(2, snapshot_id);
@@ -2072,15 +2008,15 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
snapshotStoreInsert.executeUpdate();
}
}catch (SQLException e) {
- logger.error("migrateSnapshotS3Ref:Exception:"+e.getMessage(),e);
+ logger.error("migrateSnapshotS3Ref:Exception:{}", e.getMessage(), e);
throw new CloudRuntimeException("migrateSnapshotS3Ref:Exception:"+e.getMessage(),e);
}
}catch (SQLException e) {
- logger.error("migrateSnapshotS3Ref:Exception:"+e.getMessage(),e);
+ logger.error("migrateSnapshotS3Ref:Exception:{}", e.getMessage(), e);
throw new CloudRuntimeException("migrateSnapshotS3Ref:Exception:"+e.getMessage(),e);
}
} catch (SQLException e) {
- logger.error("Unable to migrate s3 backedup snapshots to snapshot_store_ref." + e.getMessage());
+ logger.error("Unable to migrate s3 backedup snapshots to snapshot_store_ref.{}", e.getMessage());
throw new CloudRuntimeException("Unable to migrate s3 backedup snapshots to snapshot_store_ref." + e.getMessage(), e);
}
logger.debug("Completed updating snapshot_store_ref table from s3 snapshots entries");
@@ -2089,7 +2025,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
// migrate secondary storages Swift from swift tables to image_store table
private void migrateSwiftToImageStore(Connection conn) {
Long storeId = null;
- Map<Long, Long> swift_store_id_map = new HashMap<Long, Long>();
+ Map<Long, Long> swift_store_id_map = new HashMap<>();
logger.debug("Migrating Swift to image store");
try (
@@ -2100,8 +2036,8 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
PreparedStatement storeInsert =
conn.prepareStatement("INSERT INTO `cloud`.`image_store` (uuid, name, image_provider_name, protocol, url, scope, role, created) values(?, ?, 'Swift', 'http', ?, 'REGION', 'Image', ?)");
PreparedStatement swiftQuery = conn.prepareStatement("select id, uuid, url, account, username, swift.key, created from `cloud`.`swift`");
- ResultSet rs = swiftQuery.executeQuery();
- ) {
+ ResultSet rs = swiftQuery.executeQuery()
+ ) {
while (rs.next()) {
Long swift_id = rs.getLong("id");
String swift_uuid = rs.getString("uuid");
@@ -2120,20 +2056,18 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
storeInsert.executeUpdate();
storeQuery.setString(1, swift_uuid);
- try (ResultSet storeInfo = storeQuery.executeQuery();) {
+ try (ResultSet storeInfo = storeQuery.executeQuery()) {
if (storeInfo.next()) {
storeId = storeInfo.getLong("id");
}
}
- Map<String, String> detailMap = new HashMap<String, String>();
+ Map<String, String> detailMap = new HashMap<>();
detailMap.put(ApiConstants.ACCOUNT, swift_account);
detailMap.put(ApiConstants.USERNAME, swift_username);
detailMap.put(ApiConstants.KEY, swift_key);
- Iterator<String> keyIt = detailMap.keySet().iterator();
- while (keyIt.hasNext()) {
- String key = keyIt.next();
+ for (String key : detailMap.keySet()) {
String val = detailMap.get(key);
storeDetailInsert.setLong(1, storeId);
storeDetailInsert.setString(2, key);
@@ -2164,11 +2098,11 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
PreparedStatement tmplStoreInsert =
conn.prepareStatement("INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, download_pct, size, physical_size, download_state, local_path, install_path, update_count, ref_cnt, store_role, state) values(?, ?, ?, 100, ?, ?, 'DOWNLOADED', '?', '?', 0, 0, 'Image', 'Ready')");
PreparedStatement s3Query = conn.prepareStatement("select swift_id, template_id, created, path, size, physical_size from `cloud`.`template_swift_ref`");
- ResultSet rs = s3Query.executeQuery();
- ) {
+ ResultSet rs = s3Query.executeQuery()
+ ) {
while (rs.next()) {
Long swift_id = rs.getLong("swift_id");
- Long tmpl_id = rs.getLong("template_id");
+ long tmpl_id = rs.getLong("template_id");
Date created = rs.getDate("created");
String path = rs.getString("path");
Long size = rs.getObject("size") != null ? rs.getLong("size") : null;
@@ -2203,19 +2137,19 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void migrateSnapshotSwiftRef(Connection conn, Map<Long, Long> swiftStoreMap) {
logger.debug("Updating snapshot_store_ref table from snapshots table for swift");
try (PreparedStatement snapshotStoreInsert =
- conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) values(?, ?, ?, ?, ?, ?, ?, 0, 0, 'Image', 'Ready')");
+ conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) values(?, ?, ?, ?, ?, ?, ?, 0, 0, 'Image', 'Ready')")
){
try(PreparedStatement s3Query =
- conn.prepareStatement("select swift_id, id, created, size, prev_snap_id, CONCAT('snapshots', '/', account_id, '/', volume_id, '/', backup_snap_id), volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type <> 'KVM' and swift_id is not null and removed is null");) {
- try(ResultSet rs = s3Query.executeQuery();) {
+ conn.prepareStatement("select swift_id, id, created, size, prev_snap_id, CONCAT('snapshots', '/', account_id, '/', volume_id, '/', backup_snap_id), volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type <> 'KVM' and swift_id is not null and removed is null")) {
+ try(ResultSet rs = s3Query.executeQuery()) {
while (rs.next()) {
Long swift_id = rs.getLong("swift_id");
- Long snapshot_id = rs.getLong("id");
+ long snapshot_id = rs.getLong("id");
Date created = rs.getDate("created");
- Long size = rs.getLong("size");
- Long prev_id = rs.getLong("prev_snap_id");
+ long size = rs.getLong("size");
+ long prev_id = rs.getLong("prev_snap_id");
String install_path = rs.getString(6);
- Long vol_id = rs.getLong("volume_id");
+ long vol_id = rs.getLong("volume_id");
snapshotStoreInsert.setLong(1, swiftStoreMap.get(swift_id));
snapshotStoreInsert.setLong(2, snapshot_id);
@@ -2227,15 +2161,15 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
snapshotStoreInsert.executeUpdate();
}
}catch (SQLException e) {
- logger.error("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e);
+ logger.error("migrateSnapshotSwiftRef:Exception:{}", e.getMessage(), e);
throw new CloudRuntimeException("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e);
}
}catch (SQLException e) {
- logger.error("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e);
+ logger.error("migrateSnapshotSwiftRef:Exception:{}", e.getMessage(), e);
throw new CloudRuntimeException("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e);
}
} catch (SQLException e) {
- logger.error("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e);
+ logger.error("migrateSnapshotSwiftRef:Exception:{}", e.getMessage(), e);
throw new CloudRuntimeException("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e);
}
logger.debug("Completed updating snapshot_store_ref table from swift snapshots entries");
@@ -2243,12 +2177,12 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void fixNiciraKeys(Connection conn) {
//First drop the key if it exists.
- List keys = new ArrayList();
+ List keys = new ArrayList<>();
logger.debug("Dropping foreign key fk_nicira_nvp_nic_map__nic from the table nicira_nvp_nic_map if it exists");
keys.add("fk_nicira_nvp_nic_map__nic");
DbUpgradeUtils.dropKeysIfExist(conn, "nicira_nvp_nic_map", keys, true);
//Now add foreign key.
- try(PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`nicira_nvp_nic_map` ADD CONSTRAINT `fk_nicira_nvp_nic_map__nic` FOREIGN KEY (`nic`) REFERENCES `nics` (`uuid`) ON DELETE CASCADE");)
+ try(PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`nicira_nvp_nic_map` ADD CONSTRAINT `fk_nicira_nvp_nic_map__nic` FOREIGN KEY (`nic`) REFERENCES `nics` (`uuid`) ON DELETE CASCADE"))
{
pstmt.executeUpdate();
logger.debug("Added foreign key fk_nicira_nvp_nic_map__nic to the table nicira_nvp_nic_map");
@@ -2259,13 +2193,13 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void fixRouterKeys(Connection conn) {
//First drop the key if it exists.
- List keys = new ArrayList();
+ List keys = new ArrayList<>();
logger.debug("Dropping foreign key fk_router_network_ref__router_id from the table router_network_ref if it exists");
keys.add("fk_router_network_ref__router_id");
DbUpgradeUtils.dropKeysIfExist(conn, "router_network_ref", keys, true);
//Now add foreign key.
try (PreparedStatement pstmt =
- conn.prepareStatement("ALTER TABLE `cloud`.`router_network_ref` ADD CONSTRAINT `fk_router_network_ref__router_id` FOREIGN KEY (`router_id`) REFERENCES `domain_router` (`id`) ON DELETE CASCADE");)
+ conn.prepareStatement("ALTER TABLE `cloud`.`router_network_ref` ADD CONSTRAINT `fk_router_network_ref__router_id` FOREIGN KEY (`router_id`) REFERENCES `domain_router` (`id`) ON DELETE CASCADE"))
{
pstmt.executeUpdate();
logger.debug("Added foreign key fk_router_network_ref__router_id to the table router_network_ref");
@@ -2276,8 +2210,8 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void encryptSite2SitePSK(Connection conn) {
logger.debug("Encrypting Site2Site Customer Gateway pre-shared key");
- try (PreparedStatement select_pstmt = conn.prepareStatement("select id, ipsec_psk from `cloud`.`s2s_customer_gateway`");){
- try(ResultSet rs = select_pstmt.executeQuery();)
+ try (PreparedStatement select_pstmt = conn.prepareStatement("select id, ipsec_psk from `cloud`.`s2s_customer_gateway`")){
+ try(ResultSet rs = select_pstmt.executeQuery())
{
while (rs.next()) {
long id = rs.getLong(1);
@@ -2286,7 +2220,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
continue;
}
String encryptedValue = DBEncryptionUtil.encrypt(value);
- try(PreparedStatement update_pstmt = conn.prepareStatement("update `cloud`.`s2s_customer_gateway` set ipsec_psk=? where id=?");) {
+ try(PreparedStatement update_pstmt = conn.prepareStatement("update `cloud`.`s2s_customer_gateway` set ipsec_psk=? where id=?")) {
update_pstmt.setBytes(1, encryptedValue.getBytes("UTF-8"));
update_pstmt.setLong(2, id);
update_pstmt.executeUpdate();
@@ -2297,9 +2231,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}catch (SQLException e) {
throw new CloudRuntimeException("encryptSite2SitePSK:Exception:"+e.getMessage(), e);
}
- } catch (SQLException e) {
- throw new CloudRuntimeException("Unable to encrypt Site2Site Customer Gateway pre-shared key ", e);
- } catch (UnsupportedEncodingException e) {
+ } catch (SQLException | UnsupportedEncodingException e) {
throw new CloudRuntimeException("Unable to encrypt Site2Site Customer Gateway pre-shared key ", e);
}
logger.debug("Done encrypting Site2Site Customer Gateway pre-shared key");
@@ -2308,12 +2240,12 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
protected void updateConcurrentConnectionsInNetworkOfferings(Connection conn) {
try {
try (PreparedStatement sel_pstmt =
- conn.prepareStatement("SELECT * FROM information_schema.COLUMNS WHERE TABLE_SCHEMA = 'cloud' AND TABLE_NAME = 'network_offerings' AND COLUMN_NAME = 'concurrent_connections'");)
+ conn.prepareStatement("SELECT * FROM information_schema.COLUMNS WHERE TABLE_SCHEMA = 'cloud' AND TABLE_NAME = 'network_offerings' AND COLUMN_NAME = 'concurrent_connections'"))
{
- try(ResultSet rs = sel_pstmt.executeQuery();) {
+ try(ResultSet rs = sel_pstmt.executeQuery()) {
if (!rs.next()) {
try(PreparedStatement alter_pstmt =
- conn.prepareStatement("ALTER TABLE `cloud`.`network_offerings` ADD COLUMN `concurrent_connections` int(10) unsigned COMMENT 'Load Balancer(haproxy) maximum number of concurrent connections(global max)'");) {
+ conn.prepareStatement("ALTER TABLE `cloud`.`network_offerings` ADD COLUMN `concurrent_connections` int(10) unsigned COMMENT 'Load Balancer(haproxy) maximum number of concurrent connections(global max)'")) {
alter_pstmt.executeUpdate();
}catch (SQLException e) {
throw new CloudRuntimeException("migration of concurrent connections from network_details failed");
@@ -2325,23 +2257,23 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
} catch (SQLException e) {
throw new CloudRuntimeException("migration of concurrent connections from network_details failed");
}
- try(PreparedStatement sel_net_pstmt = conn.prepareStatement("select network_id, value from `cloud`.`network_details` where name='maxconnections'");)
+ try(PreparedStatement sel_net_pstmt = conn.prepareStatement("select network_id, value from `cloud`.`network_details` where name='maxconnections'"))
{
- try(ResultSet rs = sel_net_pstmt.executeQuery();) {
+ try(ResultSet rs = sel_net_pstmt.executeQuery()) {
while (rs.next()) {
long networkId = rs.getLong(1);
int maxconnections = Integer.parseInt(rs.getString(2));
- try(PreparedStatement sel_net_off_pstmt = conn.prepareStatement("select network_offering_id from `cloud`.`networks` where id= ?");) {
+ try(PreparedStatement sel_net_off_pstmt = conn.prepareStatement("select network_offering_id from `cloud`.`networks` where id= ?")) {
sel_net_off_pstmt.setLong(1, networkId);
- try(ResultSet rs1 = sel_net_off_pstmt.executeQuery();) {
+ try(ResultSet rs1 = sel_net_off_pstmt.executeQuery()) {
if (rs1.next()) {
long network_offering_id = rs1.getLong(1);
- try(PreparedStatement pstmt = conn.prepareStatement("select concurrent_connections from `cloud`.`network_offerings` where id= ?");)
+ try(PreparedStatement pstmt = conn.prepareStatement("select concurrent_connections from `cloud`.`network_offerings` where id= ?"))
{
pstmt.setLong(1, network_offering_id);
- try(ResultSet rs2 = pstmt.executeQuery();) {
+ try(ResultSet rs2 = pstmt.executeQuery()) {
if ((!rs2.next()) || (rs2.getInt(1) < maxconnections)) {
- try(PreparedStatement update_net_pstmt = conn.prepareStatement("update network_offerings set concurrent_connections=? where id=?");)
+ try(PreparedStatement update_net_pstmt = conn.prepareStatement("update network_offerings set concurrent_connections=? where id=?"))
{
update_net_pstmt.setInt(1, maxconnections);
update_net_pstmt.setLong(2, network_offering_id);
@@ -2376,24 +2308,24 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
private void migrateDatafromIsoIdInVolumesTable(Connection conn) {
- try(PreparedStatement pstmt = conn.prepareStatement("SELECT iso_id1 From `cloud`.`volumes`");)
+ try(PreparedStatement pstmt = conn.prepareStatement("SELECT iso_id1 From `cloud`.`volumes`"))
{
- try(ResultSet rs = pstmt.executeQuery();) {
+ try(ResultSet rs = pstmt.executeQuery()) {
if (rs.next()) {
- try(PreparedStatement alter_pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`volumes` DROP COLUMN `iso_id`");) {
+ try(PreparedStatement alter_pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`volumes` DROP COLUMN `iso_id`")) {
alter_pstmt.executeUpdate();
try(PreparedStatement alter_iso_pstmt =
- conn.prepareStatement("ALTER TABLE `cloud`.`volumes` CHANGE COLUMN `iso_id1` `iso_id` bigint(20) unsigned COMMENT 'The id of the iso from which the volume was created'");) {
+ conn.prepareStatement("ALTER TABLE `cloud`.`volumes` CHANGE COLUMN `iso_id1` `iso_id` bigint(20) unsigned COMMENT 'The id of the iso from which the volume was created'")) {
alter_iso_pstmt.executeUpdate();
}catch (SQLException e) {
- logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: " + e.getMessage());
+ logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: {}", e.getMessage());
if (logger.isTraceEnabled()) {
logger.trace("migrateDatafromIsoIdInVolumesTable: ignored Exception",e);
}
//implies iso_id1 is not present, so do nothing.
}
}catch (SQLException e) {
- logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: " + e.getMessage());
+ logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: {}", e.getMessage());
if (logger.isTraceEnabled()) {
logger.trace("migrateDatafromIsoIdInVolumesTable: ignored Exception",e);
}
@@ -2401,14 +2333,14 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
}
}catch (SQLException e) {
- logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: " + e.getMessage());
+ logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: {}", e.getMessage());
if (logger.isTraceEnabled()) {
logger.trace("migrateDatafromIsoIdInVolumesTable: ignored Exception",e);
}
//implies iso_id1 is not present, so do nothing.
}
} catch (SQLException e) {
- logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: " + e.getMessage());
+ logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: {}", e.getMessage());
if (logger.isTraceEnabled()) {
logger.trace("migrateDatafromIsoIdInVolumesTable: ignored Exception",e);
}
@@ -2417,7 +2349,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
protected void setRAWformatForRBDVolumes(Connection conn) {
- try(PreparedStatement pstmt = conn.prepareStatement("UPDATE volumes SET format = 'RAW' WHERE pool_id IN(SELECT id FROM storage_pool WHERE pool_type = 'RBD')");)
+ try(PreparedStatement pstmt = conn.prepareStatement("UPDATE volumes SET format = 'RAW' WHERE pool_id IN(SELECT id FROM storage_pool WHERE pool_type = 'RBD')"))
{
logger.debug("Setting format to RAW for all volumes on RBD primary storage pools");
pstmt.executeUpdate();
@@ -2428,23 +2360,23 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private void upgradeVpcServiceMap(Connection conn) {
logger.debug("Upgrading VPC service Map");
- try(PreparedStatement listVpc = conn.prepareStatement("SELECT id, vpc_offering_id FROM `cloud`.`vpc` where removed is NULL");)
+ try(PreparedStatement listVpc = conn.prepareStatement("SELECT id, vpc_offering_id FROM `cloud`.`vpc` where removed is NULL"))
{
//Get all vpc Ids along with vpc offering Id
- try(ResultSet rs = listVpc.executeQuery();) {
+ try(ResultSet rs = listVpc.executeQuery()) {
while (rs.next()) {
long vpc_id = rs.getLong(1);
long offering_id = rs.getLong(2);
//list all services and providers in offering
- try(PreparedStatement listServiceProviders = conn.prepareStatement("SELECT service, provider FROM `cloud`.`vpc_offering_service_map` where vpc_offering_id = ?");) {
+ try(PreparedStatement listServiceProviders = conn.prepareStatement("SELECT service, provider FROM `cloud`.`vpc_offering_service_map` where vpc_offering_id = ?")) {
listServiceProviders.setLong(1, offering_id);
- try(ResultSet rs1 = listServiceProviders.executeQuery();) {
+ try(ResultSet rs1 = listServiceProviders.executeQuery()) {
//Insert entries in vpc_service_map
while (rs1.next()) {
String service = rs1.getString(1);
String provider = rs1.getString(2);
try (PreparedStatement insertProviders =
- conn.prepareStatement("INSERT INTO `cloud`.`vpc_service_map` (`vpc_id`, `service`, `provider`, `created`) VALUES (?, ?, ?, now());");) {
+ conn.prepareStatement("INSERT INTO `cloud`.`vpc_service_map` (`vpc_id`, `service`, `provider`, `created`) VALUES (?, ?, ?, now());")) {
insertProviders.setLong(1, vpc_id);
insertProviders.setString(2, service);
insertProviders.setString(3, provider);
@@ -2459,7 +2391,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}catch (SQLException e) {
throw new CloudRuntimeException("Error during VPC service map upgrade", e);
}
- logger.debug("Upgraded service map for VPC: " + vpc_id);
+ logger.debug("Upgraded service map for VPC: {}", vpc_id);
}
}
} catch (SQLException e) {
@@ -2471,8 +2403,8 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
logger.debug("upgradeResourceCount start");
try(
PreparedStatement sel_dom_pstmt = conn.prepareStatement("select id, domain_id FROM `cloud`.`account` where removed is NULL ");
- ResultSet rsAccount = sel_dom_pstmt.executeQuery();
- ) {
+ ResultSet rsAccount = sel_dom_pstmt.executeQuery()
+ ) {
while (rsAccount.next()) {
long account_id = rsAccount.getLong(1);
long domain_id = rsAccount.getLong(2);
@@ -2480,9 +2412,9 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
try(PreparedStatement sel_sum_pstmt =
conn.prepareStatement("SELECT SUM(service_offering.cpu), SUM(service_offering.ram_size)" + " FROM `cloud`.`vm_instance`, `cloud`.`service_offering`"
+ " WHERE vm_instance.service_offering_id = service_offering.id AND vm_instance.account_id = ?" + " AND vm_instance.removed is NULL"
- + " AND vm_instance.vm_type='User' AND state not in ('Destroyed', 'Error', 'Expunging')");) {
+ + " AND vm_instance.vm_type='User' AND state not in ('Destroyed', 'Error', 'Expunging')")) {
sel_sum_pstmt.setLong(1, account_id);
- try(ResultSet sel_sum_pstmt_res = sel_sum_pstmt.executeQuery();) {
+ try(ResultSet sel_sum_pstmt_res = sel_sum_pstmt.executeQuery()) {
if (sel_sum_pstmt_res.next()) {
upgradeResourceCountforAccount(conn, account_id, domain_id, "cpu", sel_sum_pstmt_res.getLong(1));
upgradeResourceCountforAccount(conn, account_id, domain_id, "memory", sel_sum_pstmt_res.getLong(2));
@@ -2494,9 +2426,9 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
try(PreparedStatement sel_cloud_vol_pstmt =
conn.prepareStatement("SELECT sum(size) FROM `cloud`.`volumes` WHERE account_id= ?"
+ " AND (path is not NULL OR state in ('Allocated')) AND removed is NULL"
- + " AND instance_id IN (SELECT id FROM `cloud`.`vm_instance` WHERE vm_type='User')");) {
+ + " AND instance_id IN (SELECT id FROM `cloud`.`vm_instance` WHERE vm_type='User')")) {
sel_cloud_vol_pstmt.setLong(1, account_id);
- try(ResultSet sel_cloud_vol_count = sel_cloud_vol_pstmt.executeQuery();) {
+ try(ResultSet sel_cloud_vol_count = sel_cloud_vol_pstmt.executeQuery()) {
if (sel_cloud_vol_count.next()) {
upgradeResourceCountforAccount(conn, account_id, domain_id, "primary_storage", sel_cloud_vol_count.getLong(1));
} else {
@@ -2514,24 +2446,24 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
long totalTemplatesSize = 0;
try(PreparedStatement sel_cloud_vol_alloc_pstmt =
conn.prepareStatement("SELECT sum(size) FROM `cloud`.`volumes` WHERE account_id= ?"
- + " AND path is NULL AND state not in ('Allocated') AND removed is NULL");) {
+ + " AND path is NULL AND state not in ('Allocated') AND removed is NULL")) {
sel_cloud_vol_alloc_pstmt.setLong(1, account_id);
- try(ResultSet sel_cloud_vol_res = sel_cloud_vol_alloc_pstmt.executeQuery();) {
+ try(ResultSet sel_cloud_vol_res = sel_cloud_vol_alloc_pstmt.executeQuery()) {
if (sel_cloud_vol_res.next()) {
totalVolumesSize = sel_cloud_vol_res.getLong(1);
}
- try(PreparedStatement sel_cloud_snapshot_pstmt = conn.prepareStatement("SELECT sum(size) FROM `cloud`.`snapshots` WHERE account_id= ? AND removed is NULL");)
+ try(PreparedStatement sel_cloud_snapshot_pstmt = conn.prepareStatement("SELECT sum(size) FROM `cloud`.`snapshots` WHERE account_id= ? AND removed is NULL"))
{
sel_cloud_snapshot_pstmt.setLong(1, account_id);
- try(ResultSet sel_cloud_snapshot_res = sel_cloud_snapshot_pstmt.executeQuery();) {
+ try(ResultSet sel_cloud_snapshot_res = sel_cloud_snapshot_pstmt.executeQuery()) {
if (sel_cloud_snapshot_res.next()) {
totalSnapshotsSize = sel_cloud_snapshot_res.getLong(1);
}
try (PreparedStatement sel_templ_store_pstmt =
conn.prepareStatement("SELECT sum(template_store_ref.size) FROM `cloud`.`template_store_ref`,`cloud`.`vm_template` WHERE account_id = ?"
- + " AND template_store_ref.template_id = vm_template.id AND download_state = 'DOWNLOADED' AND destroyed = false AND removed is NULL");) {
+ + " AND template_store_ref.template_id = vm_template.id AND download_state = 'DOWNLOADED' AND destroyed = false AND removed is NULL")) {
sel_templ_store_pstmt.setLong(1, account_id);
- try (ResultSet templ_store_count = sel_templ_store_pstmt.executeQuery();) {
+ try (ResultSet templ_store_count = sel_templ_store_pstmt.executeQuery()) {
if (templ_store_count.next()) {
totalTemplatesSize = templ_store_count.getLong(1);
}
@@ -2562,13 +2494,12 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}
}
// 4. upgrade cpu,memory,primary_storage,secondary_storage for domains
- String resource_types[] = {"cpu", "memory", "primary_storage", "secondary_storage"};
- try(PreparedStatement sel_id_pstmt = conn.prepareStatement("select id FROM `cloud`.`domain`");) {
- try(ResultSet sel_id_res = sel_id_pstmt.executeQuery();) {
+ String[] resource_types = {"cpu", "memory", "primary_storage", "secondary_storage"};
+ try(PreparedStatement sel_id_pstmt = conn.prepareStatement("select id FROM `cloud`.`domain`")) {
+ try(ResultSet sel_id_res = sel_id_pstmt.executeQuery()) {
while (sel_id_res.next()) {
long domain_id = sel_id_res.getLong(1);
- for (int count = 0; count < resource_types.length; count++) {
- String resource_type = resource_types[count];
+ for (String resource_type : resource_types) {
upgradeResourceCountforDomain(conn, domain_id, resource_type, 0L); // reset value to 0 before statistics
}
}
@@ -2578,22 +2509,21 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
}catch (SQLException e) {
throw new CloudRuntimeException("Unable to upgrade resource count (cpu,memory,primary_storage,secondary_storage) ", e);
}
- for (int count = 0; count < resource_types.length; count++) {
- String resource_type = resource_types[count];
- try(PreparedStatement sel_dom_id_pstmt =
- conn.prepareStatement("select account.domain_id,sum(resource_count.count) from `cloud`.`account` left join `cloud`.`resource_count` on account.id=resource_count.account_id "
- + "where resource_count.type=? group by account.domain_id;");) {
+ for (String resource_type : resource_types) {
+ try (PreparedStatement sel_dom_id_pstmt =
+ conn.prepareStatement("select account.domain_id,sum(resource_count.count) from `cloud`.`account` left join `cloud`.`resource_count` on account.id=resource_count.account_id "
+ + "where resource_count.type=? group by account.domain_id;")) {
sel_dom_id_pstmt.setString(1, resource_type);
- try(ResultSet sel_dom_res = sel_dom_id_pstmt.executeQuery();) {
+ try (ResultSet sel_dom_res = sel_dom_id_pstmt.executeQuery()) {
while (sel_dom_res.next()) {
long domain_id = sel_dom_res.getLong(1);
long resource_count = sel_dom_res.getLong(2);
upgradeResourceCountforDomain(conn, domain_id, resource_type, resource_count);
}
- }catch (SQLException e) {
+ } catch (SQLException e) {
throw new CloudRuntimeException("Unable to upgrade resource count (cpu,memory,primary_storage,secondary_storage) ", e);
}
- }catch (SQLException e) {
+ } catch (SQLException e) {
throw new CloudRuntimeException("Unable to upgrade resource count (cpu,memory,primary_storage,secondary_storage) ", e);
}
}
@@ -2606,7 +2536,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private static void upgradeResourceCountforAccount(Connection conn, Long accountId, Long domainId, String type, Long resourceCount) throws SQLException {
//update or insert into resource_count table.
try(PreparedStatement pstmt =
- conn.prepareStatement("INSERT INTO `cloud`.`resource_count` (account_id, type, count) VALUES (?,?,?) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id), count=?");) {
+ conn.prepareStatement("INSERT INTO `cloud`.`resource_count` (account_id, type, count) VALUES (?,?,?) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id), count=?")) {
pstmt.setLong(1, accountId);
pstmt.setString(2, type);
pstmt.setLong(3, resourceCount);
@@ -2619,7 +2549,7 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl {
private static void upgradeResourceCountforDomain(Connection conn, Long domainId, String type, Long resourceCount) throws SQLException {
//update or insert into resource_count table.
- try(PreparedStatement pstmt = conn.prepareStatement("INSERT INTO `cloud`.`resource_count` (domain_id, type, count) VALUES (?,?,?) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id), count=?");) {
+ try(PreparedStatement pstmt = conn.prepareStatement("INSERT INTO `cloud`.`resource_count` (domain_id, type, count) VALUES (?,?,?) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id), count=?")) {
pstmt.setLong(1, domainId);
pstmt.setString(2, type);
pstmt.setLong(3, resourceCount);
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41500to41510.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41500to41510.java
index c7295414326..9c1b45607a0 100644
--- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41500to41510.java
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41500to41510.java
@@ -31,7 +31,6 @@ import java.util.Set;
import com.cloud.hypervisor.Hypervisor;
import com.cloud.utils.exception.CloudRuntimeException;
-import static com.cloud.hypervisor.Hypervisor.HypervisorType.Hyperv;
import static com.cloud.hypervisor.Hypervisor.HypervisorType.KVM;
import static com.cloud.hypervisor.Hypervisor.HypervisorType.LXC;
import static com.cloud.hypervisor.Hypervisor.HypervisorType.Ovm3;
@@ -85,12 +84,8 @@ public class Upgrade41500to41510 extends DbUpgradeAbstractImpl implements DbUpgr
hypervisorsListInUse.add(KVM);
} else if (type.equals(VMware)) {
hypervisorsListInUse.add(VMware);
- } else if (type.equals(Hyperv)) {
- hypervisorsListInUse.add(Hyperv);
} else if (type.equals(LXC)) {
hypervisorsListInUse.add(LXC);
- } else if (type.equals(Ovm3)) {
- hypervisorsListInUse.add(Ovm3);
}
}
} catch (final SQLException e) {
@@ -103,7 +98,6 @@ public class Upgrade41500to41510 extends DbUpgradeAbstractImpl implements DbUpgr
put(KVM, "systemvm-kvm-4.15.1");
put(VMware, "systemvm-vmware-4.15.1");
put(XenServer, "systemvm-xenserver-4.15.1");
- put(Hyperv, "systemvm-hyperv-4.15.1");
put(LXC, "systemvm-lxc-4.15.1");
put(Ovm3, "systemvm-ovm3-4.15.1");
}
@@ -114,7 +108,6 @@ public class Upgrade41500to41510 extends DbUpgradeAbstractImpl implements DbUpgr
put(KVM, "router.template.kvm");
put(VMware, "router.template.vmware");
put(XenServer, "router.template.xenserver");
- put(Hyperv, "router.template.hyperv");
put(LXC, "router.template.lxc");
put(Ovm3, "router.template.ovm3");
}
@@ -125,7 +118,6 @@ public class Upgrade41500to41510 extends DbUpgradeAbstractImpl implements DbUpgr
put(KVM, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-kvm.qcow2.bz2");
put(VMware, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-vmware.ova");
put(XenServer, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-xen.vhd.bz2");
- put(Hyperv, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-hyperv.vhd.zip");
put(LXC, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-kvm.qcow2.bz2");
put(Ovm3, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-ovm.raw.bz2");
}
@@ -136,7 +128,6 @@ public class Upgrade41500to41510 extends DbUpgradeAbstractImpl implements DbUpgr
put(KVM, "0e9f9a7d0957c3e0a2088e41b2da2cec");
put(XenServer, "86373992740b1eca8aff8b08ebf3aea5");
put(VMware, "4006982765846d373eb3719b2fe4d720");
- put(Hyperv, "0b9514e4b6cba1f636fea2125f0f7a5f");
put(LXC, "0e9f9a7d0957c3e0a2088e41b2da2cec");
put(Ovm3, "ae3977e696b3e6c81bdcbb792d514d29");
}
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql b/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql
index d62a9bb9303..1a6d6a2d66c 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql
@@ -2187,7 +2187,6 @@ INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'manag
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'interval.baremetal.securitygroup.agent.echo', 10, 'Interval to echo baremetal security group agent, in seconds');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'timeout.baremetal.securitygroup.agent.echo', 3600, 'Timeout to echo baremetal security group agent, in seconds, the provisioning process will be treated as a failure');
-INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'NetworkManager', 'router.template.hyperv', 'SystemVM Template (HyperV)', 'Name of the default router template on Hyperv.');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'NetworkManager', 'router.template.kvm', 'SystemVM Template (KVM)', 'Name of the default router template on KVM.');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'NetworkManager', 'router.template.lxc', 'SystemVM Template (LXC)', 'Name of the default router template on LXC.');
INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'NetworkManager', 'router.template.vmware', 'SystemVM Template (vSphere)', 'Name of the default router template on Vmware.');
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41520to41600.sql b/engine/schema/src/main/resources/META-INF/db/schema-41520to41600.sql
index 2464a8a57ce..3b24252df0f 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-41520to41600.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-41520to41600.sql
@@ -24,7 +24,7 @@ ALTER TABLE `cloud`.`user_vm` ADD COLUMN `user_vm_type` varchar(255) DEFAULT "Us
-- This is set, so as to ensure that the controller details from the ovf template are adhered to
UPDATE `cloud`.`vm_template` set deploy_as_is = 1 where id = 8;
-DELETE FROM `cloud`.`configuration` WHERE name IN ("cloud.kubernetes.cluster.template.name.kvm", "cloud.kubernetes.cluster.template.name.vmware", "cloud.kubernetes.cluster.template.name.xenserver", "cloud.kubernetes.cluster.template.name.hyperv");
+DELETE FROM `cloud`.`configuration` WHERE name IN ("cloud.kubernetes.cluster.template.name.kvm", "cloud.kubernetes.cluster.template.name.vmware", "cloud.kubernetes.cluster.template.name.xenserver");
ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `autoscaling_enabled` tinyint(1) unsigned NOT NULL DEFAULT 0;
ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `minsize` bigint;
diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42200to42210.sql b/engine/schema/src/main/resources/META-INF/db/schema-42200to42210.sql
index 0fcadba1fdb..e7b5adef817 100644
--- a/engine/schema/src/main/resources/META-INF/db/schema-42200to42210.sql
+++ b/engine/schema/src/main/resources/META-INF/db/schema-42200to42210.sql
@@ -28,4 +28,13 @@ CALL `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.usage_volume','vm_id', '
DELETE FROM `cloud`.`configuration` WHERE name = 'ucs.sync.blade.interval';
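+-- Remove Hyperv-specific settings and stale router template defaults now that the Hyper-V plugin has been dropped.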
+UPDATE `cloud`.`configuration` SET value = 'KVM,VMware,XenServer,BareMetal,Ovm,LXC,Ovm3,External' WHERE name = 'hypervisor.list';
+UPDATE `cloud`.`configuration` SET value = 'Hypervisor type used to create system vm, valid values are: XenServer, KVM, VMware, VirtualBox, Parralels, BareMetal, Any' WHERE name = 'system.vm.default.hypervisor';
+DELETE FROM `cloud`.`configuration` WHERE name = 'hyperv.public.network.device';
+DELETE FROM `cloud`.`configuration` WHERE name = 'hyperv.private.network.device';
+DELETE FROM `cloud`.`configuration` WHERE name = 'hyperv.guest.network.device';
+
+DELETE FROM `cloud`.`configuration` WHERE name = 'router.template.hyperv';
+DELETE FROM `cloud`.`configuration` WHERE name = 'router.template.ovm3';
+
ALTER TABLE `cloud`.`template_store_ref` MODIFY COLUMN `download_url` varchar(2048);
diff --git a/engine/schema/src/test/java/com/cloud/upgrade/SystemVmTemplateRegistrationTest.java b/engine/schema/src/test/java/com/cloud/upgrade/SystemVmTemplateRegistrationTest.java
index 8028e78c907..8ddb13e706f 100644
--- a/engine/schema/src/test/java/com/cloud/upgrade/SystemVmTemplateRegistrationTest.java
+++ b/engine/schema/src/test/java/com/cloud/upgrade/SystemVmTemplateRegistrationTest.java
@@ -1551,9 +1551,7 @@ public class SystemVmTemplateRegistrationTest {
verify(guestOSDao).findOneByDisplayName(DEFAULT_SYSTEM_VM_GUEST_OS_NAME);
assertEquals(10, SystemVmTemplateRegistration.LINUX_12_ID.intValue());
assertEquals(10, SystemVmTemplateRegistration.hypervisorGuestOsMap.get(Hypervisor.HypervisorType.KVM).intValue());
- assertEquals(10, SystemVmTemplateRegistration.hypervisorGuestOsMap.get(Hypervisor.HypervisorType.Hyperv).intValue());
assertEquals(10, SystemVmTemplateRegistration.hypervisorGuestOsMap.get(Hypervisor.HypervisorType.LXC).intValue());
- assertEquals(10, SystemVmTemplateRegistration.hypervisorGuestOsMap.get(Hypervisor.HypervisorType.Ovm3).intValue());
}
@Test
diff --git a/engine/schema/templateConfig.sh b/engine/schema/templateConfig.sh
index 21a4ee79574..d21ff179717 100755
--- a/engine/schema/templateConfig.sh
+++ b/engine/schema/templateConfig.sh
@@ -88,9 +88,7 @@ declare -A template_specs=(
[kvm-aarch64]="aarch64-kvm.qcow2.bz2"
[vmware]="x86_64-vmware.ova"
[xenserver]="x86_64-xen.vhd.bz2"
- [hyperv]="x86_64-hyperv.vhd.zip"
[lxc]="x86_64-kvm.qcow2.bz2"
- [ovm3]="x86_64-ovm.raw.bz2"
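+ # hyperv and ovm3 system VM templates are no longer packaged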
)
templates=()
diff --git a/engine/storage/pom.xml b/engine/storage/pom.xml
index a2044c6f4f8..1578f4d9dc9 100644
--- a/engine/storage/pom.xml
+++ b/engine/storage/pom.xml
@@ -65,5 +65,11 @@
<artifactId>cloud-engine-api</artifactId>
<version>${project.version}</version>
+ <dependency>
+ <groupId>org.apache.cloudstack</groupId>
+ <artifactId>cloud-api</artifactId>
+ <version>4.23.0.0-SNAPSHOT</version>
+ <scope>compile</scope>
+ </dependency>
diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java
index 061d18dc376..2effdbf2951 100644
--- a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java
+++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java
@@ -482,7 +482,7 @@ public class DefaultEndPointSelector implements EndPointSelector {
}
case MIGRATEVOLUME: {
VolumeInfo volume = (VolumeInfo) object;
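+ // Hyper-V support has been removed; only VMware still needs the endpoint picked from the running VM's host when migrating an attached volume.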
- if (volume.getHypervisorType() == Hypervisor.HypervisorType.Hyperv || volume.getHypervisorType() == Hypervisor.HypervisorType.VMware) {
+ if (volume.getHypervisorType() == Hypervisor.HypervisorType.VMware) {
VirtualMachine vm = volume.getAttachedVM();
if ((vm != null) && (vm.getState() == VirtualMachine.State.Running)) {
Long hostId = vm.getHostId();
diff --git a/packaging/package.sh b/packaging/package.sh
index ecffaace48b..3f64a6af568 100755
--- a/packaging/package.sh
+++ b/packaging/package.sh
@@ -36,7 +36,7 @@ Optional arguments:
-s, --simulator string Build package for Simulator ("default"|"DEFAULT"|"simulator"|"SIMULATOR") (default "default")
-b, --brand string Set branding to be used in package name (it will override any branding string in POM version)
-T, --use-timestamp Use epoch timestamp instead of SNAPSHOT in the package name (if not provided, use "SNAPSHOT")
- -t --templates Passes necessary flag to package the required templates. Comma separated string - kvm,xen,vmware,ovm,hyperv
+ -t, --templates Passes the necessary flag to package the required templates. Comma-separated string - kvm,xen,vmware
Other arguments:
-h, --help Display this help message and exit
diff --git a/plugins/network-elements/internal-loadbalancer/pom.xml b/plugins/network-elements/internal-loadbalancer/pom.xml
index 2051a7c25bb..08357a08ac0 100644
--- a/plugins/network-elements/internal-loadbalancer/pom.xml
+++ b/plugins/network-elements/internal-loadbalancer/pom.xml
@@ -21,6 +21,14 @@
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-plugin-network-internallb</artifactId>
<name>Apache CloudStack Plugin - Network Internal Load Balancer</name>
+ <dependencies>
+ <dependency>
+ <groupId>org.apache.cloudstack</groupId>
+ <artifactId>cloud-api</artifactId>
+ <version>4.23.0.0-SNAPSHOT</version>
+ <scope>compile</scope>
+ </dependency>
+ </dependencies>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloudstack-plugins</artifactId>
diff --git a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java
index 2f540a5935c..a81d242ddc3 100644
--- a/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java
+++ b/plugins/network-elements/internal-loadbalancer/src/main/java/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java
@@ -16,7 +16,6 @@
// under the License.
package org.apache.cloudstack.network.lb;
-import static com.cloud.hypervisor.Hypervisor.HypervisorType.Hyperv;
import static com.cloud.hypervisor.Hypervisor.HypervisorType.KVM;
import static com.cloud.hypervisor.Hypervisor.HypervisorType.LXC;
import static com.cloud.hypervisor.Hypervisor.HypervisorType.VMware;
@@ -763,8 +762,6 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In
templateName = VirtualNetworkApplianceManager.RouterTemplateKvm.valueIn(dataCenterId);
} else if (VMware.equals(hypervisorType)) {
templateName = VirtualNetworkApplianceManager.RouterTemplateVmware.valueIn(dataCenterId);
- } else if (Hyperv.equals(hypervisorType)) {
- templateName = VirtualNetworkApplianceManager.RouterTemplateHyperV.valueIn(dataCenterId);
} else if (LXC.equals(hypervisorType)) {
templateName = VirtualNetworkApplianceManager.RouterTemplateLxc.valueIn(dataCenterId);
}
diff --git a/pom.xml b/pom.xml
index d5b5ba121c9..24db8d8b33e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1051,9 +1051,6 @@
debian/source/format
dist/console-proxy/js/jquery.js
engine/schema/dist/**
- plugins/hypervisors/hyperv/conf/agent.properties
- plugins/hypervisors/hyperv/conf/uefi.properties
- plugins/hypervisors/hyperv/DotNet/ServerResource/**
scripts/installer/windows/acs_license.rtf
scripts/vm/systemvm/id_rsa.cloud
services/console-proxy/server/conf/agent.properties
diff --git a/server/src/main/java/com/cloud/api/ApiDBUtils.java b/server/src/main/java/com/cloud/api/ApiDBUtils.java
index 57eeb63ea9f..cf2521612a4 100644
--- a/server/src/main/java/com/cloud/api/ApiDBUtils.java
+++ b/server/src/main/java/com/cloud/api/ApiDBUtils.java
@@ -1312,14 +1312,8 @@ public class ApiDBUtils {
public static HypervisorType getHypervisorTypeFromFormat(long dcId, ImageFormat format){
HypervisorType type = s_storageMgr.getHypervisorTypeFromFormat(format);
if (format == ImageFormat.VHD) {
- // Xenserver and Hyperv both support vhd format. Additionally hyperv is only supported
- // in a dc/zone if there aren't any other hypervisor types present in the zone). If the
- // format type is VHD check is any xenserver clusters are present. If not, we assume it
- // is a hyperv zone and update the type.
+ // Only XenServer supports the VHD format now that Hyper-V is no longer supported, so no fallback check is needed.
- List xenClusters = s_clusterDao.listByDcHyType(dcId, HypervisorType.XenServer.toString());
- if (xenClusters.isEmpty()) {
- type = HypervisorType.Hyperv;
- }
} if (format == ImageFormat.RAW) {
// Currently, KVM only supports RBD, PowerFlex, and FiberChannel images of type RAW.
// This results in a weird collision with OVM volumes which
diff --git a/server/src/main/java/com/cloud/api/ApiResponseHelper.java b/server/src/main/java/com/cloud/api/ApiResponseHelper.java
index 55d3c298db1..5c2a96e933e 100644
--- a/server/src/main/java/com/cloud/api/ApiResponseHelper.java
+++ b/server/src/main/java/com/cloud/api/ApiResponseHelper.java
@@ -18,6 +18,7 @@ package com.cloud.api;
import static com.cloud.utils.NumbersUtil.toHumanReadableSize;
+import java.lang.reflect.InvocationTargetException;
import java.security.cert.Certificate;
import java.security.cert.CertificateException;
import java.text.DecimalFormat;
@@ -190,7 +191,6 @@ import org.apache.cloudstack.backup.BackupOffering;
import org.apache.cloudstack.backup.BackupRepository;
import org.apache.cloudstack.backup.BackupSchedule;
import org.apache.cloudstack.backup.dao.BackupOfferingDao;
-import org.apache.cloudstack.backup.dao.BackupRepositoryDao;
import org.apache.cloudstack.config.Configuration;
import org.apache.cloudstack.config.ConfigurationGroup;
import org.apache.cloudstack.config.ConfigurationSubGroup;
@@ -404,7 +404,6 @@ import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.dao.GuestOSCategoryDao;
import com.cloud.storage.dao.GuestOSDao;
-import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.snapshot.SnapshotPolicy;
import com.cloud.storage.snapshot.SnapshotSchedule;
import com.cloud.tags.dao.ResourceTagDao;
@@ -446,6 +445,7 @@ import com.cloud.vm.snapshot.VMSnapshot;
import com.cloud.vm.snapshot.VMSnapshotVO;
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
+import org.jetbrains.annotations.NotNull;
import sun.security.x509.X509CertImpl;
public class ApiResponseHelper implements ResponseGenerator {
@@ -468,8 +468,6 @@ public class ApiResponseHelper implements ResponseGenerator {
@Inject
SnapshotDataFactory snapshotfactory;
@Inject
- private VolumeDao _volumeDao;
- @Inject
private DataStoreManager _dataStoreMgr;
@Inject
private SnapshotDataStoreDao _snapshotStoreDao;
@@ -514,8 +512,6 @@ public class ApiResponseHelper implements ResponseGenerator {
@Inject
VlanDetailsDao vlanDetailsDao;
@Inject
- BackupRepositoryDao backupRepositoryDao;
- @Inject
private ASNumberRangeDao asNumberRangeDao;
@Inject
private ASNumberDao asNumberDao;
@@ -756,7 +752,7 @@ public class ApiResponseHelper implements ResponseGenerator {
}
if (snapshotInfo == null) {
- logger.debug("Unable to find info for image store snapshot with uuid " + snapshot.getUuid());
+ logger.debug("Unable to find info for image store snapshot with uuid {}", snapshot.getUuid());
snapshotResponse.setRevertable(false);
} else {
snapshotResponse.setRevertable(snapshotInfo.isRevertable());
@@ -765,7 +761,7 @@ public class ApiResponseHelper implements ResponseGenerator {
// set tag information
List<? extends ResourceTag> tags = ApiDBUtils.listByResourceTypeAndId(ResourceObjectType.Snapshot, snapshot.getId());
- List tagResponses = new ArrayList();
+ List tagResponses = new ArrayList<>();
for (ResourceTag tag : tags) {
ResourceTagResponse tagResponse = createResourceTagResponse(tag, true);
CollectionUtils.addIgnoreNull(tagResponses, tagResponse);
@@ -795,7 +791,7 @@ public class ApiResponseHelper implements ResponseGenerator {
if (mapCapabilities != null) {
String value = mapCapabilities.get(DataStoreCapabilities.STORAGE_SYSTEM_SNAPSHOT.toString());
- Boolean supportsStorageSystemSnapshots = new Boolean(value);
+ boolean supportsStorageSystemSnapshots = Boolean.parseBoolean(value);
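+ // Boolean.parseBoolean replaces the deprecated Boolean(String) constructor and safely returns false for a null capability value.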
if (supportsStorageSystemSnapshots) {
return DataStoreRole.Primary;
@@ -835,7 +831,7 @@ public class ApiResponseHelper implements ResponseGenerator {
populateOwner(vmSnapshotResponse, vmSnapshot);
List<? extends ResourceTag> tags = _resourceTagDao.listBy(vmSnapshot.getId(), ResourceObjectType.VMSnapshot);
- List tagResponses = new ArrayList();
+ List tagResponses = new ArrayList<>();
for (ResourceTag tag : tags) {
ResourceTagResponse tagResponse = createResourceTagResponse(tag, false);
CollectionUtils.addIgnoreNull(tagResponses, tagResponse);
@@ -867,7 +863,7 @@ public class ApiResponseHelper implements ResponseGenerator {
policyResponse.setObjectName("snapshotpolicy");
List<? extends ResourceTag> tags = _resourceTagDao.listBy(policy.getId(), ResourceObjectType.SnapshotPolicy);
- List tagResponses = new ArrayList();
+ List tagResponses = new ArrayList<>();
for (ResourceTag tag : tags) {
ResourceTagResponse tagResponse = createResourceTagResponse(tag, false);
CollectionUtils.addIgnoreNull(tagResponses, tagResponse);
@@ -932,7 +928,7 @@ public class ApiResponseHelper implements ResponseGenerator {
try {
Long podId = ApiDBUtils.getPodIdForVlan(vlan.getId());
- VlanIpRangeResponse vlanResponse = subClass.newInstance();
+ VlanIpRangeResponse vlanResponse = subClass.getDeclaredConstructor().newInstance();
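+ // Class.newInstance() is deprecated; obtain the no-arg constructor explicitly so constructor exceptions are wrapped rather than thrown unchecked.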
vlanResponse.setId(vlan.getUuid());
if (vlan.getVlanType() != null) {
vlanResponse.setForVirtualNetwork(vlan.getVlanType().equals(VlanType.VirtualNetwork));
@@ -996,7 +992,7 @@ public class ApiResponseHelper implements ResponseGenerator {
if (networkId != null) {
Network network = _ntwkModel.getNetwork(networkId);
if (network != null && TrafficType.Guest.equals(network.getTrafficType())) {
- Long accountId = network.getAccountId();
+ long accountId = network.getAccountId();
populateAccount(vlanResponse, accountId);
populateDomain(vlanResponse, ApiDBUtils.findAccountById(accountId).getDomainId());
}
@@ -1014,7 +1010,7 @@ public class ApiResponseHelper implements ResponseGenerator {
vlanResponse.setProvider(getProviderFromVlanDetailKey(vlan));
vlanResponse.setObjectName("vlan");
return vlanResponse;
- } catch (InstantiationException | IllegalAccessException e) {
+ } catch (InstantiationException | IllegalAccessException | NoSuchMethodException | InvocationTargetException e) {
throw new CloudRuntimeException("Failed to create Vlan IP Range response", e);
}
}
@@ -1203,7 +1199,7 @@ public class ApiResponseHelper implements ResponseGenerator {
//set tag information
List<? extends ResourceTag> tags = ApiDBUtils.listByResourceTypeAndId(ResourceObjectType.PublicIpAddress, ipAddr.getId());
- List tagResponses = new ArrayList();
+ List tagResponses = new ArrayList<>();
for (ResourceTag tag : tags) {
ResourceTagResponse tagResponse = createResourceTagResponse(tag, true);
CollectionUtils.addIgnoreNull(tagResponses, tagResponse);
@@ -1309,7 +1305,7 @@ public class ApiResponseHelper implements ResponseGenerator {
//set tag information
List<? extends ResourceTag> tags = ApiDBUtils.listByResourceTypeAndId(ResourceObjectType.LoadBalancer, loadBalancer.getId());
- List tagResponses = new ArrayList();
+ List tagResponses = new ArrayList<>();
for (ResourceTag tag : tags) {
ResourceTagResponse tagResponse = createResourceTagResponse(tag, true);
CollectionUtils.addIgnoreNull(tagResponses, tagResponse);
@@ -1339,7 +1335,7 @@ public class ApiResponseHelper implements ResponseGenerator {
populateOwner(response, globalLoadBalancerRule);
response.setObjectName("globalloadbalancer");
- List siteLbResponses = new ArrayList();
+ List siteLbResponses = new ArrayList<>();
List<? extends LoadBalancer> siteLoadBalaners = ApiDBUtils.listSiteLoadBalancers(globalLoadBalancerRule.getId());
for (LoadBalancer siteLb : siteLoadBalaners) {
LoadBalancerResponse siteLbResponse = createLoadBalancerResponse(siteLb);
@@ -1360,15 +1356,14 @@ public class ApiResponseHelper implements ResponseGenerator {
@Override
public PodResponse createPodResponse(Pod pod, Boolean showCapacities) {
- String[] ipRange = new String[2];
- List startIps = new ArrayList();
- List endIps = new ArrayList();
- List forSystemVms = new ArrayList();
- List vlanIds = new ArrayList();
+ List startIps = new ArrayList<>();
+ List endIps = new ArrayList<>();
+ List forSystemVms = new ArrayList<>();
+ List vlanIds = new ArrayList<>();
List ipRanges = new ArrayList<>();
- if (pod.getDescription() != null && pod.getDescription().length() > 0) {
+ if (pod.getDescription() != null && !pod.getDescription().isEmpty()) {
final String[] existingPodIpRanges = pod.getDescription().split(",");
for(String podIpRange: existingPodIpRanges) {
@@ -1418,7 +1413,7 @@ public class ApiResponseHelper implements ResponseGenerator {
podResponse.setZoneStorageAccessGroups(zone.getStorageAccessGroups());
if (showCapacities != null && showCapacities) {
List capacities = ApiDBUtils.getCapacityByClusterPodZone(null, pod.getId(), null);
- Set capacityResponses = new HashSet();
+ Set capacityResponses = new HashSet<>();
for (SummedCapacity capacity : capacities) {
CapacityResponse capacityResponse = new CapacityResponse();
capacityResponse.setCapacityType(capacity.getCapacityType());
@@ -1440,7 +1435,7 @@ public class ApiResponseHelper implements ResponseGenerator {
}
// Do it for stats as well.
capacityResponses.addAll(getStatsCapacityresponse(null, null, pod.getId(), pod.getDataCenterId()));
- podResponse.setCapacities(new ArrayList(capacityResponses));
+ podResponse.setCapacities(new ArrayList<>(capacityResponses));
}
podResponse.setHasAnnotation(annotationDao.hasAnnotations(pod.getUuid(), AnnotationService.EntityType.POD.name(),
@@ -1457,7 +1452,7 @@ public class ApiResponseHelper implements ResponseGenerator {
public static List getDataCenterCapacityResponse(Long zoneId) {
List capacities = ApiDBUtils.getCapacityByClusterPodZone(zoneId, null, null);
- Set capacityResponses = new HashSet();
+ Set capacityResponses = new HashSet<>();
for (SummedCapacity capacity : capacities) {
CapacityResponse capacityResponse = new CapacityResponse();
@@ -1481,18 +1476,18 @@ public class ApiResponseHelper implements ResponseGenerator {
// Do it for stats as well.
capacityResponses.addAll(getStatsCapacityresponse(null, null, null, zoneId));
- return new ArrayList(capacityResponses);
+ return new ArrayList<>(capacityResponses);
}
private static List getStatsCapacityresponse(Long poolId, Long clusterId, Long podId, Long zoneId) {
- List capacities = new ArrayList();
+ List capacities = new ArrayList<>();
capacities.add(ApiDBUtils.getStoragePoolUsedStats(poolId, clusterId, podId, zoneId));
if (clusterId == null && podId == null) {
capacities.add(ApiDBUtils.getSecondaryStorageUsedStats(poolId, zoneId));
capacities.add(ApiDBUtils.getObjectStorageUsedStats(zoneId));
}
- List capacityResponses = new ArrayList();
+ List capacityResponses = new ArrayList<>();
for (CapacityVO capacity : capacities) {
CapacityResponse capacityResponse = new CapacityResponse();
capacityResponse.setCapacityType(capacity.getCapacityType());
@@ -1610,7 +1605,7 @@ public class ApiResponseHelper implements ResponseGenerator {
if (showCapacities != null && showCapacities) {
List capacities = ApiDBUtils.getCapacityByClusterPodZone(null, null, cluster.getId());
- Set capacityResponses = new HashSet();
+ Set capacityResponses = new HashSet<>();
for (SummedCapacity capacity : capacities) {
CapacityResponse capacityResponse = new CapacityResponse();
@@ -1634,7 +1629,7 @@ public class ApiResponseHelper implements ResponseGenerator {
}
// Do it for stats as well.
capacityResponses.addAll(getStatsCapacityresponse(null, cluster.getId(), pod.getId(), pod.getDataCenterId()));
- clusterResponse.setCapacities(new ArrayList(capacityResponses));
+ clusterResponse.setCapacities(new ArrayList<>(capacityResponses));
}
clusterResponse.setHasAnnotation(annotationDao.hasAnnotations(cluster.getUuid(), AnnotationService.EntityType.CLUSTER.name(),
_accountMgr.isRootAdmin(CallContext.current().getCallingAccount().getId())));
@@ -1688,7 +1683,7 @@ public class ApiResponseHelper implements ResponseGenerator {
// set tag information
List<? extends ResourceTag> tags = ApiDBUtils.listByResourceTypeAndId(ResourceObjectType.PortForwardingRule, fwRule.getId());
- List tagResponses = new ArrayList();
+ List tagResponses = new ArrayList<>();
for (ResourceTag tag : tags) {
ResourceTagResponse tagResponse = createResourceTagResponse(tag, true);
CollectionUtils.addIgnoreNull(tagResponses, tagResponse);
@@ -1739,18 +1734,6 @@ public class ApiResponseHelper implements ResponseGenerator {
return response;
}
- /*
- @Override
- public List createUserVmResponse(String objectName, UserVm... userVms) {
- return createUserVmResponse(null, objectName, userVms);
- }
-
- @Override
- public List createUserVmResponse(String objectName, EnumSet details, UserVm... userVms) {
- return createUserVmResponse(null, objectName, userVms);
- }
- */
-
@Override
public List createUserVmResponse(ResponseView view, String objectName, EnumSet details, UserVm... userVms) {
List viewVms = ApiDBUtils.newUserVmView(userVms);
@@ -2146,7 +2129,7 @@ public class ApiResponseHelper implements ResponseGenerator {
@Override
public List<CapacityResponse> createCapacityResponse(List<? extends Capacity> result, DecimalFormat format) {
- List capacityResponses = new ArrayList();
+ List capacityResponses = new ArrayList<>();
for (Capacity summedCapacity : result) {
if (summedCapacity.getTotalCapacity() == 0 &&
@@ -2273,12 +2256,12 @@ public class ApiResponseHelper implements ResponseGenerator {
}
// Set accounts
- List projectIds = new ArrayList();
- List regularAccounts = new ArrayList();
+ List projectIds = new ArrayList<>();
+ List regularAccounts = new ArrayList<>();
for (String accountName : accountNames) {
Account account = ApiDBUtils.findAccountByNameDomain(accountName, templateOwner.getDomainId());
if (account == null) {
- logger.error("Missing Account " + accountName + " in domain " + templateOwner.getDomainId());
+ logger.error("Missing Account {} in domain {}", accountName, templateOwner.getDomainId());
continue;
}
@@ -2341,7 +2324,6 @@ public class ApiResponseHelper implements ResponseGenerator {
@Override
public SecurityGroupResponse createSecurityGroupResponseFromSecurityGroupRule(List<? extends SecurityRule> securityRules) {
SecurityGroupResponse response = new SecurityGroupResponse();
- Map securiytGroupAccounts = new HashMap();
if ((securityRules != null) && !securityRules.isEmpty()) {
SecurityGroupJoinVO securityGroup = ApiDBUtils.findSecurityGroupViewById(securityRules.get(0).getSecurityGroupId()).get(0);
@@ -2367,7 +2349,7 @@ public class ApiResponseHelper implements ResponseGenerator {
Long allowedSecurityGroupId = securityRule.getAllowedNetworkId();
if (allowedSecurityGroupId != null) {
List sgs = ApiDBUtils.findSecurityGroupViewById(allowedSecurityGroupId);
- if (sgs != null && sgs.size() > 0) {
+ if (sgs != null && !sgs.isEmpty()) {
SecurityGroupJoinVO sg = sgs.get(0);
securityGroupData.setSecurityGroupName(sg.getName());
securityGroupData.setAccountName(sg.getAccountName());
@@ -2410,7 +2392,7 @@ public class ApiResponseHelper implements ResponseGenerator {
}
}
Map<Service, Set<Provider>> serviceProviderMap = ApiDBUtils.listNetworkOfferingServices(offering.getId());
- List serviceResponses = new ArrayList();
+ List serviceResponses = new ArrayList<>();
for (Map.Entry<Service, Set<Provider>> entry : serviceProviderMap.entrySet()) {
Service service = entry.getKey();
Set<Provider> srvc_providers = entry.getValue();
@@ -2420,7 +2402,7 @@ public class ApiResponseHelper implements ResponseGenerator {
continue;
}
svcRsp.setName(service.getName());
- List providers = new ArrayList();
+ List providers = new ArrayList<>();
for (Provider provider : srvc_providers) {
if (provider != null) {
ProviderResponse providerRsp = new ProviderResponse();
@@ -2430,54 +2412,15 @@ public class ApiResponseHelper implements ResponseGenerator {
}
svcRsp.setProviders(providers);
if (Service.Lb == service) {
- List lbCapResponse = new ArrayList();
-
- CapabilityResponse lbIsoaltion = new CapabilityResponse();
- lbIsoaltion.setName(Capability.SupportedLBIsolation.getName());
- lbIsoaltion.setValue(offering.isDedicatedLB() ? "dedicated" : "shared");
- lbCapResponse.add(lbIsoaltion);
-
- CapabilityResponse eLb = new CapabilityResponse();
- eLb.setName(Capability.ElasticLb.getName());
- eLb.setValue(offering.isElasticLb() ? "true" : "false");
- lbCapResponse.add(eLb);
-
- CapabilityResponse inline = new CapabilityResponse();
- inline.setName(Capability.InlineMode.getName());
- inline.setValue(offering.isInline() ? "true" : "false");
- lbCapResponse.add(inline);
-
- CapabilityResponse vmAutoScaling = new CapabilityResponse();
- vmAutoScaling.setName(Capability.VmAutoScaling.getName());
- vmAutoScaling.setValue(offering.isSupportsVmAutoScaling() ? "true" : "false");
- lbCapResponse.add(vmAutoScaling);
+ List<CapabilityResponse> lbCapResponse = getCapabilityResponses(offering);
svcRsp.setCapabilities(lbCapResponse);
} else if (Service.SourceNat == service) {
- List<CapabilityResponse> capabilities = new ArrayList<CapabilityResponse>();
- CapabilityResponse sharedSourceNat = new CapabilityResponse();
- sharedSourceNat.setName(Capability.SupportedSourceNatTypes.getName());
- sharedSourceNat.setValue(offering.isSharedSourceNat() ? "perzone" : "peraccount");
- capabilities.add(sharedSourceNat);
-
- CapabilityResponse redundantRouter = new CapabilityResponse();
- redundantRouter.setName(Capability.RedundantRouter.getName());
- redundantRouter.setValue(offering.isRedundantRouter() ? "true" : "false");
- capabilities.add(redundantRouter);
+ List<CapabilityResponse> capabilities = getResponses(offering);
svcRsp.setCapabilities(capabilities);
} else if (service == Service.StaticNat) {
- List<CapabilityResponse> staticNatCapResponse = new ArrayList<CapabilityResponse>();
-
- CapabilityResponse eIp = new CapabilityResponse();
- eIp.setName(Capability.ElasticIp.getName());
- eIp.setValue(offering.isElasticIp() ? "true" : "false");
- staticNatCapResponse.add(eIp);
-
- CapabilityResponse associatePublicIp = new CapabilityResponse();
- associatePublicIp.setName(Capability.AssociatePublicIP.getName());
- associatePublicIp.setValue(offering.isAssociatePublicIP() ? "true" : "false");
- staticNatCapResponse.add(associatePublicIp);
+ List<CapabilityResponse> staticNatCapResponse = getCapabilityResponseList(offering);
svcRsp.setCapabilities(staticNatCapResponse);
}
@@ -2500,6 +2443,63 @@ public class ApiResponseHelper implements ResponseGenerator {
return response;
}
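+ /**
+ * Builds the static NAT capability responses (elastic IP and associate public IP)
+ * advertised by the given network offering.
+ */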
+ @NotNull
+ private static List<CapabilityResponse> getCapabilityResponseList(NetworkOffering offering) {
+ List<CapabilityResponse> staticNatCapResponse = new ArrayList<>();
+
+ CapabilityResponse eIp = new CapabilityResponse();
+ eIp.setName(Capability.ElasticIp.getName());
+ eIp.setValue(offering.isElasticIp() ? "true" : "false");
+ staticNatCapResponse.add(eIp);
+
+ CapabilityResponse associatePublicIp = new CapabilityResponse();
+ associatePublicIp.setName(Capability.AssociatePublicIP.getName());
+ associatePublicIp.setValue(offering.isAssociatePublicIP() ? "true" : "false");
+ staticNatCapResponse.add(associatePublicIp);
+ return staticNatCapResponse;
+ }
+
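+ /**
+ * Builds the source NAT capability responses (supported source NAT type and
+ * redundant router support) advertised by the given network offering.
+ */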
+ @NotNull
+ private static List<CapabilityResponse> getResponses(NetworkOffering offering) {
+ List<CapabilityResponse> capabilities = new ArrayList<>();
+ CapabilityResponse sharedSourceNat = new CapabilityResponse();
+ sharedSourceNat.setName(Capability.SupportedSourceNatTypes.getName());
+ sharedSourceNat.setValue(offering.isSharedSourceNat() ? "perzone" : "peraccount");
+ capabilities.add(sharedSourceNat);
+
+ CapabilityResponse redundantRouter = new CapabilityResponse();
+ redundantRouter.setName(Capability.RedundantRouter.getName());
+ redundantRouter.setValue(offering.isRedundantRouter() ? "true" : "false");
+ capabilities.add(redundantRouter);
+ return capabilities;
+ }
+
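+ /**
+ * Builds the load balancer capability responses (LB isolation, elastic LB,
+ * inline mode and VM autoscaling) advertised by the given network offering.
+ */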
+ @NotNull
+ private static List<CapabilityResponse> getCapabilityResponses(NetworkOffering offering) {
+ List<CapabilityResponse> lbCapResponse = new ArrayList<>();
+
+ CapabilityResponse lbIsolation = new CapabilityResponse();
+ lbIsolation.setName(Capability.SupportedLBIsolation.getName());
+ lbIsolation.setValue(offering.isDedicatedLB() ? "dedicated" : "shared");
+ lbCapResponse.add(lbIsolation);
+
+ CapabilityResponse eLb = new CapabilityResponse();
+ eLb.setName(Capability.ElasticLb.getName());
+ eLb.setValue(offering.isElasticLb() ? "true" : "false");
+ lbCapResponse.add(eLb);
+
+ CapabilityResponse inline = new CapabilityResponse();
+ inline.setName(Capability.InlineMode.getName());
+ inline.setValue(offering.isInline() ? "true" : "false");
+ lbCapResponse.add(inline);
+
+ CapabilityResponse vmAutoScaling = new CapabilityResponse();
+ vmAutoScaling.setName(Capability.VmAutoScaling.getName());
+ vmAutoScaling.setValue(offering.isSupportsVmAutoScaling() ? "true" : "false");
+ lbCapResponse.add(vmAutoScaling);
+ return lbCapResponse;
+ }
+
private void createCapabilityResponse(List<CapabilityResponse> capabilityResponses,
String name,
String value,
@@ -2571,8 +2571,8 @@ public class ApiResponseHelper implements ResponseGenerator {
String[] guestVmCidrPair = cidr.split("\\/");
String[] guestCidrPair = network.getNetworkCidr().split("\\/");
- Long guestVmCidrSize = Long.valueOf(guestVmCidrPair[1]);
- Long guestCidrSize = Long.valueOf(guestCidrPair[1]);
+ long guestVmCidrSize = Long.parseLong(guestVmCidrPair[1]);
+ long guestCidrSize = Long.parseLong(guestCidrPair[1]);
String[] guestVmIpRange = NetUtils.getIpRangeFromCidr(guestVmCidrPair[0], guestVmCidrSize);
String[] guestIpRange = NetUtils.getIpRangeFromCidr(guestCidrPair[0], guestCidrSize);
@@ -2669,7 +2669,7 @@ public class ApiResponseHelper implements ResponseGenerator {
// populate capability
Map<Service, Map<Capability, String>> serviceCapabilitiesMap = ApiDBUtils.getNetworkCapabilities(network.getId(), network.getDataCenterId());
Map<Service, Set<Provider>> serviceProviderMap = ApiDBUtils.listNetworkOfferingServices(network.getNetworkOfferingId());
- List<ServiceResponse> serviceResponses = new ArrayList<ServiceResponse>();
+ List<ServiceResponse> serviceResponses = new ArrayList<>();
if (serviceCapabilitiesMap != null) {
for (Map.Entry<Service, Map<Capability, String>> entry : serviceCapabilitiesMap.entrySet()) {
Service service = entry.getKey();
@@ -2766,7 +2766,7 @@ public class ApiResponseHelper implements ResponseGenerator {
// set tag information
List<? extends ResourceTag> tags = ApiDBUtils.listByResourceTypeAndId(ResourceObjectType.Network, network.getId());
- List<ResourceTagResponse> tagResponses = new ArrayList<ResourceTagResponse>();
+ List<ResourceTagResponse> tagResponses = new ArrayList<>();
for (ResourceTag tag : tags) {
ResourceTagResponse tagResponse = createResourceTagResponse(tag, true);
CollectionUtils.addIgnoreNull(tagResponses, tagResponse);
@@ -2785,8 +2785,8 @@ public class ApiResponseHelper implements ResponseGenerator {
response.setStrechedL2Subnet(network.isStrechedL2Network());
if (network.isStrechedL2Network()) {
- Set<String> networkSpannedZones = new HashSet<String>();
- List<VirtualMachine> vmInstances = new ArrayList<VirtualMachine>();
+ Set<String> networkSpannedZones = new HashSet<>();
+ List<VirtualMachine> vmInstances = new ArrayList<>();
vmInstances.addAll(ApiDBUtils.listUserVMsByNetworkId(network.getId()));
vmInstances.addAll(ApiDBUtils.listDomainRoutersByNetworkId(network.getId()));
for (VirtualMachine vm : vmInstances) {
@@ -2800,8 +2800,8 @@ public class ApiResponseHelper implements ResponseGenerator {
response.setCreated(network.getCreated());
response.setSupportsVmAutoScaling(networkOfferingDao.findByIdIncludingRemoved(network.getNetworkOfferingId()).isSupportsVmAutoScaling());
- Long bytesReceived = 0L;
- Long bytesSent = 0L;
+ long bytesReceived = 0L;
+ long bytesSent = 0L;
SearchBuilder<UserStatisticsVO> sb = userStatsDao.createSearchBuilder();
sb.and("networkId", sb.entity().getNetworkId(), Op.EQ);
SearchCriteria<UserStatisticsVO> sc = sb.create();
@@ -2856,7 +2856,7 @@ public class ApiResponseHelper implements ResponseGenerator {
private void setResponseAssociatedNetworkInformation(BaseResponseWithAssociatedNetwork response, Long networkId) {
final NetworkDetailVO detail = networkDetailsDao.findDetail(networkId, Network.AssociatedNetworkId);
if (detail != null) {
- Long associatedNetworkId = Long.valueOf(detail.getValue());
+ long associatedNetworkId = Long.parseLong(detail.getValue());
NetworkVO associatedNetwork = ApiDBUtils.findNetworkById(associatedNetworkId);
if (associatedNetwork != null) {
response.setAssociatedNetworkId(associatedNetwork.getUuid());
@@ -2927,7 +2927,7 @@ public class ApiResponseHelper implements ResponseGenerator {
// set tag information
List<? extends ResourceTag> tags = ApiDBUtils.listByResourceTypeAndId(ResourceObjectType.FirewallRule, fwRule.getId());
- List<ResourceTagResponse> tagResponses = new ArrayList<ResourceTagResponse>();
+ List<ResourceTagResponse> tagResponses = new ArrayList<>();
for (ResourceTag tag : tags) {
ResourceTagResponse tagResponse = createResourceTagResponse(tag, true);
CollectionUtils.addIgnoreNull(tagResponses, tagResponse);
@@ -2979,7 +2979,7 @@ public class ApiResponseHelper implements ResponseGenerator {
//set tag information
List<? extends ResourceTag> tags = ApiDBUtils.listByResourceTypeAndId(ResourceObjectType.NetworkACL, aclItem.getId());
- List<ResourceTagResponse> tagResponses = new ArrayList<ResourceTagResponse>();
+ List<ResourceTagResponse> tagResponses = new ArrayList<>();
for (ResourceTag tag : tags) {
ResourceTagResponse tagResponse = createResourceTagResponse(tag, true);
CollectionUtils.addIgnoreNull(tagResponses, tagResponse);
@@ -3068,7 +3068,7 @@ public class ApiResponseHelper implements ResponseGenerator {
private void populateAccount(ControlledEntityResponse response, long accountId) {
Account account = ApiDBUtils.findAccountById(accountId);
if (account == null) {
- logger.debug("Unable to find account with id: " + accountId);
+ logger.debug("Unable to find account with id: {}", accountId);
} else if (account.getType() == Account.Type.PROJECT) {
// find the project
Project project = ApiDBUtils.findProjectByProjectAccountId(account.getId());
@@ -3077,7 +3077,7 @@ public class ApiResponseHelper implements ResponseGenerator {
response.setProjectName(project.getName());
response.setAccountName(account.getAccountName());
} else {
- logger.debug("Unable to find project with id: " + account.getId());
+ logger.debug("Unable to find project with id: {}", account.getId());
}
} else {
response.setAccountName(account.getAccountName());
@@ -3201,7 +3201,26 @@ public class ApiResponseHelper implements ResponseGenerator {
response.setName(service.getName());
// set list of capabilities required for the service
- List<CapabilityResponse> capabilityResponses = new ArrayList<CapabilityResponse>();
+ List<CapabilityResponse> capabilityResponses = getCapabilityResponses(service);
+ response.setCapabilities(capabilityResponses);
+
+ // set list of providers providing this service
+ List<? extends Network.Provider> serviceProviders = ApiDBUtils.getProvidersForService(service);
+ List<ProviderResponse> serviceProvidersResponses = new ArrayList<>();
+ for (Network.Provider serviceProvider : serviceProviders) {
+ ProviderResponse serviceProviderResponse = createServiceProviderResponse(serviceProvider);
+ serviceProvidersResponses.add(serviceProviderResponse);
+ }
+ response.setProviders(serviceProvidersResponses);
+
+ response.setObjectName("networkservice");
+ return response;
+
+ }
+
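+ /**
+ * Builds a capability response for each capability declared by the given
+ * network service.
+ */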
+ @NotNull
+ private static List<CapabilityResponse> getCapabilityResponses(Service service) {
+ List<CapabilityResponse> capabilityResponses = new ArrayList<>();
Capability[] capabilities = service.getCapabilities();
for (Capability cap : capabilities) {
CapabilityResponse capabilityResponse = new CapabilityResponse();
@@ -3215,20 +3234,7 @@ public class ApiResponseHelper implements ResponseGenerator {
}
capabilityResponses.add(capabilityResponse);
}
- response.setCapabilities(capabilityResponses);
-
- // set list of providers providing this service
- List<? extends Network.Provider> serviceProviders = ApiDBUtils.getProvidersForService(service);
- List<ProviderResponse> serviceProvidersResponses = new ArrayList<ProviderResponse>();
- for (Network.Provider serviceProvider : serviceProviders) {
- ProviderResponse serviceProviderResponse = createServiceProviderResponse(serviceProvider);
- serviceProvidersResponses.add(serviceProviderResponse);
- }
- response.setProviders(serviceProvidersResponses);
-
- response.setObjectName("networkservice");
- return response;
-
+ return capabilityResponses;
}
private ProviderResponse createServiceProviderResponse(Provider serviceProvider) {
@@ -3255,7 +3261,7 @@ public class ApiResponseHelper implements ResponseGenerator {
response.setState(result.getState().toString());
// set enabled services
- List<String> services = new ArrayList<String>();
+ List<String> services = new ArrayList<>();
for (Service service : result.getEnabledServices()) {
services.add(service.getName());
}
@@ -3287,8 +3293,6 @@ public class ApiResponseHelper implements ResponseGenerator {
response.setXenLabel(result.getXenNetworkLabel());
response.setKvmLabel(result.getKvmNetworkLabel());
response.setVmwareLabel(result.getVmwareNetworkLabel());
- response.setHypervLabel(result.getHypervNetworkLabel());
- response.setOvm3Label(result.getOvm3NetworkLabel());
response.setVlan(result.getVlan());
response.setObjectName("traffictype");
@@ -3343,7 +3347,7 @@ public class ApiResponseHelper implements ResponseGenerator {
}
}
- List<LBStickinessPolicyResponse> responses = new ArrayList<LBStickinessPolicyResponse>();
+ List<LBStickinessPolicyResponse> responses = new ArrayList<>();
LBStickinessPolicyResponse ruleResponse = new LBStickinessPolicyResponse(stickinessPolicy);
responses.add(ruleResponse);
@@ -3371,7 +3375,7 @@ public class ApiResponseHelper implements ResponseGenerator {
}
}
- List<LBStickinessPolicyResponse> responses = new ArrayList<LBStickinessPolicyResponse>();
+ List<LBStickinessPolicyResponse> responses = new ArrayList<>();
for (StickinessPolicy stickinessPolicy : stickinessPolicies) {
LBStickinessPolicyResponse ruleResponse = new LBStickinessPolicyResponse(stickinessPolicy);
responses.add(ruleResponse);
@@ -3400,7 +3404,7 @@ public class ApiResponseHelper implements ResponseGenerator {
}
}
- List<LBHealthCheckPolicyResponse> responses = new ArrayList<LBHealthCheckPolicyResponse>();
+ List<LBHealthCheckPolicyResponse> responses = new ArrayList<>();
for (HealthCheckPolicy healthcheckPolicy : healthcheckPolicies) {
LBHealthCheckPolicyResponse ruleResponse = new LBHealthCheckPolicyResponse(healthcheckPolicy);
responses.add(ruleResponse);
@@ -3426,7 +3430,7 @@ public class ApiResponseHelper implements ResponseGenerator {
}
}
- List<LBHealthCheckPolicyResponse> responses = new ArrayList<LBHealthCheckPolicyResponse>();
+ List<LBHealthCheckPolicyResponse> responses = new ArrayList<>();
LBHealthCheckPolicyResponse ruleResponse = new LBHealthCheckPolicyResponse(healthcheckPolicy);
responses.add(ruleResponse);
hcResponse.setRules(responses);
@@ -3477,29 +3481,7 @@ public class ApiResponseHelper implements ResponseGenerator {
}
VpcOfferingResponse response = ApiDBUtils.newVpcOfferingResponse(offering);
Map<Service, Set<Provider>> serviceProviderMap = ApiDBUtils.listVpcOffServices(offering.getId());
- List<ServiceResponse> serviceResponses = new ArrayList<ServiceResponse>();
- for (Map.Entry<Service, Set<Provider>> entry : serviceProviderMap.entrySet()) {
- Service service = entry.getKey();
- Set<Provider> srvc_providers = entry.getValue();
-
- ServiceResponse svcRsp = new ServiceResponse();
- // skip gateway service
- if (service == Service.Gateway) {
- continue;
- }
- svcRsp.setName(service.getName());
- List<ProviderResponse> providers = new ArrayList<ProviderResponse>();
- for (Provider provider : srvc_providers) {
- if (provider != null) {
- ProviderResponse providerRsp = new ProviderResponse();
- providerRsp.setName(provider.getName());
- providers.add(providerRsp);
- }
- }
- svcRsp.setProviders(providers);
-
- serviceResponses.add(svcRsp);
- }
+ List<ServiceResponse> serviceResponses = getServiceResponses(serviceProviderMap);
response.setServices(serviceResponses);
return response;
}
@@ -3530,30 +3512,9 @@ public class ApiResponseHelper implements ResponseGenerator {
response.setAsNumber(asNumberVO.getAsNumber());
}
Map<Service, Set<Provider>> serviceProviderMap = ApiDBUtils.listVpcOffServices(vpc.getVpcOfferingId());
- List<ServiceResponse> serviceResponses = new ArrayList<ServiceResponse>();
- for (Map.Entry<Service, Set<Provider>> entry : serviceProviderMap.entrySet()) {
- Service service = entry.getKey();
- Set<Provider> serviceProviders = entry.getValue();
- ServiceResponse svcRsp = new ServiceResponse();
- // skip gateway service
- if (service == Service.Gateway) {
- continue;
- }
- svcRsp.setName(service.getName());
- List<ProviderResponse> providers = new ArrayList<ProviderResponse>();
- for (Provider provider : serviceProviders) {
- if (provider != null) {
- ProviderResponse providerRsp = new ProviderResponse();
- providerRsp.setName(provider.getName());
- providers.add(providerRsp);
- }
- }
- svcRsp.setProviders(providers);
+ List<ServiceResponse> serviceResponses = getServiceResponses(serviceProviderMap);
- serviceResponses.add(svcRsp);
- }
-
- List<NetworkResponse> networkResponses = new ArrayList<NetworkResponse>();
+ List<NetworkResponse> networkResponses = new ArrayList<>();
List<? extends Network> networks = ApiDBUtils.listVpcNetworks(vpc.getId());
for (Network network : networks) {
NetworkResponse ntwkRsp = createNetworkResponse(view, network);
@@ -3573,7 +3534,7 @@ public class ApiResponseHelper implements ResponseGenerator {
// set tag information
List<? extends ResourceTag> tags = ApiDBUtils.listByResourceTypeAndId(ResourceObjectType.Vpc, vpc.getId());
- List<ResourceTagResponse> tagResponses = new ArrayList<ResourceTagResponse>();
+ List<ResourceTagResponse> tagResponses = new ArrayList<>();
for (ResourceTag tag : tags) {
ResourceTagResponse tagResponse = createResourceTagResponse(tag, true);
CollectionUtils.addIgnoreNull(tagResponses, tagResponse);
@@ -3615,6 +3576,33 @@ public class ApiResponseHelper implements ResponseGenerator {
return response;
}
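+ /**
+ * Converts a service-to-providers map into service responses, skipping the
+ * Gateway service and listing the providers enabled for each remaining service.
+ */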
+ @NotNull
+ private static List<ServiceResponse> getServiceResponses(Map<Service, Set<Provider>> serviceProviderMap) {
+ List<ServiceResponse> serviceResponses = new ArrayList<>();
+ for (Map.Entry<Service, Set<Provider>> entry : serviceProviderMap.entrySet()) {
+ Service service = entry.getKey();
+ Set<Provider> serviceProviders = entry.getValue();
+ ServiceResponse svcRsp = new ServiceResponse();
+ // skip gateway service
+ if (service == Service.Gateway) {
+ continue;
+ }
+ svcRsp.setName(service.getName());
+ List<ProviderResponse> providers = new ArrayList<>();
+ for (Provider provider : serviceProviders) {
+ if (provider != null) {
+ ProviderResponse providerRsp = new ProviderResponse();
+ providerRsp.setName(provider.getName());
+ providers.add(providerRsp);
+ }
+ }
+ svcRsp.setProviders(providers);
+
+ serviceResponses.add(svcRsp);
+ }
+ return serviceResponses;
+ }
+
@Override
public PrivateGatewayResponse createPrivateGatewayResponse(ResponseView view, PrivateGateway result) {
PrivateGatewayResponse response = new PrivateGatewayResponse();
@@ -3744,7 +3732,7 @@ public class ApiResponseHelper implements ResponseGenerator {
response.setQuietTime(policy.getQuietTime());
response.setAction(policy.getAction().toString());
List<ConditionVO> vos = ApiDBUtils.getAutoScalePolicyConditions(policy.getId());
- ArrayList<ConditionResponse> conditions = new ArrayList<ConditionResponse>(vos.size());
+ ArrayList<ConditionResponse> conditions = new ArrayList<>(vos.size());
for (ConditionVO vo : vos) {
conditions.add(createConditionResponse(vo));
}
@@ -3800,15 +3788,15 @@ public class ApiResponseHelper implements ResponseGenerator {
}
}
- List<AutoScalePolicyResponse> scaleUpPoliciesResponse = new ArrayList<AutoScalePolicyResponse>();
- List<AutoScalePolicyResponse> scaleDownPoliciesResponse = new ArrayList<AutoScalePolicyResponse>();
+ List<AutoScalePolicyResponse> scaleUpPoliciesResponse = new ArrayList<>();
+ List<AutoScalePolicyResponse> scaleDownPoliciesResponse = new ArrayList<>();
response.setScaleUpPolicies(scaleUpPoliciesResponse);
response.setScaleDownPolicies(scaleDownPoliciesResponse);
response.setObjectName("autoscalevmgroup");
// Fetch policies for vmgroup
- List scaleUpPolicies = new ArrayList();
- List scaleDownPolicies = new ArrayList