diff --git a/client/tomcatconf/componentContext.xml.in b/client/tomcatconf/componentContext.xml.in
index 9c92764377c..49b7e34f3c3 100644
--- a/client/tomcatconf/componentContext.xml.in
+++ b/client/tomcatconf/componentContext.xml.in
@@ -105,12 +105,6 @@
-
-
-
-
-
-
diff --git a/client/tomcatconf/components.xml.in b/client/tomcatconf/components.xml.in
index c62abe8ff2a..7d86a1c2cb9 100755
--- a/client/tomcatconf/components.xml.in
+++ b/client/tomcatconf/components.xml.in
@@ -66,10 +66,6 @@ under the License.
-
-
-
-
diff --git a/core/src/com/cloud/storage/DiskOfferingVO.java b/core/src/com/cloud/storage/DiskOfferingVO.java
index 5f4f18bcd34..e4fc21c7c13 100755
--- a/core/src/com/cloud/storage/DiskOfferingVO.java
+++ b/core/src/com/cloud/storage/DiskOfferingVO.java
@@ -311,4 +311,8 @@ public class DiskOfferingVO implements DiskOffering {
public int getSortKey() {
return sortKey;
}
+
+ public void setRecreatable(boolean recreatable) {
+ this.recreatable = recreatable;
+ }
}
diff --git a/server/src/com/cloud/storage/allocator/StoragePoolAllocator.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/StoragePoolAllocator.java
similarity index 79%
rename from server/src/com/cloud/storage/allocator/StoragePoolAllocator.java
rename to engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/StoragePoolAllocator.java
index 1c02c6cb669..ffced54c28c 100644
--- a/server/src/com/cloud/storage/allocator/StoragePoolAllocator.java
+++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/StoragePoolAllocator.java
@@ -14,14 +14,12 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
-package com.cloud.storage.allocator;
+package org.apache.cloudstack.engine.subsystem.api.storage;
import java.util.List;
-import java.util.Set;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
-import com.cloud.host.Host;
import com.cloud.storage.StoragePool;
import com.cloud.utils.component.Adapter;
import com.cloud.vm.DiskProfile;
@@ -31,12 +29,6 @@ import com.cloud.vm.VirtualMachineProfile;
/**
*/
public interface StoragePoolAllocator extends Adapter {
-
- //keeping since storageMgr is using this API for some existing functionalities
- List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, long dcId, long podId, Long clusterId, Long hostId, Set<? extends StoragePool> avoids, int returnUpTo);
-
- String chooseStorageIp(VirtualMachine vm, Host host, Host storage);
-
/**
* Determines which storage pools are suitable for the guest virtual machine
*
diff --git a/engine/storage/integration-test/pom.xml b/engine/storage/integration-test/pom.xml
index 368a4e301ab..1bce37afd23 100644
--- a/engine/storage/integration-test/pom.xml
+++ b/engine/storage/integration-test/pom.xml
@@ -105,6 +105,29 @@
+
+ maven-antrun-plugin
+ 1.7
+
+
+ generate-resource
+ generate-resources
+
+ run
+
+
+
+
+
+
+
+
+
+
+
+
+
maven-surefire-plugin
diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/allocator/StorageAllocatorTest.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/allocator/StorageAllocatorTest.java
new file mode 100644
index 00000000000..25922063b2d
--- /dev/null
+++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/allocator/StorageAllocatorTest.java
@@ -0,0 +1,423 @@
+package org.apache.cloudstack.storage.allocator;
+
+import static org.junit.Assert.fail;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+import javax.inject.Inject;
+
+import junit.framework.Assert;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mockito;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
+
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.DataCenterVO;
+import com.cloud.dc.HostPodVO;
+import com.cloud.dc.DataCenter.NetworkType;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.dc.dao.DataCenterDao;
+import com.cloud.dc.dao.HostPodDao;
+import com.cloud.deploy.DataCenterDeployment;
+import com.cloud.deploy.DeploymentPlan;
+import com.cloud.deploy.DeploymentPlanner.ExcludeList;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.org.Cluster.ClusterType;
+import com.cloud.org.Managed.ManagedState;
+import com.cloud.storage.DiskOfferingVO;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolDetailVO;
+import com.cloud.storage.StoragePoolStatus;
+import com.cloud.storage.Volume;
+import com.cloud.storage.VolumeVO;
+import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.storage.dao.DiskOfferingDao;
+import com.cloud.storage.dao.StoragePoolDao;
+import com.cloud.storage.dao.StoragePoolDetailsDao;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.utils.component.ComponentContext;
+import com.cloud.vm.DiskProfile;
+import com.cloud.vm.VirtualMachineProfile;
+
+@RunWith(SpringJUnit4ClassRunner.class)
+@ContextConfiguration(locations = "classpath:/storageContext.xml")
+public class StorageAllocatorTest {
+ @Inject
+ StoragePoolDao storagePoolDao;
+ @Inject
+ StorageManager storageMgr;
+ @Inject
+ DiskOfferingDao diskOfferingDao;
+ @Inject
+ VolumeDao volumeDao;
+ @Inject
+ HostPodDao podDao;
+ @Inject
+ ClusterDao clusterDao;
+ @Inject
+ DataCenterDao dcDao;
+ @Inject
+ StoragePoolDetailsDao poolDetailsDao;
+ @Inject
+ DataStoreProviderManager providerMgr;
+ Long dcId = 1l;
+ Long podId = 1l;
+ Long clusterId = 1l;
+ Long volumeId = null;
+ Long diskOfferingId = null;
+ Long storagePoolId = null;
+ VolumeVO volume = null;
+ DiskOfferingVO diskOffering = null;
+ StoragePoolVO storage = null;
+
+ @Before
+ public void setup() throws Exception {
+ ComponentContext.initComponentsLifeCycle();
+
+ }
+
+ protected void createDb() {
+ DataCenterVO dc = new DataCenterVO(UUID.randomUUID().toString(), "test", "8.8.8.8", null, "10.0.0.1", null, "10.0.0.1/24",
+ null, null, NetworkType.Basic, null, null, true, true);
+ dc = dcDao.persist(dc);
+ dcId = dc.getId();
+
+ HostPodVO pod = new HostPodVO(UUID.randomUUID().toString(), dc.getId(), "255.255.255.255", "", 8, "test");
+ pod = podDao.persist(pod);
+ podId = pod.getId();
+
+ ClusterVO cluster = new ClusterVO(dc.getId(), pod.getId(), "devcloud cluster");
+ cluster.setHypervisorType(HypervisorType.XenServer.toString());
+ cluster.setClusterType(ClusterType.CloudManaged);
+ cluster.setManagedState(ManagedState.Managed);
+ cluster = clusterDao.persist(cluster);
+ clusterId = cluster.getId();
+
+ DataStoreProvider provider = providerMgr.getDataStoreProvider("ancient primary data store provider");
+ storage = new StoragePoolVO();
+ storage.setDataCenterId(dcId);
+ storage.setPodId(podId);
+ storage.setPoolType(StoragePoolType.NetworkFilesystem);
+ storage.setClusterId(clusterId);
+ storage.setStatus(StoragePoolStatus.Up);
+ storage.setScope(ScopeType.CLUSTER);
+ storage.setAvailableBytes(1000);
+ storage.setCapacityBytes(20000);
+ storage.setHostAddress(UUID.randomUUID().toString());
+ storage.setPath(UUID.randomUUID().toString());
+ storage.setStorageProviderId(provider.getId());
+ storage = storagePoolDao.persist(storage);
+ storagePoolId = storage.getId();
+
+ storageMgr.createCapacityEntry(storage.getId());
+
+ diskOffering = new DiskOfferingVO();
+ diskOffering.setDiskSize(500);
+ diskOffering.setName("test-disk");
+ diskOffering.setSystemUse(false);
+ diskOffering.setUseLocalStorage(false);
+ diskOffering.setCustomized(false);
+ diskOffering.setRecreatable(false);
+ diskOffering = diskOfferingDao.persist(diskOffering);
+ diskOfferingId = diskOffering.getId();
+
+ volume = new VolumeVO(Volume.Type.ROOT, "volume", dcId, 1, 1, diskOffering.getId(), diskOffering.getDiskSize());
+ volume = volumeDao.persist(volume);
+ volumeId = volume.getId();
+ }
+
+
+
+ @Inject
+ List<StoragePoolAllocator> allocators;
+ @Test
+ public void testClusterAllocatorMultiplePools() {
+ Long newStorageId = null;
+ try {
+ createDb();
+
+ DataStoreProvider provider = providerMgr.getDataStoreProvider("ancient primary data store provider");
+ storage = new StoragePoolVO();
+ storage.setDataCenterId(dcId);
+ storage.setPodId(podId);
+ storage.setPoolType(StoragePoolType.NetworkFilesystem);
+ storage.setClusterId(clusterId);
+ storage.setStatus(StoragePoolStatus.Up);
+ storage.setScope(ScopeType.CLUSTER);
+ storage.setAvailableBytes(1000);
+ storage.setCapacityBytes(20000);
+ storage.setHostAddress(UUID.randomUUID().toString());
+ storage.setPath(UUID.randomUUID().toString());
+ storage.setStorageProviderId(provider.getId());
+ StoragePoolVO newStorage = storagePoolDao.persist(storage);
+ newStorageId = newStorage.getId();
+
+ DiskProfile profile = new DiskProfile(volume, diskOffering, HypervisorType.XenServer);
+ VirtualMachineProfile vmProfile = Mockito.mock(VirtualMachineProfile.class);
+ Mockito.when(storageMgr.storagePoolHasEnoughSpace(
+ Mockito.anyListOf(Volume.class), Mockito.any(StoragePool.class))).thenReturn(true);
+ DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);
+ int foundAcct = 0;
+ for (StoragePoolAllocator allocator : allocators) {
+ List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
+ if (!pools.isEmpty()) {
+ Assert.assertEquals(pools.size(), 1);
+ foundAcct++;
+ }
+ }
+
+ if (foundAcct > 1 || foundAcct == 0) {
+ Assert.fail();
+ }
+ } catch (Exception e) {
+ cleanDb();
+
+ if (newStorageId != null) {
+ storagePoolDao.remove(newStorageId);
+ }
+ Assert.fail();
+ }
+ }
+
+ @Test
+ public void testClusterAllocator() {
+ try {
+ createDb();
+ DiskProfile profile = new DiskProfile(volume, diskOffering, HypervisorType.XenServer);
+ VirtualMachineProfile vmProfile = Mockito.mock(VirtualMachineProfile.class);
+ Mockito.when(storageMgr.storagePoolHasEnoughSpace(
+ Mockito.anyListOf(Volume.class), Mockito.any(StoragePool.class))).thenReturn(true);
+ DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);
+ int foundAcct = 0;
+ for (StoragePoolAllocator allocator : allocators) {
+ List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
+ if (!pools.isEmpty()) {
+ Assert.assertEquals(pools.get(0).getId(), storage.getId());
+ foundAcct++;
+ }
+ }
+
+ if (foundAcct > 1 || foundAcct == 0) {
+ Assert.fail();
+ }
+ } catch (Exception e) {
+ cleanDb();
+ Assert.fail();
+ }
+ }
+
+
+ @Test
+ public void testClusterAllocatorWithTags() {
+ try {
+ createDb();
+ StoragePoolDetailVO detailVO = new StoragePoolDetailVO(this.storagePoolId, "high", "true");
+ poolDetailsDao.persist(detailVO);
+ DiskOfferingVO diskOff = this.diskOfferingDao.findById(diskOffering.getId());
+ List<String> tags = new ArrayList<String>();
+ tags.add("high");
+ diskOff.setTagsArray(tags);
+ diskOfferingDao.update(diskOff.getId(), diskOff);
+
+ DiskProfile profile = new DiskProfile(volume, diskOff, HypervisorType.XenServer);
+ VirtualMachineProfile vmProfile = Mockito.mock(VirtualMachineProfile.class);
+ Mockito.when(storageMgr.storagePoolHasEnoughSpace(
+ Mockito.anyListOf(Volume.class), Mockito.any(StoragePool.class))).thenReturn(true);
+ DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);
+ int foundAcct = 0;
+ for (StoragePoolAllocator allocator : allocators) {
+ List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
+ if (!pools.isEmpty()) {
+ Assert.assertEquals(pools.get(0).getId(), storage.getId());
+ foundAcct++;
+ }
+ }
+
+ if (foundAcct > 1 || foundAcct == 0) {
+ Assert.fail();
+ }
+ } catch (Exception e) {
+ cleanDb();
+ Assert.fail();
+ }
+ }
+
+ @Test
+ public void testClusterAllocatorWithWrongTag() {
+ try {
+ createDb();
+ StoragePoolDetailVO detailVO = new StoragePoolDetailVO(this.storagePoolId, "high", "true");
+ poolDetailsDao.persist(detailVO);
+ DiskOfferingVO diskOff = this.diskOfferingDao.findById(diskOffering.getId());
+ List<String> tags = new ArrayList<String>();
+ tags.add("low");
+ diskOff.setTagsArray(tags);
+ diskOfferingDao.update(diskOff.getId(), diskOff);
+
+ DiskProfile profile = new DiskProfile(volume, diskOff, HypervisorType.XenServer);
+ VirtualMachineProfile vmProfile = Mockito.mock(VirtualMachineProfile.class);
+ Mockito.when(storageMgr.storagePoolHasEnoughSpace(
+ Mockito.anyListOf(Volume.class), Mockito.any(StoragePool.class))).thenReturn(true);
+ DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);
+ int foundAcct = 0;
+ for (StoragePoolAllocator allocator : allocators) {
+ List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
+ if (!pools.isEmpty()) {
+ foundAcct++;
+ }
+ }
+
+ if (foundAcct != 0) {
+ Assert.fail();
+ }
+ } catch (Exception e) {
+ cleanDb();
+ Assert.fail();
+ }
+ }
+
+ @Test
+ public void testZoneWideStorageAllocator() {
+ try {
+ createDb();
+
+ StoragePoolVO pool = storagePoolDao.findById(storagePoolId);
+ pool.setScope(ScopeType.ZONE);
+ storagePoolDao.update(pool.getId(), pool);
+
+ DiskProfile profile = new DiskProfile(volume, diskOffering, HypervisorType.KVM);
+ VirtualMachineProfile vmProfile = Mockito.mock(VirtualMachineProfile.class);
+ Mockito.when(vmProfile.getHypervisorType()).thenReturn(HypervisorType.KVM);
+ Mockito.when(storageMgr.storagePoolHasEnoughSpace(
+ Mockito.anyListOf(Volume.class), Mockito.any(StoragePool.class))).thenReturn(true);
+ DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);
+ int foundAcct = 0;
+ for (StoragePoolAllocator allocator : allocators) {
+ List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
+ if (!pools.isEmpty()) {
+ Assert.assertEquals(pools.get(0).getId(), storage.getId());
+ foundAcct++;
+ }
+ }
+
+ if (foundAcct > 1 || foundAcct == 0) {
+ Assert.fail();
+ }
+ } catch (Exception e) {
+ cleanDb();
+ Assert.fail();
+ }
+ }
+
+ @Test
+ public void testPoolStateIsNotUp() {
+ try {
+ createDb();
+
+ StoragePoolVO pool = storagePoolDao.findById(storagePoolId);
+ pool.setScope(ScopeType.ZONE);
+ pool.setStatus(StoragePoolStatus.Maintenance);
+ storagePoolDao.update(pool.getId(), pool);
+
+ DiskProfile profile = new DiskProfile(volume, diskOffering, HypervisorType.XenServer);
+ VirtualMachineProfile vmProfile = Mockito.mock(VirtualMachineProfile.class);
+ Mockito.when(storageMgr.storagePoolHasEnoughSpace(
+ Mockito.anyListOf(Volume.class), Mockito.any(StoragePool.class))).thenReturn(true);
+ DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);
+ int foundAcct = 0;
+ for (StoragePoolAllocator allocator : allocators) {
+ List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
+ if (!pools.isEmpty()) {
+ Assert.assertEquals(pools.get(0).getId(), storage.getId());
+ foundAcct++;
+ }
+ }
+
+ if (foundAcct == 1) {
+ Assert.fail();
+ }
+ } catch (Exception e) {
+ cleanDb();
+ Assert.fail();
+ }
+ }
+
+
+
+
+ @Test
+ public void testLocalStorageAllocator() {
+ try {
+ createDb();
+
+ StoragePoolVO pool = storagePoolDao.findById(storagePoolId);
+ pool.setScope(ScopeType.HOST);
+ storagePoolDao.update(pool.getId(), pool);
+
+ DiskOfferingVO diskOff = diskOfferingDao.findById(diskOfferingId);
+ diskOff.setUseLocalStorage(true);
+ diskOfferingDao.update(diskOfferingId, diskOff);
+
+ DiskProfile profile = new DiskProfile(volume, diskOff, HypervisorType.XenServer);
+ VirtualMachineProfile vmProfile = Mockito.mock(VirtualMachineProfile.class);
+ Mockito.when(storageMgr.storagePoolHasEnoughSpace(
+ Mockito.anyListOf(Volume.class), Mockito.any(StoragePool.class))).thenReturn(true);
+ DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);
+ int foundAcct = 0;
+ for (StoragePoolAllocator allocator : allocators) {
+ List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
+ if (!pools.isEmpty()) {
+ Assert.assertEquals(pools.get(0).getId(), storage.getId());
+ foundAcct++;
+ }
+ }
+
+ if (foundAcct > 1 || foundAcct == 0) {
+ Assert.fail();
+ }
+ } catch (Exception e) {
+ cleanDb();
+ Assert.fail();
+ }
+ }
+
+ protected void cleanDb() {
+ if (volumeId != null) {
+ volumeDao.remove(volumeId);
+ volumeId = null;
+ }
+ if (diskOfferingId != null) {
+ diskOfferingDao.remove(diskOfferingId);
+ diskOfferingId = null;
+ }
+ if (storagePoolId != null) {
+ storagePoolDao.remove(storagePoolId);
+ storagePoolId = null;
+ }
+ if (clusterId != null) {
+ clusterDao.remove(clusterId);
+ clusterId = null;
+ }
+ if (podId != null) {
+ podDao.remove(podId);
+ podId = null;
+ }
+ if (dcId != null) {
+ dcDao.remove(dcId);
+ dcId = null;
+ }
+ }
+
+}
diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/allocator/StorageAllocatorTestConfiguration.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/allocator/StorageAllocatorTestConfiguration.java
new file mode 100644
index 00000000000..cb23adc3f3f
--- /dev/null
+++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/allocator/StorageAllocatorTestConfiguration.java
@@ -0,0 +1,63 @@
+package org.apache.cloudstack.storage.allocator;
+
+import java.io.IOException;
+
+import org.apache.cloudstack.storage.allocator.StorageAllocatorTestConfiguration.Library;
+import org.mockito.Mockito;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.ComponentScan;
+import org.springframework.context.annotation.ComponentScan.Filter;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.FilterType;
+import org.springframework.core.type.classreading.MetadataReader;
+import org.springframework.core.type.classreading.MetadataReaderFactory;
+import org.springframework.core.type.filter.TypeFilter;
+
+import com.cloud.cluster.agentlb.dao.HostTransferMapDaoImpl;
+import com.cloud.configuration.dao.ConfigurationDaoImpl;
+import com.cloud.dc.dao.ClusterDaoImpl;
+import com.cloud.dc.dao.DataCenterDaoImpl;
+import com.cloud.domain.dao.DomainDaoImpl;
+import com.cloud.host.dao.HostDaoImpl;
+import com.cloud.host.dao.HostDetailsDaoImpl;
+import com.cloud.host.dao.HostTagsDaoImpl;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.dao.StoragePoolDaoImpl;
+import com.cloud.storage.dao.StoragePoolDetailsDaoImpl;
+import com.cloud.storage.dao.VMTemplateDaoImpl;
+import com.cloud.utils.component.SpringComponentScanUtils;
+import com.cloud.vm.UserVmManager;
+
+
+@Configuration
+@ComponentScan(basePackageClasses={
+ StoragePoolDetailsDaoImpl.class,
+ StoragePoolDaoImpl.class,
+ VMTemplateDaoImpl.class,
+ HostDaoImpl.class,
+ DomainDaoImpl.class,
+ DataCenterDaoImpl.class,
+ },
+ includeFilters={@Filter(value=Library.class, type=FilterType.CUSTOM)},
+ useDefaultFilters=false
+ )
+public class StorageAllocatorTestConfiguration {
+ @Bean
+ public UserVmManager UserVmManager() {
+ return Mockito.mock(UserVmManager.class);
+ }
+ @Bean
+ public StorageManager StorageManager() {
+ return Mockito.mock(StorageManager.class);
+ }
+
+ public static class Library implements TypeFilter {
+
+ @Override
+ public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException {
+ mdr.getClassMetadata().getClassName();
+ ComponentScan cs = StorageAllocatorTestConfiguration.class.getAnnotation(ComponentScan.class);
+ return SpringComponentScanUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs);
+ }
+ }
+}
diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java
index 2ad52159afc..a063bdda8ad 100644
--- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java
+++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java
@@ -20,6 +20,7 @@ import java.io.IOException;
import org.apache.cloudstack.acl.APIChecker;
import org.apache.cloudstack.engine.service.api.OrchestrationService;
+import org.apache.cloudstack.framework.rpc.RpcProvider;
import org.apache.cloudstack.storage.HostEndpointRpcServer;
import org.apache.cloudstack.storage.endpoint.EndPointSelector;
import org.apache.cloudstack.storage.test.ChildTestConfiguration.Library;
@@ -35,46 +36,55 @@ import org.springframework.core.type.filter.TypeFilter;
import com.cloud.agent.AgentManager;
import com.cloud.alert.AlertManager;
+import com.cloud.capacity.dao.CapacityDaoImpl;
import com.cloud.cluster.ClusteredAgentRebalanceService;
-import com.cloud.cluster.agentlb.dao.HostTransferMapDao;
import com.cloud.cluster.agentlb.dao.HostTransferMapDaoImpl;
-import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.configuration.dao.ConfigurationDaoImpl;
-import com.cloud.dc.dao.ClusterDao;
+import com.cloud.dc.ClusterDetailsDaoImpl;
import com.cloud.dc.dao.ClusterDaoImpl;
-import com.cloud.dc.dao.DataCenterDao;
-import com.cloud.dc.dao.DataCenterDaoImpl;
import com.cloud.dc.dao.DataCenterIpAddressDaoImpl;
import com.cloud.dc.dao.DataCenterLinkLocalIpAddressDaoImpl;
import com.cloud.dc.dao.DataCenterVnetDaoImpl;
import com.cloud.dc.dao.DcDetailsDaoImpl;
-import com.cloud.dc.dao.HostPodDao;
import com.cloud.dc.dao.HostPodDaoImpl;
import com.cloud.dc.dao.PodVlanDaoImpl;
-import com.cloud.domain.dao.DomainDao;
-import com.cloud.domain.dao.DomainDaoImpl;
-import com.cloud.host.dao.HostDao;
-import com.cloud.host.dao.HostDetailsDao;
+import com.cloud.host.dao.HostDaoImpl;
import com.cloud.host.dao.HostDetailsDaoImpl;
-import com.cloud.host.dao.HostTagsDao;
import com.cloud.host.dao.HostTagsDaoImpl;
+import com.cloud.resource.ResourceManager;
+import com.cloud.server.ManagementServer;
import com.cloud.server.auth.UserAuthenticator;
-import com.cloud.storage.dao.StoragePoolHostDao;
+import com.cloud.service.dao.ServiceOfferingDaoImpl;
+import com.cloud.storage.OCFS2ManagerImpl;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.VolumeManager;
+import com.cloud.storage.dao.DiskOfferingDaoImpl;
+import com.cloud.storage.dao.SnapshotDaoImpl;
import com.cloud.storage.dao.StoragePoolHostDaoImpl;
+import com.cloud.storage.dao.StoragePoolWorkDaoImpl;
import com.cloud.storage.dao.VMTemplateDaoImpl;
-import com.cloud.storage.dao.VMTemplateDetailsDao;
import com.cloud.storage.dao.VMTemplateDetailsDaoImpl;
import com.cloud.storage.dao.VMTemplateHostDaoImpl;
import com.cloud.storage.dao.VMTemplatePoolDaoImpl;
-import com.cloud.storage.dao.VMTemplateZoneDao;
import com.cloud.storage.dao.VMTemplateZoneDaoImpl;
import com.cloud.storage.dao.VolumeDaoImpl;
import com.cloud.storage.dao.VolumeHostDaoImpl;
+import com.cloud.storage.s3.S3Manager;
import com.cloud.storage.snapshot.SnapshotManager;
+import com.cloud.storage.swift.SwiftManager;
import com.cloud.tags.dao.ResourceTagsDaoImpl;
+import com.cloud.template.TemplateManager;
+import com.cloud.user.dao.UserDaoImpl;
import com.cloud.utils.component.SpringComponentScanUtils;
+import com.cloud.vm.VirtualMachineManager;
+import com.cloud.vm.dao.ConsoleProxyDaoImpl;
+import com.cloud.vm.dao.DomainRouterDao;
import com.cloud.vm.dao.NicDaoImpl;
+import com.cloud.vm.dao.SecondaryStorageVmDaoImpl;
+import com.cloud.vm.dao.UserVmDaoImpl;
+import com.cloud.vm.dao.UserVmDetailsDaoImpl;
import com.cloud.vm.dao.VMInstanceDaoImpl;
+import com.cloud.vm.snapshot.dao.VMSnapshotDaoImpl;
@Configuration
@ComponentScan(basePackageClasses={
NicDaoImpl.class,
@@ -85,88 +95,62 @@ import com.cloud.vm.dao.VMInstanceDaoImpl;
VMTemplatePoolDaoImpl.class,
ResourceTagsDaoImpl.class,
VMTemplateDaoImpl.class,
- MockStorageMotionStrategy.class
+ MockStorageMotionStrategy.class,
+ ConfigurationDaoImpl.class,
+ ClusterDaoImpl.class,
+ HostPodDaoImpl.class,
+ VMTemplateZoneDaoImpl.class,
+ VMTemplateDetailsDaoImpl.class,
+ HostDaoImpl.class,
+ HostDetailsDaoImpl.class,
+ HostTagsDaoImpl.class,
+ HostTransferMapDaoImpl.class,
+ DataCenterIpAddressDaoImpl.class,
+ DataCenterLinkLocalIpAddressDaoImpl.class,
+ DataCenterVnetDaoImpl.class,
+ PodVlanDaoImpl.class,
+ DcDetailsDaoImpl.class,
+ DiskOfferingDaoImpl.class,
+ StoragePoolHostDaoImpl.class,
+ UserVmDaoImpl.class,
+ UserVmDetailsDaoImpl.class,
+ ServiceOfferingDaoImpl.class,
+ CapacityDaoImpl.class,
+ SnapshotDaoImpl.class,
+ VMSnapshotDaoImpl.class,
+ OCFS2ManagerImpl.class,
+ ClusterDetailsDaoImpl.class,
+ SecondaryStorageVmDaoImpl.class,
+
+ ConsoleProxyDaoImpl.class,
+ StoragePoolWorkDaoImpl.class,
+ UserDaoImpl.class
+
},
includeFilters={@Filter(value=Library.class, type=FilterType.CUSTOM)},
useDefaultFilters=false
)
public class ChildTestConfiguration extends TestConfiguration {
- @Override
- @Bean
- public HostDao hostDao() {
- HostDao dao = super.hostDao();
- HostDao nDao = Mockito.spy(dao);
- return nDao;
- }
-
@Bean
public EndPointSelector selector() {
return Mockito.mock(EndPointSelector.class);
}
- @Bean
- public DataCenterDao dcDao() {
- return new DataCenterDaoImpl();
- }
- @Bean
- public HostDetailsDao hostDetailsDao() {
- return new HostDetailsDaoImpl();
- }
-
- @Bean
- public HostTagsDao hostTagsDao() {
- return new HostTagsDaoImpl();
- }
-
- @Bean ClusterDao clusterDao() {
- return new ClusterDaoImpl();
- }
-
- @Bean HostTransferMapDao hostTransferDao() {
- return new HostTransferMapDaoImpl();
- }
- @Bean DataCenterIpAddressDaoImpl dataCenterIpAddressDaoImpl() {
- return new DataCenterIpAddressDaoImpl();
- }
- @Bean DataCenterLinkLocalIpAddressDaoImpl dataCenterLinkLocalIpAddressDaoImpl() {
- return new DataCenterLinkLocalIpAddressDaoImpl();
- }
- @Bean DataCenterVnetDaoImpl dataCenterVnetDaoImpl() {
- return new DataCenterVnetDaoImpl();
- }
- @Bean PodVlanDaoImpl podVlanDaoImpl() {
- return new PodVlanDaoImpl();
- }
- @Bean DcDetailsDaoImpl dcDetailsDaoImpl() {
- return new DcDetailsDaoImpl();
- }
- @Bean HostPodDao hostPodDao() {
- return new HostPodDaoImpl();
- }
- @Bean StoragePoolHostDao storagePoolHostDao() {
- return new StoragePoolHostDaoImpl();
- }
- @Bean VMTemplateZoneDao templateZoneDao() {
- return new VMTemplateZoneDaoImpl();
- }
- @Bean VMTemplateDetailsDao templateDetailsDao() {
- return new VMTemplateDetailsDaoImpl();
- }
- @Bean ConfigurationDao configDao() {
- return new ConfigurationDaoImpl();
- }
+
@Bean
public AgentManager agentMgr() {
return new DirectAgentManagerSimpleImpl();
}
- @Bean DomainDao domainDao() {
- return new DomainDaoImpl();
- }
@Bean
public HostEndpointRpcServer rpcServer() {
return new MockHostEndpointRpcServerDirectCallResource();
}
+
+ @Bean
+ public RpcProvider rpcProvider() {
+ return Mockito.mock(RpcProvider.class);
+ }
@Bean
public ClusteredAgentRebalanceService _rebalanceService() {
return Mockito.mock(ClusteredAgentRebalanceService.class);
@@ -183,12 +167,50 @@ public class ChildTestConfiguration extends TestConfiguration {
public APIChecker apiChecker() {
return Mockito.mock(APIChecker.class);
}
+ @Bean
+ public TemplateManager templateMgr() {
+ return Mockito.mock(TemplateManager.class);
+ }
+ @Bean
+ public VolumeManager volumeMgr() {
+ return Mockito.mock(VolumeManager.class);
+ }
+ @Bean
+ public SwiftManager switfMgr() {
+ return Mockito.mock(SwiftManager.class);
+ }
+ @Bean
+ public ManagementServer server() {
+ return Mockito.mock(ManagementServer.class);
+ }
+ @Bean
+ public VirtualMachineManager vmMgr() {
+ return Mockito.mock(VirtualMachineManager.class);
+ }
+
+ @Bean
+ public S3Manager s3Mgr() {
+ return Mockito.mock(S3Manager.class);
+ }
@Bean
public SnapshotManager snapshotMgr() {
return Mockito.mock(SnapshotManager.class);
}
+ @Bean
+ public ResourceManager resourceMgr() {
+ return Mockito.mock(ResourceManager.class);
+ }
+ @Bean
+ public DomainRouterDao domainRouterDao() {
+ return Mockito.mock(DomainRouterDao.class);
+ }
+ @Bean
+ public StorageManager storageMgr() {
+ return Mockito.mock(StorageManager.class);
+ }
+
@Bean
public AlertManager alertMgr() {
return Mockito.mock(AlertManager.class);
@@ -204,9 +226,5 @@ public class ChildTestConfiguration extends TestConfiguration {
}
}
-/* @Override
- @Bean
- public PrimaryDataStoreDao primaryDataStoreDao() {
- return Mockito.mock(PrimaryDataStoreDaoImpl.class);
- }*/
+
}
diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestConfiguration.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestConfiguration.java
index d3280c0e38d..7cec42a3378 100644
--- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestConfiguration.java
+++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestConfiguration.java
@@ -16,16 +16,8 @@
// under the License.
package org.apache.cloudstack.storage.test;
-import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
-import com.cloud.host.dao.HostDao;
-import com.cloud.host.dao.HostDaoImpl;
-
@Configuration
public class TestConfiguration {
- @Bean
- public HostDao hostDao() {
- return new HostDaoImpl();
- }
}
diff --git a/engine/storage/integration-test/test/resource/StorageAllocatorTestContext.xml b/engine/storage/integration-test/test/resource/StorageAllocatorTestContext.xml
new file mode 100644
index 00000000000..1f5aa585f67
--- /dev/null
+++ b/engine/storage/integration-test/test/resource/StorageAllocatorTestContext.xml
@@ -0,0 +1,45 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/engine/storage/integration-test/test/resource/storageContext.xml b/engine/storage/integration-test/test/resource/storageContext.xml
index 4f55e243bac..7c5382d49f9 100644
--- a/engine/storage/integration-test/test/resource/storageContext.xml
+++ b/engine/storage/integration-test/test/resource/storageContext.xml
@@ -23,10 +23,8 @@
-
-
-
+
@@ -34,50 +32,12 @@
-
-
-
-
-
-
+
-
-
-
-
-
-
-
-
- org.apache.cloudstack.framework
-
-
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/engine/storage/src/org/apache/cloudstack/storage/HypervsiorHostEndPointRpcServer.java b/engine/storage/src/org/apache/cloudstack/storage/HypervsiorHostEndPointRpcServer.java
index b709991ee57..f441f39ddfa 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/HypervsiorHostEndPointRpcServer.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/HypervsiorHostEndPointRpcServer.java
@@ -40,24 +40,24 @@ public class HypervsiorHostEndPointRpcServer implements HostEndpointRpcServer {
private static final Logger s_logger = Logger.getLogger(HypervsiorHostEndPointRpcServer.class);
@Inject
- private RpcProvider _rpcProvider;
+ private RpcProvider rpcProvider;
public HypervsiorHostEndPointRpcServer() {
}
public HypervsiorHostEndPointRpcServer(RpcProvider rpcProvider) {
- _rpcProvider = rpcProvider;
- _rpcProvider.registerRpcServiceEndpoint(RpcServiceDispatcher.getDispatcher(this));
+ this.rpcProvider = rpcProvider;
+ rpcProvider.registerRpcServiceEndpoint(RpcServiceDispatcher.getDispatcher(this));
}
@PostConstruct
public void Initialize() {
- _rpcProvider.registerRpcServiceEndpoint(RpcServiceDispatcher.getDispatcher(this));
+ rpcProvider.registerRpcServiceEndpoint(RpcServiceDispatcher.getDispatcher(this));
}
@Override
public void sendCommandAsync(HypervisorHostEndPoint host, final Command command, final AsyncCompletionCallback<Answer> callback) {
- _rpcProvider.newCall(host.getHostAddr()).addCallbackListener(new RpcCallbackListener() {
+ rpcProvider.newCall(host.getHostAddr()).addCallbackListener(new RpcCallbackListener() {
@Override
public void onSuccess(Answer result) {
callback.complete(result);
diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java
new file mode 100755
index 00000000000..4c5f0e6cccf
--- /dev/null
+++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/AbstractStoragePoolAllocator.java
@@ -0,0 +1,192 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.allocator;
+
+import java.math.BigDecimal;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+
+import javax.inject.Inject;
+import javax.naming.ConfigurationException;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
+import org.apache.log4j.Logger;
+
+import com.cloud.configuration.dao.ConfigurationDao;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.dao.ClusterDao;
+import com.cloud.deploy.DeploymentPlan;
+import com.cloud.deploy.DeploymentPlanner.ExcludeList;
+import com.cloud.storage.DiskOfferingVO;
+import com.cloud.storage.Storage.StoragePoolType;
+import com.cloud.storage.StorageManager;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.Volume;
+import com.cloud.storage.Volume.Type;
+import com.cloud.storage.dao.DiskOfferingDao;
+import com.cloud.storage.dao.StoragePoolDao;
+import com.cloud.storage.dao.VolumeDao;
+import com.cloud.user.Account;
+import com.cloud.utils.NumbersUtil;
+import com.cloud.utils.component.AdapterBase;
+import com.cloud.vm.DiskProfile;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.VirtualMachineProfile;
+
+public abstract class AbstractStoragePoolAllocator extends AdapterBase implements StoragePoolAllocator {
+ private static final Logger s_logger = Logger.getLogger(AbstractStoragePoolAllocator.class);
+ @Inject StorageManager storageMgr;
+ protected @Inject StoragePoolDao _storagePoolDao;
+ @Inject VolumeDao _volumeDao;
+ @Inject ConfigurationDao _configDao;
+ @Inject ClusterDao _clusterDao;
+ protected @Inject DataStoreManager dataStoreMgr;
+ protected BigDecimal _storageOverprovisioningFactor = new BigDecimal(1);
+ long _extraBytesPerVolume = 0;
+ Random _rand;
+ boolean _dontMatter;
+ protected String _allocationAlgorithm = "random";
+ @Inject
+ DiskOfferingDao _diskOfferingDao;
+
+ @Override
+ public boolean configure(String name, Map params) throws ConfigurationException {
+ super.configure(name, params);
+
+ Map configs = _configDao.getConfiguration(null, params);
+
+ String globalStorageOverprovisioningFactor = configs.get("storage.overprovisioning.factor");
+ _storageOverprovisioningFactor = new BigDecimal(NumbersUtil.parseFloat(globalStorageOverprovisioningFactor, 2.0f));
+
+ _extraBytesPerVolume = 0;
+
+ _rand = new Random(System.currentTimeMillis());
+
+ _dontMatter = Boolean.parseBoolean(configs.get("storage.overwrite.provisioning"));
+
+ String allocationAlgorithm = configs.get("vm.allocation.algorithm");
+ if (allocationAlgorithm != null) {
+ _allocationAlgorithm = allocationAlgorithm;
+ }
+
+ return true;
+ }
+
+ protected abstract List select(DiskProfile dskCh, VirtualMachineProfile extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo);
+
+ @Override
+ public
+ List allocateToPool(DiskProfile dskCh, VirtualMachineProfile extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
+ List pools = select(dskCh, vmProfile, plan, avoid, returnUpTo);
+ return reOrder(pools, vmProfile, plan);
+ }
+
+ protected List reorderPoolsByNumberOfVolumes(DeploymentPlan plan, List pools, Account account) {
+ if(account == null){
+ return pools;
+ }
+ long dcId = plan.getDataCenterId();
+ Long podId = plan.getPodId();
+ Long clusterId = plan.getClusterId();
+
+ List poolIdsByVolCount = _volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId());
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("List of pools in ascending order of number of volumes for account id: "+ account.getAccountId() + " is: "+ poolIdsByVolCount);
+ }
+
+ //now filter the given list of Pools by this ordered list
+ Map poolMap = new HashMap();
+ for (StoragePool pool : pools) {
+ poolMap.put(pool.getId(), pool);
+ }
+ List matchingPoolIds = new ArrayList(poolMap.keySet());
+
+ poolIdsByVolCount.retainAll(matchingPoolIds);
+
+ List reorderedPools = new ArrayList();
+ for(Long id: poolIdsByVolCount){
+ reorderedPools.add(poolMap.get(id));
+ }
+
+ return reorderedPools;
+ }
+
+ protected List reOrder(List pools,
+ VirtualMachineProfile extends VirtualMachine> vmProfile,
+ DeploymentPlan plan) {
+ Account account = null;
+ if(vmProfile.getVirtualMachine() != null){
+ account = vmProfile.getOwner();
+ }
+
+ if(_allocationAlgorithm.equals("random") || _allocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) {
+ // Shuffle this so that we don't check the pools in the same order.
+ Collections.shuffle(pools);
+ }else if(_allocationAlgorithm.equals("userdispersing")){
+ pools = reorderPoolsByNumberOfVolumes(plan, pools, account);
+ }
+ return pools;
+ }
+
+ protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh,
+ DeploymentPlan plan) {
+
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("Checking if storage pool is suitable, name: " + pool.getName()+ " ,poolId: "+ pool.getId());
+ }
+ if (avoid.shouldAvoid(pool)) {
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("StoragePool is in avoid set, skipping this pool");
+ }
+ return false;
+ }
+
+ if(dskCh.getType().equals(Type.ROOT) && pool.getPoolType().equals(StoragePoolType.Iscsi)){
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("Disk needed for ROOT volume, but StoragePoolType is Iscsi, skipping this and trying other available pools");
+ }
+ return false;
+ }
+
+ DiskOfferingVO diskOffering = _diskOfferingDao.findById(dskCh.getDiskOfferingId());
+ if (diskOffering.getSystemUse() && pool.getPoolType() == StoragePoolType.RBD) {
+ s_logger.debug("Skipping RBD pool " + pool.getName() + " as a suitable pool. RBD is not supported for System VM's");
+ return false;
+ }
+
+
+ Long clusterId = pool.getClusterId();
+ ClusterVO cluster = _clusterDao.findById(clusterId);
+ if (!(cluster.getHypervisorType() == dskCh.getHypersorType())) {
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("StoragePool's Cluster does not have required hypervisorType, skipping this pool");
+ }
+ return false;
+ }
+
+ // check capacity
+ Volume volume = _volumeDao.findById(dskCh.getVolumeId());
+ List requestVolumes = new ArrayList();
+ requestVolumes.add(volume);
+ return storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool);
+ }
+}
diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java
new file mode 100644
index 00000000000..747e2586fed
--- /dev/null
+++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java
@@ -0,0 +1,99 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.allocator;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import javax.ejb.Local;
+import javax.naming.ConfigurationException;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import com.cloud.deploy.DeploymentPlan;
+import com.cloud.deploy.DeploymentPlanner.ExcludeList;
+import com.cloud.offering.ServiceOffering;
+import com.cloud.storage.StoragePool;
+import com.cloud.vm.DiskProfile;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.VirtualMachineProfile;
+
+@Component
+@Local(value=StoragePoolAllocator.class)
+public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocator {
+ private static final Logger s_logger = Logger.getLogger(ClusterScopeStoragePoolAllocator.class);
+
+ @Override
+ protected List select(DiskProfile dskCh, VirtualMachineProfile extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
+
+ List suitablePools = new ArrayList();
+
+ long dcId = plan.getDataCenterId();
+ Long podId = plan.getPodId();
+ Long clusterId = plan.getClusterId();
+
+ if(dskCh.getTags() != null && dskCh.getTags().length != 0){
+ s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId + " having tags:" + Arrays.toString(dskCh.getTags()));
+ }else{
+ s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId);
+ }
+
+ List pools = _storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags());
+ if (pools.size() == 0) {
+ if (s_logger.isDebugEnabled()) {
+ String storageType = dskCh.useLocalStorage() ? ServiceOffering.StorageType.local.toString() : ServiceOffering.StorageType.shared.toString();
+ s_logger.debug("No storage pools available for " + storageType + " volume allocation, returning");
+ }
+ return suitablePools;
+ }
+
+ for (StoragePoolVO pool: pools) {
+ if(suitablePools.size() == returnUpTo){
+ break;
+ }
+ StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
+ if (filter(avoid, pol, dskCh, plan)) {
+ suitablePools.add(pol);
+ }
+ }
+
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("ClusterScopeStoragePoolAllocator returning "+suitablePools.size() +" suitable storage pools");
+ }
+
+ return suitablePools;
+ }
+
+ @Override
+ public boolean configure(String name, Map params) throws ConfigurationException {
+ super.configure(name, params);
+
+ if (_configDao != null) {
+ Map configs = _configDao.getConfiguration(params);
+ String allocationAlgorithm = configs.get("vm.allocation.algorithm");
+ if (allocationAlgorithm != null) {
+ _allocationAlgorithm = allocationAlgorithm;
+ }
+ }
+ return true;
+ }
+}
diff --git a/server/src/com/cloud/storage/allocator/GarbageCollectingStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/GarbageCollectingStoragePoolAllocator.java
similarity index 82%
rename from server/src/com/cloud/storage/allocator/GarbageCollectingStoragePoolAllocator.java
rename to engine/storage/src/org/apache/cloudstack/storage/allocator/GarbageCollectingStoragePoolAllocator.java
index 4eeae280d8b..91bc25c715d 100644
--- a/server/src/com/cloud/storage/allocator/GarbageCollectingStoragePoolAllocator.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/GarbageCollectingStoragePoolAllocator.java
@@ -14,7 +14,7 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
-package com.cloud.storage.allocator;
+package org.apache.cloudstack.storage.allocator;
import java.util.List;
import java.util.Map;
@@ -23,8 +23,8 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.log4j.Logger;
-import org.springframework.stereotype.Component;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.deploy.DeploymentPlan;
@@ -36,32 +36,18 @@ import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
-@Component
@Local(value=StoragePoolAllocator.class)
public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAllocator {
private static final Logger s_logger = Logger.getLogger(GarbageCollectingStoragePoolAllocator.class);
StoragePoolAllocator _firstFitStoragePoolAllocator;
StoragePoolAllocator _localStoragePoolAllocator;
- @Inject StorageManager _storageMgr;
+ @Inject StorageManager storageMgr;
@Inject ConfigurationDao _configDao;
boolean _storagePoolCleanupEnabled;
@Override
- public boolean allocatorIsCorrectType(DiskProfile dskCh) {
- return true;
- }
-
- public Integer getStorageOverprovisioningFactor() {
- return null;
- }
-
- public Long getExtraBytesPerVolume() {
- return null;
- }
-
- @Override
- public List allocateToPool(DiskProfile dskCh, VirtualMachineProfile extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
+ public List select(DiskProfile dskCh, VirtualMachineProfile extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
if (!_storagePoolCleanupEnabled) {
s_logger.debug("Storage pool cleanup is not enabled, so GarbageCollectingStoragePoolAllocator is being skipped.");
@@ -69,10 +55,10 @@ public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAl
}
// Clean up all storage pools
- _storageMgr.cleanupStorage(false);
+ storageMgr.cleanupStorage(false);
// Determine what allocator to use
StoragePoolAllocator allocator;
- if (localStorageAllocationNeeded(dskCh)) {
+ if (dskCh.useLocalStorage()) {
allocator = _localStoragePoolAllocator;
} else {
allocator = _firstFitStoragePoolAllocator;
@@ -88,7 +74,7 @@ public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAl
public boolean configure(String name, Map params) throws ConfigurationException {
super.configure(name, params);
- _firstFitStoragePoolAllocator = ComponentContext.inject(FirstFitStoragePoolAllocator.class);
+ _firstFitStoragePoolAllocator = ComponentContext.inject(ClusterScopeStoragePoolAllocator.class);
_firstFitStoragePoolAllocator.configure("GCFirstFitStoragePoolAllocator", params);
_localStoragePoolAllocator = ComponentContext.inject(LocalStoragePoolAllocator.class);
_localStoragePoolAllocator.configure("GCLocalStoragePoolAllocator", params);
diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java
new file mode 100644
index 00000000000..a8d5173cebe
--- /dev/null
+++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java
@@ -0,0 +1,123 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.allocator;
+
+import java.math.BigDecimal;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import javax.ejb.Local;
+import javax.inject.Inject;
+import javax.naming.ConfigurationException;
+
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import com.cloud.capacity.dao.CapacityDao;
+import com.cloud.deploy.DeploymentPlan;
+import com.cloud.deploy.DeploymentPlanner.ExcludeList;
+import com.cloud.service.dao.ServiceOfferingDao;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.StoragePoolHostVO;
+import com.cloud.storage.Volume;
+import com.cloud.storage.dao.StoragePoolHostDao;
+import com.cloud.utils.NumbersUtil;
+import com.cloud.vm.DiskProfile;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.VirtualMachineProfile;
+import com.cloud.vm.dao.UserVmDao;
+import com.cloud.vm.dao.VMInstanceDao;
+
+@Component
+@Local(value = StoragePoolAllocator.class)
+public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
+ private static final Logger s_logger = Logger.getLogger(LocalStoragePoolAllocator.class);
+
+ @Inject
+ StoragePoolHostDao _poolHostDao;
+ @Inject
+ VMInstanceDao _vmInstanceDao;
+ @Inject
+ UserVmDao _vmDao;
+ @Inject
+ ServiceOfferingDao _offeringDao;
+ @Inject
+ CapacityDao _capacityDao;
+
+ @Override
+ protected List select(DiskProfile dskCh, VirtualMachineProfile extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
+
+ List suitablePools = new ArrayList();
+
+
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("LocalStoragePoolAllocator trying to find storage pool to fit the vm");
+ }
+
+ // data disk and host identified from deploying vm (attach volume case)
+ if (dskCh.getType() == Volume.Type.DATADISK && plan.getHostId() != null) {
+ List hostPools = _poolHostDao.listByHostId(plan.getHostId());
+ for (StoragePoolHostVO hostPool: hostPools) {
+ StoragePoolVO pool = _storagePoolDao.findById(hostPool.getPoolId());
+ if (pool != null && pool.isLocal()) {
+ StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
+ if (filter(avoid, pol, dskCh, plan)) {
+ s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list");
+ suitablePools.add(pol);
+ }
+ }
+
+ if (suitablePools.size() == returnUpTo) {
+ break;
+ }
+ }
+ } else {
+ List availablePools = _storagePoolDao.findLocalStoragePoolsByTags(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), dskCh.getTags());
+ for (StoragePoolVO pool : availablePools) {
+ if (suitablePools.size() == returnUpTo) {
+ break;
+ }
+ StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
+ if (filter(avoid, pol, dskCh, plan)) {
+ suitablePools.add(pol);
+ }
+ }
+ }
+
+ if (s_logger.isDebugEnabled()) {
+ s_logger.debug("LocalStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools");
+ }
+
+ return suitablePools;
+ }
+
+ @Override
+ public boolean configure(String name, Map params) throws ConfigurationException {
+ super.configure(name, params);
+
+ _storageOverprovisioningFactor = new BigDecimal(1);
+ _extraBytesPerVolume = NumbersUtil.parseLong((String) params.get("extra.bytes.per.volume"), 50 * 1024L * 1024L);
+
+ return true;
+ }
+
+ public LocalStoragePoolAllocator() {
+ }
+}
diff --git a/server/src/com/cloud/storage/allocator/UseLocalForRootAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/UseLocalForRootAllocator.java
similarity index 74%
rename from server/src/com/cloud/storage/allocator/UseLocalForRootAllocator.java
rename to engine/storage/src/org/apache/cloudstack/storage/allocator/UseLocalForRootAllocator.java
index 2c19406fef6..4663b12e97e 100644
--- a/server/src/com/cloud/storage/allocator/UseLocalForRootAllocator.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/UseLocalForRootAllocator.java
@@ -14,7 +14,7 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
-package com.cloud.storage.allocator;
+package org.apache.cloudstack.storage.allocator;
import java.util.List;
import java.util.Map;
@@ -23,23 +23,17 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
-import org.springframework.stereotype.Component;
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
-import com.cloud.configuration.Config;
-import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
-import com.cloud.host.Host;
import com.cloud.storage.StoragePool;
-import com.cloud.storage.Volume.Type;
-
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
-@Component
@Local(value=StoragePoolAllocator.class)
public class UseLocalForRootAllocator extends LocalStoragePoolAllocator implements StoragePoolAllocator {
@@ -55,29 +49,13 @@ public class UseLocalForRootAllocator extends LocalStoragePoolAllocator implemen
return super.allocateToPool(dskCh, vmProfile, plan, avoid, returnUpTo);
}
-
- @Override
- public String chooseStorageIp(VirtualMachine vm, Host host, Host storage) {
- return null;
- }
@Override
public boolean configure(String name, Map params) throws ConfigurationException {
super.configure(name, params);
return true;
}
-
- @Override
- protected boolean localStorageAllocationNeeded(DiskProfile dskCh) {
- if (dskCh.getType() == Type.ROOT) {
- return true;
- } else if (dskCh.getType() == Type.DATADISK) {
- return false;
- } else {
- return super.localStorageAllocationNeeded(dskCh);
- }
- }
-
+
protected UseLocalForRootAllocator() {
}
}
diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
new file mode 100644
index 00000000000..041ecb73707
--- /dev/null
+++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java
@@ -0,0 +1,77 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.cloudstack.storage.allocator;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
+import org.apache.log4j.Logger;
+import org.springframework.stereotype.Component;
+
+import com.cloud.deploy.DeploymentPlan;
+import com.cloud.deploy.DeploymentPlanner.ExcludeList;
+import com.cloud.hypervisor.Hypervisor.HypervisorType;
+import com.cloud.storage.StoragePool;
+import com.cloud.storage.Volume;
+import com.cloud.vm.DiskProfile;
+import com.cloud.vm.VirtualMachine;
+import com.cloud.vm.VirtualMachineProfile;
+
+@Component
+public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
+ private static final Logger s_logger = Logger.getLogger(ZoneWideStoragePoolAllocator.class);
+
+ @Override
+ protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh,
+ DeploymentPlan plan) {
+ if (avoid.shouldAvoid(pool)) {
+ return false;
+ }
+ Volume volume = _volumeDao.findById(dskCh.getVolumeId());
+ List requestVolumes = new ArrayList();
+ requestVolumes.add(volume);
+ return storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool);
+ }
+
+ @Override
+ protected List select(DiskProfile dskCh,
+ VirtualMachineProfile extends VirtualMachine> vmProfile,
+ DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
+ List suitablePools = new ArrayList();
+ HypervisorType hypervisor = vmProfile.getHypervisorType();
+ if (hypervisor != null) {
+ if (hypervisor != HypervisorType.KVM) {
+ s_logger.debug("Only kvm supports zone wide storage");
+ return suitablePools;
+ }
+ }
+
+ List storagePools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags());
+
+ for (StoragePoolVO storage : storagePools) {
+ if (suitablePools.size() == returnUpTo) {
+ break;
+ }
+ StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(storage.getId());
+ if (filter(avoid, pol, dskCh, plan)) {
+ suitablePools.add(pol);
+ }
+ }
+ return suitablePools;
+ }
+}
diff --git a/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
index c067a1b651c..cfd9f400839 100644
--- a/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
+++ b/engine/storage/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java
@@ -100,7 +100,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
@Inject
ConfigurationDao configDao;
@Inject
- StorageManager storagMgr;
+ StorageManager storageMgr;
@Inject
VolumeDao volDao;
@Inject
@@ -149,7 +149,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
CopyVolumeAnswer cvAnswer = null;
String errMsg = null;
try {
- cvAnswer = (CopyVolumeAnswer) this.storagMgr.sendToPool(destPool,
+ cvAnswer = (CopyVolumeAnswer) this.storageMgr.sendToPool(destPool,
cvCmd);
} catch (StorageUnavailableException e1) {
s_logger.debug("Failed to copy volume " + srcData.getId() + " to "
@@ -231,7 +231,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
"2.1");
Answer answer = null;
try {
- answer = this.storagMgr.sendToPool(pool, cmd);
+ answer = this.storageMgr.sendToPool(pool, cmd);
} catch (StorageUnavailableException e) {
} finally {
snapshotDao.unlockFromLockTable(snapshotId.toString());
@@ -268,7 +268,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
+ snapshotId
+ " due to this snapshot is being used, try it later ");
}
- answer = (CreateVolumeFromSnapshotAnswer) this.storagMgr
+ answer = (CreateVolumeFromSnapshotAnswer) this.storageMgr
.sendToPool(pool, createVolumeFromSnapshotCommand);
if (answer != null && answer.getResult()) {
vdiUUID = answer.getVdi();
@@ -306,7 +306,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
StoragePool pool = (StoragePool)volume.getDataStore();
String errMsg = null;
try {
- answer = storagMgr.sendToPool(pool, null, cmd);
+ answer = storageMgr.sendToPool(pool, null, cmd);
} catch (StorageUnavailableException e) {
s_logger.debug("Failed to send to storage pool", e);
throw new CloudRuntimeException("Failed to send to storage pool", e);
@@ -358,7 +358,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
_copyvolumewait);
CopyVolumeAnswer cvAnswer;
try {
- cvAnswer = (CopyVolumeAnswer) this.storagMgr.sendToPool(srcPool, cvCmd);
+ cvAnswer = (CopyVolumeAnswer) this.storageMgr.sendToPool(srcPool, cvCmd);
} catch (StorageUnavailableException e1) {
throw new CloudRuntimeException(
"Failed to copy the volume from the source primary storage pool to secondary storage.",
@@ -376,7 +376,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
secondaryStorageVolumePath, destPool,
secondaryStorageURL, false, _copyvolumewait);
try {
- cvAnswer = (CopyVolumeAnswer) this.storagMgr.sendToPool(destPool, cvCmd);
+ cvAnswer = (CopyVolumeAnswer) this.storageMgr.sendToPool(destPool, cvCmd);
} catch (StorageUnavailableException e1) {
throw new CloudRuntimeException(
"Failed to copy the volume from secondary storage to the destination primary storage pool.");
@@ -464,7 +464,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
Long volumeId = snapshot.getVolumeId();
String origTemplateInstallPath = null;
- List pools = this.storagMgr
+ List pools = this.storageMgr
.ListByDataCenterHypervisor(zoneId,
snapshot.getHypervisorType());
if (pools == null || pools.size() == 0) {
@@ -516,7 +516,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
}
Answer answer = null;
try {
- answer = this.storagMgr.sendToPool(pool, cmd);
+ answer = this.storageMgr.sendToPool(pool, cmd);
cmd = null;
} catch (StorageUnavailableException e) {
} finally {
@@ -557,7 +557,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
CreatePrivateTemplateAnswer answer = null;
try {
- answer = (CreatePrivateTemplateAnswer) this.storagMgr.sendToPool(
+ answer = (CreatePrivateTemplateAnswer) this.storageMgr.sendToPool(
pool, cmd);
} catch (StorageUnavailableException e) {
throw new CloudRuntimeException(
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCyclImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCycleImpl.java
similarity index 99%
rename from engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCyclImpl.java
rename to engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCycleImpl.java
index 2167ba19a32..1c938888f72 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCyclImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/lifecycle/AncientPrimaryDataStoreLifeCycleImpl.java
@@ -94,10 +94,10 @@ import com.cloud.vm.dao.SecondaryStorageVmDao;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.VMInstanceDao;
-public class AncientPrimaryDataStoreLifeCyclImpl implements
+public class AncientPrimaryDataStoreLifeCycleImpl implements
PrimaryDataStoreLifeCycle {
private static final Logger s_logger = Logger
- .getLogger(AncientPrimaryDataStoreLifeCyclImpl.class);
+ .getLogger(AncientPrimaryDataStoreLifeCycleImpl.class);
@Inject
protected ResourceManager _resourceMgr;
protected List _discoverers;
@@ -134,9 +134,6 @@ public class AncientPrimaryDataStoreLifeCyclImpl implements
protected StoragePoolHostDao _storagePoolHostDao;
@Inject
protected AlertManager _alertMgr;
-
-
-
@Inject
protected ConsoleProxyDao _consoleProxyDao;
diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java
index e7d65167eac..09e78e45659 100644
--- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java
+++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/provider/AncientPrimaryDataStoreProviderImpl.java
@@ -27,7 +27,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager;
import org.apache.cloudstack.storage.datastore.driver.AncientPrimaryDataStoreDriverImpl;
-import org.apache.cloudstack.storage.datastore.lifecycle.AncientPrimaryDataStoreLifeCyclImpl;
+import org.apache.cloudstack.storage.datastore.lifecycle.AncientPrimaryDataStoreLifeCycleImpl;
import org.springframework.stereotype.Component;
import com.cloud.utils.component.ComponentContext;
@@ -55,7 +55,7 @@ public class AncientPrimaryDataStoreProviderImpl implements
@Override
public boolean configure(Map params) {
- lifecyle = ComponentContext.inject(AncientPrimaryDataStoreLifeCyclImpl.class);
+ lifecyle = ComponentContext.inject(AncientPrimaryDataStoreLifeCycleImpl.class);
driver = ComponentContext.inject(AncientPrimaryDataStoreDriverImpl.class);
uuid = (String)params.get("uuid");
id = (Long)params.get("id");
diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpOssResource.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpOssResource.java
index 357b4333678..57f545323f9 100644
--- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpOssResource.java
+++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XcpOssResource.java
@@ -57,7 +57,7 @@ public class XcpOssResource extends CitrixResourceBase {
@Override
protected List getPatchFiles() {
List files = new ArrayList();
- String patch = "scripts/vm/hypervisor/xenserver/xcposs/patch";
+ String patch = "patch";
String patchfilePath = Script.findScript("", patch);
if (patchfilePath == null) {
throw new CloudRuntimeException("Unable to find patch file " + patch);
diff --git a/plugins/storage-allocators/random/pom.xml b/plugins/storage-allocators/random/pom.xml
index 06754ffc133..223b7cd5cdf 100644
--- a/plugins/storage-allocators/random/pom.xml
+++ b/plugins/storage-allocators/random/pom.xml
@@ -26,4 +26,12 @@
4.2.0-SNAPSHOT
../../pom.xml
+
+
+ org.apache.cloudstack
+ cloud-engine-storage
+ ${project.version}
+ test
+
+
diff --git a/plugins/storage-allocators/random/src/com/cloud/storage/allocator/RandomStoragePoolAllocator.java b/plugins/storage-allocators/random/src/com/cloud/storage/allocator/RandomStoragePoolAllocator.java
deleted file mode 100644
index af21f50cc6f..00000000000
--- a/plugins/storage-allocators/random/src/com/cloud/storage/allocator/RandomStoragePoolAllocator.java
+++ /dev/null
@@ -1,91 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package com.cloud.storage.allocator;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-import javax.ejb.Local;
-
-import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
-import org.apache.log4j.Logger;
-import org.springframework.stereotype.Component;
-
-import com.cloud.deploy.DeploymentPlan;
-import com.cloud.deploy.DeploymentPlanner.ExcludeList;
-import com.cloud.server.StatsCollector;
-import com.cloud.storage.StoragePool;
-import com.cloud.storage.VMTemplateVO;
-import com.cloud.vm.DiskProfile;
-import com.cloud.vm.VirtualMachine;
-import com.cloud.vm.VirtualMachineProfile;
-
-@Component
-@Local(value=StoragePoolAllocator.class)
-public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator {
- private static final Logger s_logger = Logger.getLogger(RandomStoragePoolAllocator.class);
-
- @Override
- public boolean allocatorIsCorrectType(DiskProfile dskCh) {
- return true;
- }
-
- @Override
- public List allocateToPool(DiskProfile dskCh, VirtualMachineProfile extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
-
- List suitablePools = new ArrayList();
-
- VMTemplateVO template = (VMTemplateVO)vmProfile.getTemplate();
- // Check that the allocator type is correct
- if (!allocatorIsCorrectType(dskCh)) {
- return suitablePools;
- }
- long dcId = plan.getDataCenterId();
- Long podId = plan.getPodId();
- Long clusterId = plan.getClusterId();
- s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId);
- List pools = _storagePoolDao.listBy(dcId, podId, clusterId);
- if (pools.size() == 0) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("No storage pools available for allocation, returning");
- }
- return suitablePools;
- }
-
- StatsCollector sc = StatsCollector.getInstance();
-
- Collections.shuffle(pools);
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("RandomStoragePoolAllocator has " + pools.size() + " pools to check for allocation");
- }
- for (StoragePoolVO pool: pools) {
- if(suitablePools.size() == returnUpTo){
- break;
- }
- if (checkPool(avoid, pool, dskCh, template, null, sc, plan)) {
- StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
- suitablePools.add(pol);
- }
- }
-
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("RandomStoragePoolAllocator returning "+suitablePools.size() +" suitable storage pools");
- }
-
- return suitablePools;
- }
-}
diff --git a/pom.xml b/pom.xml
index beeccaf936a..0ee62cd976f 100644
--- a/pom.xml
+++ b/pom.xml
@@ -161,9 +161,9 @@
usage
utils
deps/XenServerJava
+ engine
plugins
patches
- engine
framework
services
test
diff --git a/server/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java b/server/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java
index bf629896907..cff4cfc1b95 100644
--- a/server/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java
+++ b/server/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java
@@ -19,6 +19,7 @@ package com.cloud.cluster.agentlb.dao;
import java.util.Date;
import java.util.List;
+import javax.annotation.PostConstruct;
import javax.ejb.Local;
import org.apache.log4j.Logger;
@@ -37,30 +38,35 @@ import com.cloud.utils.db.SearchCriteria;
public class HostTransferMapDaoImpl extends GenericDaoBase implements HostTransferMapDao {
private static final Logger s_logger = Logger.getLogger(HostTransferMapDaoImpl.class);
- protected final SearchBuilder AllFieldsSearch;
- protected final SearchBuilder IntermediateStateSearch;
- protected final SearchBuilder ActiveSearch;
+ protected SearchBuilder AllFieldsSearch;
+ protected SearchBuilder IntermediateStateSearch;
+ protected SearchBuilder ActiveSearch;
public HostTransferMapDaoImpl() {
- AllFieldsSearch = createSearchBuilder();
- AllFieldsSearch.and("id", AllFieldsSearch.entity().getId(), SearchCriteria.Op.EQ);
- AllFieldsSearch.and("initialOwner", AllFieldsSearch.entity().getInitialOwner(), SearchCriteria.Op.EQ);
- AllFieldsSearch.and("futureOwner", AllFieldsSearch.entity().getFutureOwner(), SearchCriteria.Op.EQ);
- AllFieldsSearch.and("state", AllFieldsSearch.entity().getState(), SearchCriteria.Op.EQ);
- AllFieldsSearch.done();
-
- IntermediateStateSearch = createSearchBuilder();
- IntermediateStateSearch.and("futureOwner", IntermediateStateSearch.entity().getFutureOwner(), SearchCriteria.Op.EQ);
- IntermediateStateSearch.and("initialOwner", IntermediateStateSearch.entity().getInitialOwner(), SearchCriteria.Op.EQ);
- IntermediateStateSearch.and("state", IntermediateStateSearch.entity().getState(), SearchCriteria.Op.IN);
- IntermediateStateSearch.done();
-
- ActiveSearch = createSearchBuilder();
- ActiveSearch.and("created", ActiveSearch.entity().getCreated(), SearchCriteria.Op.GT);
- ActiveSearch.and("id", ActiveSearch.entity().getId(), SearchCriteria.Op.EQ);
- ActiveSearch.and("state", ActiveSearch.entity().getState(), SearchCriteria.Op.EQ);
- ActiveSearch.done();
-
+ super();
+ }
+
+ @PostConstruct
+ public void init() {
+ AllFieldsSearch = createSearchBuilder();
+ AllFieldsSearch.and("id", AllFieldsSearch.entity().getId(), SearchCriteria.Op.EQ);
+ AllFieldsSearch.and("initialOwner", AllFieldsSearch.entity().getInitialOwner(), SearchCriteria.Op.EQ);
+ AllFieldsSearch.and("futureOwner", AllFieldsSearch.entity().getFutureOwner(), SearchCriteria.Op.EQ);
+ AllFieldsSearch.and("state", AllFieldsSearch.entity().getState(), SearchCriteria.Op.EQ);
+ AllFieldsSearch.done();
+
+ IntermediateStateSearch = createSearchBuilder();
+ IntermediateStateSearch.and("futureOwner", IntermediateStateSearch.entity().getFutureOwner(), SearchCriteria.Op.EQ);
+ IntermediateStateSearch.and("initialOwner", IntermediateStateSearch.entity().getInitialOwner(), SearchCriteria.Op.EQ);
+ IntermediateStateSearch.and("state", IntermediateStateSearch.entity().getState(), SearchCriteria.Op.IN);
+ IntermediateStateSearch.done();
+
+ ActiveSearch = createSearchBuilder();
+ ActiveSearch.and("created", ActiveSearch.entity().getCreated(), SearchCriteria.Op.GT);
+ ActiveSearch.and("id", ActiveSearch.entity().getId(), SearchCriteria.Op.EQ);
+ ActiveSearch.and("state", ActiveSearch.entity().getState(), SearchCriteria.Op.EQ);
+ ActiveSearch.done();
+
}
@Override
diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java
index eb6fb242983..b6d49bcc5c5 100755
--- a/server/src/com/cloud/configuration/Config.java
+++ b/server/src/com/cloud/configuration/Config.java
@@ -20,6 +20,8 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
+
import com.cloud.agent.AgentManager;
import com.cloud.consoleproxy.ConsoleProxyManager;
import com.cloud.ha.HighAvailabilityManager;
@@ -28,7 +30,6 @@ import com.cloud.network.NetworkManager;
import com.cloud.network.router.VpcVirtualNetworkApplianceManager;
import com.cloud.server.ManagementServer;
import com.cloud.storage.StorageManager;
-import com.cloud.storage.allocator.StoragePoolAllocator;
import com.cloud.storage.secondary.SecondaryStorageVmManager;
import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.template.TemplateManager;
diff --git a/server/src/com/cloud/deploy/FirstFitPlanner.java b/server/src/com/cloud/deploy/FirstFitPlanner.java
index 4933467bd8f..187ceab25dc 100755
--- a/server/src/com/cloud/deploy/FirstFitPlanner.java
+++ b/server/src/com/cloud/deploy/FirstFitPlanner.java
@@ -28,17 +28,23 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
-import com.cloud.dc.*;
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.log4j.Logger;
import com.cloud.agent.manager.allocator.HostAllocator;
-import com.cloud.api.ApiDBUtils;
import com.cloud.capacity.Capacity;
import com.cloud.capacity.CapacityManager;
import com.cloud.capacity.CapacityVO;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.configuration.Config;
import com.cloud.configuration.dao.ConfigurationDao;
+import com.cloud.dc.ClusterDetailsDao;
+import com.cloud.dc.ClusterDetailsVO;
+import com.cloud.dc.ClusterVO;
+import com.cloud.dc.DataCenter;
+import com.cloud.dc.DataCenterVO;
+import com.cloud.dc.HostPodVO;
+import com.cloud.dc.Pod;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.HostPodDao;
@@ -58,7 +64,6 @@ import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
-import com.cloud.storage.allocator.StoragePoolAllocator;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.GuestOSCategoryDao;
import com.cloud.storage.dao.GuestOSDao;
diff --git a/server/src/com/cloud/host/dao/HostDaoImpl.java b/server/src/com/cloud/host/dao/HostDaoImpl.java
index 697c3dc3826..07a42322ce3 100755
--- a/server/src/com/cloud/host/dao/HostDaoImpl.java
+++ b/server/src/com/cloud/host/dao/HostDaoImpl.java
@@ -128,6 +128,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao
@Inject protected ClusterDao _clusterDao;
public HostDaoImpl() {
+ super();
}
@PostConstruct
@@ -261,7 +262,11 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao
* UnmanagedDirectConnectSearch.and("lastPinged", UnmanagedDirectConnectSearch.entity().getLastPinged(),
* SearchCriteria.Op.LTEQ); UnmanagedDirectConnectSearch.cp(); UnmanagedDirectConnectSearch.cp();
*/
+ try {
HostTransferSearch = _hostTransferDao.createSearchBuilder();
+ } catch (Throwable e) {
+ s_logger.debug("error", e);
+ }
HostTransferSearch.and("id", HostTransferSearch.entity().getId(), SearchCriteria.Op.NULL);
UnmanagedDirectConnectSearch.join("hostTransferSearch", HostTransferSearch, HostTransferSearch.entity().getId(), UnmanagedDirectConnectSearch.entity().getId(), JoinType.LEFTOUTER);
ClusterManagedSearch = _clusterDao.createSearchBuilder();
diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java
index 9daf77db38a..ca21f62455a 100755
--- a/server/src/com/cloud/storage/StorageManagerImpl.java
+++ b/server/src/com/cloud/storage/StorageManagerImpl.java
@@ -58,6 +58,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult;
@@ -69,7 +70,6 @@ import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.agent.AgentManager;
-
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.BackupSnapshotCommand;
import com.cloud.agent.api.CleanupSnapshotBackupCommand;
@@ -78,7 +78,6 @@ import com.cloud.agent.api.ManageSnapshotCommand;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.agent.api.storage.DeleteTemplateCommand;
import com.cloud.agent.api.storage.DeleteVolumeCommand;
-
import com.cloud.agent.manager.Commands;
import com.cloud.alert.AlertManager;
import com.cloud.api.ApiDBUtils;
@@ -100,10 +99,10 @@ import com.cloud.dc.HostPodVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.HostPodDao;
+import com.cloud.deploy.DataCenterDeployment;
+import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.domain.dao.DomainDao;
-
import com.cloud.event.dao.EventDao;
-import com.cloud.event.dao.UsageEventDao;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.ConnectionException;
import com.cloud.exception.InsufficientCapacityException;
@@ -113,7 +112,6 @@ import com.cloud.exception.PermissionDeniedException;
import com.cloud.exception.ResourceInUseException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.exception.StorageUnavailableException;
-
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.Status;
@@ -132,8 +130,6 @@ import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.Volume.Type;
-import com.cloud.storage.allocator.StoragePoolAllocator;
-
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotPolicyDao;
@@ -146,7 +142,6 @@ import com.cloud.storage.dao.VMTemplateS3Dao;
import com.cloud.storage.dao.VMTemplateSwiftDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.storage.dao.VolumeHostDao;
-
import com.cloud.storage.download.DownloadMonitor;
import com.cloud.storage.listener.StoragePoolMonitor;
import com.cloud.storage.listener.VolumeStateListener;
@@ -156,7 +151,11 @@ import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.storage.snapshot.SnapshotScheduler;
import com.cloud.tags.dao.ResourceTagDao;
import com.cloud.template.TemplateManager;
-import com.cloud.user.*;
+import com.cloud.user.Account;
+import com.cloud.user.AccountManager;
+import com.cloud.user.ResourceLimitService;
+import com.cloud.user.User;
+import com.cloud.user.UserContext;
import com.cloud.user.dao.AccountDao;
import com.cloud.user.dao.UserDao;
import com.cloud.utils.NumbersUtil;
@@ -165,20 +164,28 @@ import com.cloud.utils.UriUtils;
import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.component.ManagerBase;
import com.cloud.utils.concurrency.NamedThreadFactory;
-import com.cloud.utils.db.*;
+import com.cloud.utils.db.DB;
+import com.cloud.utils.db.GenericSearchBuilder;
+import com.cloud.utils.db.GlobalLock;
+import com.cloud.utils.db.JoinBuilder;
import com.cloud.utils.db.JoinBuilder.JoinType;
+import com.cloud.utils.db.SearchBuilder;
+import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Op;
+import com.cloud.utils.db.Transaction;
import com.cloud.utils.exception.CloudRuntimeException;
-
import com.cloud.vm.DiskProfile;
import com.cloud.vm.UserVmManager;
import com.cloud.vm.VMInstanceVO;
+import com.cloud.vm.VirtualMachine.State;
import com.cloud.vm.VirtualMachineManager;
import com.cloud.vm.VirtualMachineProfile;
import com.cloud.vm.VirtualMachineProfileImpl;
-
-import com.cloud.vm.VirtualMachine.State;
-import com.cloud.vm.dao.*;
+import com.cloud.vm.dao.ConsoleProxyDao;
+import com.cloud.vm.dao.DomainRouterDao;
+import com.cloud.vm.dao.SecondaryStorageVmDao;
+import com.cloud.vm.dao.UserVmDao;
+import com.cloud.vm.dao.VMInstanceDao;
@Component
@Local(value = { StorageManager.class, StorageService.class })
@@ -193,24 +200,10 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@Inject
protected TemplateManager _tmpltMgr;
@Inject
- protected AsyncJobManager _asyncMgr;
- @Inject
- protected SnapshotManager _snapshotMgr;
- @Inject
- protected SnapshotScheduler _snapshotScheduler;
- @Inject
protected AccountManager _accountMgr;
@Inject
protected ConfigurationManager _configMgr;
@Inject
- protected ConsoleProxyManager _consoleProxyMgr;
- @Inject
- protected SecondaryStorageVmManager _secStorageMgr;
- @Inject
- protected NetworkModel _networkMgr;
- @Inject
- protected ServiceOfferingDao _serviceOfferingDao;
- @Inject
protected VolumeDao _volsDao;
@Inject
protected HostDao _hostDao;
@@ -275,30 +268,14 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
@Inject
protected ClusterDao _clusterDao;
@Inject
- protected VirtualMachineManager _vmMgr;
- @Inject
- protected DomainRouterDao _domrDao;
- @Inject
- protected SecondaryStorageVmDao _secStrgDao;
- @Inject
protected StoragePoolWorkDao _storagePoolWorkDao;
@Inject
protected HypervisorGuruManager _hvGuruMgr;
@Inject
protected VolumeDao _volumeDao;
@Inject
- protected OCFS2Manager _ocfs2Mgr;
- @Inject
- protected ResourceLimitService _resourceLimitMgr;
- @Inject
protected SecondaryStorageVmManager _ssvmMgr;
@Inject
- protected ResourceManager _resourceMgr;
- @Inject
- protected DownloadMonitor _downloadMonitor;
- @Inject
- protected ResourceTagDao _resourceTagDao;
- @Inject
protected List _storagePoolAllocators;
@Inject
ConfigurationDao _configDao;
@@ -464,14 +441,19 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
VMInstanceVO vm, final Set avoid) {
VirtualMachineProfile profile = new VirtualMachineProfileImpl(
- vm);
+ vm);
for (StoragePoolAllocator allocator : _storagePoolAllocators) {
- final List poolList = allocator.allocateToPool(
- dskCh, profile, dc.getId(), pod.getId(), clusterId, hostId,
- avoid, 1);
- if (poolList != null && !poolList.isEmpty()) {
- return (StoragePool)this.dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary);
- }
+
+ ExcludeList avoidList = new ExcludeList();
+ for(StoragePool pool : avoid){
+ avoidList.addPool(pool.getId());
+ }
+ DataCenterDeployment plan = new DataCenterDeployment(dc.getId(), pod.getId(), clusterId, hostId, null, null);
+
+ final List poolList = allocator.allocateToPool(dskCh, profile, plan, avoidList, 1);
+ if (poolList != null && !poolList.isEmpty()) {
+ return (StoragePool)this.dataStoreMgr.getDataStore(poolList.get(0).getId(), DataStoreRole.Primary);
+ }
}
return null;
}
diff --git a/server/src/com/cloud/storage/VolumeManagerImpl.java b/server/src/com/cloud/storage/VolumeManagerImpl.java
index a69607f1f3f..336dbcbf336 100644
--- a/server/src/com/cloud/storage/VolumeManagerImpl.java
+++ b/server/src/com/cloud/storage/VolumeManagerImpl.java
@@ -51,6 +51,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo;
+import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory;
import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo;
@@ -115,7 +116,6 @@ import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.Volume.Event;
import com.cloud.storage.Volume.Type;
-import com.cloud.storage.allocator.StoragePoolAllocator;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotPolicyDao;
diff --git a/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java
deleted file mode 100755
index d747d25c7b5..00000000000
--- a/server/src/com/cloud/storage/allocator/AbstractStoragePoolAllocator.java
+++ /dev/null
@@ -1,209 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package com.cloud.storage.allocator;
-
-import java.math.BigDecimal;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-
-import javax.inject.Inject;
-import javax.naming.ConfigurationException;
-
-import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
-import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
-import org.apache.log4j.Logger;
-
-import com.cloud.capacity.CapacityManager;
-import com.cloud.configuration.dao.ConfigurationDao;
-import com.cloud.dc.ClusterVO;
-import com.cloud.dc.dao.ClusterDao;
-import com.cloud.deploy.DataCenterDeployment;
-import com.cloud.deploy.DeploymentPlan;
-import com.cloud.deploy.DeploymentPlanner.ExcludeList;
-import com.cloud.host.Host;
-import com.cloud.server.StatsCollector;
-import com.cloud.storage.Storage.StoragePoolType;
-import com.cloud.storage.StorageManager;
-import com.cloud.storage.StoragePool;
-import com.cloud.storage.StoragePoolStatus;
-import com.cloud.storage.VMTemplateStoragePoolVO;
-import com.cloud.storage.VMTemplateStorageResourceAssoc;
-import com.cloud.storage.VMTemplateStorageResourceAssoc.Status;
-import com.cloud.storage.VMTemplateVO;
-import com.cloud.storage.Volume;
-import com.cloud.storage.Volume.Type;
-import com.cloud.storage.dao.StoragePoolDao;
-import com.cloud.storage.dao.StoragePoolHostDao;
-import com.cloud.storage.dao.VMTemplateDao;
-import com.cloud.storage.dao.VMTemplateHostDao;
-import com.cloud.storage.dao.VMTemplatePoolDao;
-import com.cloud.storage.dao.VolumeDao;
-import com.cloud.storage.swift.SwiftManager;
-import com.cloud.template.TemplateManager;
-import com.cloud.utils.NumbersUtil;
-import com.cloud.utils.component.AdapterBase;
-import com.cloud.vm.DiskProfile;
-import com.cloud.vm.VirtualMachine;
-import com.cloud.vm.VirtualMachineProfile;
-
-public abstract class AbstractStoragePoolAllocator extends AdapterBase implements StoragePoolAllocator {
- private static final Logger s_logger = Logger.getLogger(AbstractStoragePoolAllocator.class);
- @Inject TemplateManager _tmpltMgr;
- @Inject StorageManager _storageMgr;
- @Inject StoragePoolDao _storagePoolDao;
- @Inject VMTemplateHostDao _templateHostDao;
- @Inject VMTemplatePoolDao _templatePoolDao;
- @Inject VMTemplateDao _templateDao;
- @Inject VolumeDao _volumeDao;
- @Inject StoragePoolHostDao _poolHostDao;
- @Inject ConfigurationDao _configDao;
- @Inject ClusterDao _clusterDao;
- @Inject SwiftManager _swiftMgr;
- @Inject CapacityManager _capacityMgr;
- @Inject DataStoreManager dataStoreMgr;
- protected BigDecimal _storageOverprovisioningFactor = new BigDecimal(1);
- long _extraBytesPerVolume = 0;
- Random _rand;
- boolean _dontMatter;
-
- @Override
- public boolean configure(String name, Map params) throws ConfigurationException {
- super.configure(name, params);
-
- Map configs = _configDao.getConfiguration(null, params);
-
- String globalStorageOverprovisioningFactor = configs.get("storage.overprovisioning.factor");
- _storageOverprovisioningFactor = new BigDecimal(NumbersUtil.parseFloat(globalStorageOverprovisioningFactor, 2.0f));
-
- _extraBytesPerVolume = 0;
-
- _rand = new Random(System.currentTimeMillis());
-
- _dontMatter = Boolean.parseBoolean(configs.get("storage.overwrite.provisioning"));
-
- return true;
- }
-
- abstract boolean allocatorIsCorrectType(DiskProfile dskCh);
-
- protected boolean templateAvailable(long templateId, long poolId) {
- VMTemplateStorageResourceAssoc thvo = _templatePoolDao.findByPoolTemplate(poolId, templateId);
- if (thvo != null) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Template id : " + templateId + " status : " + thvo.getDownloadState().toString());
- }
- return (thvo.getDownloadState()==Status.DOWNLOADED);
- } else {
- return false;
- }
- }
-
- protected boolean localStorageAllocationNeeded(DiskProfile dskCh) {
- return dskCh.useLocalStorage();
- }
-
- protected boolean poolIsCorrectType(DiskProfile dskCh, StoragePool pool) {
- boolean localStorageAllocationNeeded = localStorageAllocationNeeded(dskCh);
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Is localStorageAllocationNeeded? "+ localStorageAllocationNeeded);
- s_logger.debug("Is storage pool shared? "+ pool.isShared());
- }
-
- return ((!localStorageAllocationNeeded && pool.getPoolType().isShared()) || (localStorageAllocationNeeded && !pool.getPoolType().isShared()));
- }
-
- protected boolean checkPool(ExcludeList avoid, StoragePoolVO pool, DiskProfile dskCh, VMTemplateVO template, List templatesInPool,
- StatsCollector sc, DeploymentPlan plan) {
-
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Checking if storage pool is suitable, name: " + pool.getName()+ " ,poolId: "+ pool.getId());
- }
- StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
- if (avoid.shouldAvoid(pol)) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("StoragePool is in avoid set, skipping this pool");
- }
- return false;
- }
- if(dskCh.getType().equals(Type.ROOT) && pool.getPoolType().equals(StoragePoolType.Iscsi)){
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Disk needed for ROOT volume, but StoragePoolType is Iscsi, skipping this and trying other available pools");
- }
- return false;
- }
-
- //by default, all pools are up when successfully added
- //don't return the pool if not up (if in maintenance/prepareformaintenance/errorinmaintenance)
- if(!pool.getStatus().equals(StoragePoolStatus.Up)){
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("StoragePool status is not UP, status is: "+pool.getStatus().name()+", skipping this pool");
- }
- return false;
- }
-
- // Check that the pool type is correct
- if (!poolIsCorrectType(dskCh, pol)) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("StoragePool is not of correct type, skipping this pool");
- }
- return false;
- }
-
- /*hypervisor type is correct*/
- // TODO : when creating a standalone volume, offering is passed as NULL, need to
- // refine the logic of checking hypervisorType based on offering info
- Long clusterId = pool.getClusterId();
- ClusterVO cluster = _clusterDao.findById(clusterId);
- if (!(cluster.getHypervisorType() == dskCh.getHypersorType())) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("StoragePool's Cluster does not have required hypervisorType, skipping this pool");
- }
- return false;
- }
-
-
- // check capacity
- Volume volume = _volumeDao.findById(dskCh.getVolumeId());
- List requestVolumes = new ArrayList();
- requestVolumes.add(volume);
- return _storageMgr.storagePoolHasEnoughSpace(requestVolumes, pol);
- }
-
-
-
- @Override
- public String chooseStorageIp(VirtualMachine vm, Host host, Host storage) {
- return storage.getStorageIpAddress();
- }
-
-
- @Override
- public List allocateToPool(DiskProfile dskCh, VirtualMachineProfile extends VirtualMachine> vmProfile, long dcId, long podId, Long clusterId, Long hostId, Set extends StoragePool> avoids, int returnUpTo) {
-
- ExcludeList avoid = new ExcludeList();
- for(StoragePool pool : avoids){
- avoid.addPool(pool.getId());
- }
-
- DataCenterDeployment plan = new DataCenterDeployment(dcId, podId, clusterId, hostId, null, null);
- return allocateToPool(dskCh, vmProfile, plan, avoid, returnUpTo);
- }
-
-}
diff --git a/server/src/com/cloud/storage/allocator/FirstFitStoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/FirstFitStoragePoolAllocator.java
deleted file mode 100644
index f0df3a6f001..00000000000
--- a/server/src/com/cloud/storage/allocator/FirstFitStoragePoolAllocator.java
+++ /dev/null
@@ -1,175 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package com.cloud.storage.allocator;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import javax.ejb.Local;
-import javax.inject.Inject;
-import javax.naming.ConfigurationException;
-
-import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
-import org.apache.log4j.Logger;
-
-import com.cloud.deploy.DeploymentPlan;
-import com.cloud.deploy.DeploymentPlanner.ExcludeList;
-import com.cloud.offering.ServiceOffering;
-import com.cloud.server.StatsCollector;
-import com.cloud.storage.DiskOfferingVO;
-import com.cloud.storage.VMTemplateVO;
-import com.cloud.storage.Storage.StoragePoolType;
-import com.cloud.storage.StoragePool;
-import com.cloud.storage.dao.DiskOfferingDao;
-import com.cloud.user.Account;
-import com.cloud.vm.DiskProfile;
-import com.cloud.vm.VirtualMachine;
-import com.cloud.vm.VirtualMachineProfile;
-
-@Local(value=StoragePoolAllocator.class)
-public class FirstFitStoragePoolAllocator extends AbstractStoragePoolAllocator {
- private static final Logger s_logger = Logger.getLogger(FirstFitStoragePoolAllocator.class);
- protected String _allocationAlgorithm = "random";
-
- @Inject
- DiskOfferingDao _diskOfferingDao;
-
- @Override
- public boolean allocatorIsCorrectType(DiskProfile dskCh) {
- return !localStorageAllocationNeeded(dskCh);
- }
-
- @Override
- public List allocateToPool(DiskProfile dskCh, VirtualMachineProfile extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
-
-
- VMTemplateVO template = (VMTemplateVO)vmProfile.getTemplate();
- Account account = null;
- if(vmProfile.getVirtualMachine() != null){
- account = vmProfile.getOwner();
- }
-
- List suitablePools = new ArrayList();
-
- // Check that the allocator type is correct
- if (!allocatorIsCorrectType(dskCh)) {
- return suitablePools;
- }
- long dcId = plan.getDataCenterId();
- Long podId = plan.getPodId();
- Long clusterId = plan.getClusterId();
-
- if(dskCh.getTags() != null && dskCh.getTags().length != 0){
- s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId + " having tags:" + Arrays.toString(dskCh.getTags()));
- }else{
- s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId);
- }
-
- List pools = _storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags(), null);
- if (pools.size() == 0) {
- if (s_logger.isDebugEnabled()) {
- String storageType = dskCh.useLocalStorage() ? ServiceOffering.StorageType.local.toString() : ServiceOffering.StorageType.shared.toString();
- s_logger.debug("No storage pools available for " + storageType + " volume allocation, returning");
- }
- return suitablePools;
- }
-
- StatsCollector sc = StatsCollector.getInstance();
-
- //FixMe: We are ignoring userdispersing algorithm when account is null. Find a way to get account ID when VMprofile is null
- if(_allocationAlgorithm.equals("random") || _allocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) {
- // Shuffle this so that we don't check the pools in the same order.
- Collections.shuffle(pools);
- }else if(_allocationAlgorithm.equals("userdispersing")){
- pools = reorderPoolsByNumberOfVolumes(plan, pools, account);
- }
-
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("FirstFitStoragePoolAllocator has " + pools.size() + " pools to check for allocation");
- }
-
- DiskOfferingVO diskOffering = _diskOfferingDao.findById(dskCh.getDiskOfferingId());
- for (StoragePoolVO pool: pools) {
- if(suitablePools.size() == returnUpTo){
- break;
- }
- if (diskOffering.getSystemUse() && pool.getPoolType() == StoragePoolType.RBD) {
- s_logger.debug("Skipping RBD pool " + pool.getName() + " as a suitable pool. RBD is not supported for System VM's");
- continue;
- }
-
- if (checkPool(avoid, pool, dskCh, template, null, sc, plan)) {
- StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
- suitablePools.add(pol);
- }
- }
-
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("FirstFitStoragePoolAllocator returning "+suitablePools.size() +" suitable storage pools");
- }
-
- return suitablePools;
- }
-
- private List reorderPoolsByNumberOfVolumes(DeploymentPlan plan, List pools, Account account) {
- if(account == null){
- return pools;
- }
- long dcId = plan.getDataCenterId();
- Long podId = plan.getPodId();
- Long clusterId = plan.getClusterId();
-
- List poolIdsByVolCount = _volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId());
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("List of pools in ascending order of number of volumes for account id: "+ account.getAccountId() + " is: "+ poolIdsByVolCount);
- }
-
- //now filter the given list of Pools by this ordered list
- Map poolMap = new HashMap();
- for (StoragePoolVO pool : pools) {
- poolMap.put(pool.getId(), pool);
- }
- List matchingPoolIds = new ArrayList(poolMap.keySet());
-
- poolIdsByVolCount.retainAll(matchingPoolIds);
-
- List reorderedPools = new ArrayList();
- for(Long id: poolIdsByVolCount){
- reorderedPools.add(poolMap.get(id));
- }
-
- return reorderedPools;
- }
-
- @Override
- public boolean configure(String name, Map params) throws ConfigurationException {
- super.configure(name, params);
-
- if (_configDao != null) {
- Map configs = _configDao.getConfiguration(params);
- String allocationAlgorithm = configs.get("vm.allocation.algorithm");
- if (allocationAlgorithm != null) {
- _allocationAlgorithm = allocationAlgorithm;
- }
- }
- return true;
- }
-}
diff --git a/server/src/com/cloud/storage/allocator/LocalStoragePoolAllocator.java b/server/src/com/cloud/storage/allocator/LocalStoragePoolAllocator.java
deleted file mode 100644
index 24b4dabe281..00000000000
--- a/server/src/com/cloud/storage/allocator/LocalStoragePoolAllocator.java
+++ /dev/null
@@ -1,288 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package com.cloud.storage.allocator;
-
-import java.math.BigDecimal;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import javax.ejb.Local;
-import javax.inject.Inject;
-import javax.naming.ConfigurationException;
-
-import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
-import org.apache.log4j.Logger;
-
-import com.cloud.capacity.CapacityVO;
-import com.cloud.capacity.dao.CapacityDao;
-import com.cloud.configuration.dao.ConfigurationDao;
-import com.cloud.deploy.DeploymentPlan;
-import com.cloud.deploy.DeploymentPlanner.ExcludeList;
-import com.cloud.offering.ServiceOffering;
-import com.cloud.service.dao.ServiceOfferingDao;
-import com.cloud.storage.StoragePool;
-import com.cloud.storage.StoragePoolHostVO;
-import com.cloud.storage.Volume;
-import com.cloud.storage.VolumeVO;
-import com.cloud.storage.dao.StoragePoolHostDao;
-import com.cloud.utils.DateUtil;
-import com.cloud.utils.NumbersUtil;
-import com.cloud.utils.db.GenericSearchBuilder;
-import com.cloud.utils.db.JoinBuilder;
-import com.cloud.utils.db.SearchBuilder;
-import com.cloud.utils.db.SearchCriteria;
-import com.cloud.utils.db.SearchCriteria.Func;
-import com.cloud.vm.DiskProfile;
-import com.cloud.vm.UserVmVO;
-import com.cloud.vm.VMInstanceVO;
-import com.cloud.vm.VirtualMachine;
-import com.cloud.vm.VirtualMachine.State;
-import com.cloud.vm.VirtualMachineProfile;
-import com.cloud.vm.dao.UserVmDao;
-import com.cloud.vm.dao.VMInstanceDao;
-
-//
-// TODO
-// Rush to make LocalStoragePoolAllocator use static allocation status, we should revisit the overall
-// allocation process to make it more reliable in next release. The code put in here is pretty ugly
-//
-@Local(value = StoragePoolAllocator.class)
-public class LocalStoragePoolAllocator extends FirstFitStoragePoolAllocator {
- private static final Logger s_logger = Logger.getLogger(LocalStoragePoolAllocator.class);
-
- @Inject
- StoragePoolHostDao _poolHostDao;
- @Inject
- VMInstanceDao _vmInstanceDao;
- @Inject
- UserVmDao _vmDao;
- @Inject
- ServiceOfferingDao _offeringDao;
- @Inject
- CapacityDao _capacityDao;
- @Inject
- ConfigurationDao _configDao;
-
- protected GenericSearchBuilder VmsOnPoolSearch;
-
- private int _secondsToSkipStoppedVMs = 86400;
-
- @Override
- public boolean allocatorIsCorrectType(DiskProfile dskCh) {
- return localStorageAllocationNeeded(dskCh);
- }
-
- @Override
- public List allocateToPool(DiskProfile dskCh, VirtualMachineProfile extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
-
- List suitablePools = new ArrayList();
-
- // Check that the allocator type is correct
- if (!allocatorIsCorrectType(dskCh)) {
- return suitablePools;
- }
-
- ExcludeList myAvoids = new ExcludeList(avoid.getDataCentersToAvoid(), avoid.getPodsToAvoid(), avoid.getClustersToAvoid(), avoid.getHostsToAvoid(), avoid.getPoolsToAvoid());
-
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("LocalStoragePoolAllocator trying to find storage pool to fit the vm");
- }
-
- // data disk and host identified from deploying vm (attach volume case)
- if (dskCh.getType() == Volume.Type.DATADISK && plan.getHostId() != null) {
- List hostPools = _poolHostDao.listByHostId(plan.getHostId());
- for (StoragePoolHostVO hostPool: hostPools) {
- StoragePoolVO pool = _storagePoolDao.findById(hostPool.getPoolId());
- if (pool != null && pool.isLocal()) {
- s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list");
- StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
- suitablePools.add(pol);
- }
-
- if (suitablePools.size() == returnUpTo) {
- break;
- }
- }
- } else {
- List availablePool;
- while (!(availablePool = super.allocateToPool(dskCh, vmProfile, plan, myAvoids, 1)).isEmpty()) {
- StoragePool pool = availablePool.get(0);
- myAvoids.addPool(pool.getId());
- List hostsInSPool = _poolHostDao.listByPoolId(pool.getId());
- assert (hostsInSPool.size() == 1) : "Local storage pool should be one host per pool";
-
- s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list");
- suitablePools.add(pool);
-
- if (suitablePools.size() == returnUpTo) {
- break;
- }
- }
- }
-
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("LocalStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools");
- }
-
- if (suitablePools.isEmpty()) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Unable to find storage pool to fit the vm");
- }
- }
- return suitablePools;
- }
-
- // we don't need to check host capacity now, since hostAllocators will do that anyway
- private boolean hostHasCpuMemoryCapacity(long hostId, List vmOnHost, VMInstanceVO vm) {
-
- ServiceOffering so = _offeringDao.findById(vm.getServiceOfferingId());
-
- long usedMemory = calcHostAllocatedCpuMemoryCapacity(vmOnHost, CapacityVO.CAPACITY_TYPE_MEMORY);
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Calculated static-allocated memory for VMs on host " + hostId + ": " + usedMemory + " bytes, requesting memory: " + (so != null ? so.getRamSize() * 1024L * 1024L : "")
- + " bytes");
- }
-
- SearchCriteria sc = _capacityDao.createSearchCriteria();
- sc.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, hostId);
- sc.addAnd("capacityType", SearchCriteria.Op.EQ, CapacityVO.CAPACITY_TYPE_MEMORY);
- List capacities = _capacityDao.search(sc, null);
- if (capacities.size() > 0) {
- if (capacities.get(0).getTotalCapacity() < usedMemory + (so != null ? so.getRamSize() * 1024L * 1024L : 0)) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Host " + hostId + " runs out of memory capacity");
- }
- return false;
- }
- } else {
- s_logger.warn("Host " + hostId + " has not reported memory capacity yet");
- return false;
- }
-
- long usedCpu = calcHostAllocatedCpuMemoryCapacity(vmOnHost, CapacityVO.CAPACITY_TYPE_CPU);
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Calculated static-allocated CPU for VMs on host " + hostId + ": " + usedCpu + " GHz, requesting cpu: " + (so != null ? so.getCpu() * so.getSpeed() : "") + " GHz");
- }
-
- sc = _capacityDao.createSearchCriteria();
- sc.addAnd("hostOrPoolId", SearchCriteria.Op.EQ, hostId);
- sc.addAnd("capacityType", SearchCriteria.Op.EQ, CapacityVO.CAPACITY_TYPE_CPU);
- capacities = _capacityDao.search(sc, null);
- if (capacities.size() > 0) {
- if (capacities.get(0).getTotalCapacity() < usedCpu + (so != null ? so.getCpu() * so.getSpeed() : 0)) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Host " + hostId + " runs out of CPU capacity");
- }
- return false;
- }
- } else {
- s_logger.warn("Host " + hostId + " has not reported CPU capacity yet");
- return false;
- }
-
- return true;
- }
-
- private boolean skipCalculation(VMInstanceVO vm) {
- if (vm == null) {
- return true;
- }
-
- if (vm.getState() == State.Expunging) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Skip counting capacity for Expunging VM : " + vm.getInstanceName());
- }
- return true;
- }
-
- if (vm.getState() == State.Destroyed && vm.getType() != VirtualMachine.Type.User) {
- return true;
- }
-
- if (vm.getState() == State.Stopped || vm.getState() == State.Destroyed) {
- // for stopped/Destroyed VMs, we will skip counting it if it hasn't been used for a while
-
- long millisecondsSinceLastUpdate = DateUtil.currentGMTTime().getTime() - vm.getUpdateTime().getTime();
- if (millisecondsSinceLastUpdate > _secondsToSkipStoppedVMs * 1000L) {
- if (s_logger.isDebugEnabled()) {
- s_logger.debug("Skip counting vm " + vm.getInstanceName() + " in capacity allocation as it has been stopped for " + millisecondsSinceLastUpdate / 60000 + " minutes");
- }
- return true;
- }
- }
- return false;
- }
-
- private long calcHostAllocatedCpuMemoryCapacity(List vmOnHost, short capacityType) {
- assert (capacityType == CapacityVO.CAPACITY_TYPE_MEMORY || capacityType == CapacityVO.CAPACITY_TYPE_CPU) : "Invalid capacity type passed in calcHostAllocatedCpuCapacity()";
-
- long usedCapacity = 0;
- for (Long vmId : vmOnHost) {
- VMInstanceVO vm = _vmInstanceDao.findById(vmId);
- if (skipCalculation(vm)) {
- continue;
- }
-
- ServiceOffering so = _offeringDao.findById(vm.getServiceOfferingId());
- if (vm.getType() == VirtualMachine.Type.User) {
- UserVmVO userVm = _vmDao.findById(vm.getId());
- if (userVm == null) {
- continue;
- }
- }
-
- if (capacityType == CapacityVO.CAPACITY_TYPE_MEMORY) {
- usedCapacity += so.getRamSize() * 1024L * 1024L;
- } else if (capacityType == CapacityVO.CAPACITY_TYPE_CPU) {
- usedCapacity += so.getCpu() * so.getSpeed();
- }
- }
-
- return usedCapacity;
- }
-
- @Override
- public boolean configure(String name, Map params) throws ConfigurationException {
- super.configure(name, params);
-
- _storageOverprovisioningFactor = new BigDecimal(1);
- _extraBytesPerVolume = NumbersUtil.parseLong((String) params.get("extra.bytes.per.volume"), 50 * 1024L * 1024L);
-
- Map configs = _configDao.getConfiguration("management-server", params);
- String value = configs.get("vm.resource.release.interval");
- _secondsToSkipStoppedVMs = NumbersUtil.parseInt(value, 86400);
-
- VmsOnPoolSearch = _vmInstanceDao.createSearchBuilder(Long.class);
- VmsOnPoolSearch.select(null, Func.DISTINCT, VmsOnPoolSearch.entity().getId());
- VmsOnPoolSearch.and("removed", VmsOnPoolSearch.entity().getRemoved(), SearchCriteria.Op.NULL);
- VmsOnPoolSearch.and("state", VmsOnPoolSearch.entity().getState(), SearchCriteria.Op.NIN);
-
- SearchBuilder sbVolume = _volumeDao.createSearchBuilder();
- sbVolume.and("poolId", sbVolume.entity().getPoolId(), SearchCriteria.Op.EQ);
-
- VmsOnPoolSearch.join("volumeJoin", sbVolume, VmsOnPoolSearch.entity().getId(), sbVolume.entity().getInstanceId(), JoinBuilder.JoinType.INNER);
-
- sbVolume.done();
- VmsOnPoolSearch.done();
-
- return true;
- }
-
- public LocalStoragePoolAllocator() {
- }
-}
diff --git a/server/src/com/cloud/storage/dao/StoragePoolDao.java b/server/src/com/cloud/storage/dao/StoragePoolDao.java
index 64bbd5fb5ed..28ead9c2945 100644
--- a/server/src/com/cloud/storage/dao/StoragePoolDao.java
+++ b/server/src/com/cloud/storage/dao/StoragePoolDao.java
@@ -20,6 +20,7 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Map;
+import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import com.cloud.storage.StoragePoolStatus;
@@ -37,7 +38,7 @@ public interface StoragePoolDao extends GenericDao {
/**
* @param datacenterId -- the id of the datacenter (availability zone)
*/
- List listBy(long datacenterId, long podId, Long clusterId);
+ List listBy(long datacenterId, long podId, Long clusterId, ScopeType scope);
/**
* Set capacity of storage pool in bytes
@@ -71,9 +72,9 @@ public interface StoragePoolDao extends GenericDao {
* @param details details to match. All must match for the pool to be returned.
* @return List of StoragePoolVO
*/
- List findPoolsByDetails(long dcId, long podId, Long clusterId, Map details);
+ List findPoolsByDetails(long dcId, long podId, Long clusterId, Map details, ScopeType scope);
- List findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, Boolean shared);
+ List findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags);
/**
* Find pool by UUID.
@@ -104,4 +105,9 @@ public interface StoragePoolDao extends GenericDao {
List listByStatusInZone(long dcId, StoragePoolStatus status);
List listPoolsByCluster(long clusterId);
+
+ List findLocalStoragePoolsByTags(long dcId, long podId,
+ Long clusterId, String[] tags);
+
+ List findZoneWideStoragePoolsByTags(long dcId, String[] tags);
}
diff --git a/server/src/com/cloud/storage/dao/StoragePoolDaoImpl.java b/server/src/com/cloud/storage/dao/StoragePoolDaoImpl.java
index ebf2943ec9c..28b4dbc5c18 100644
--- a/server/src/com/cloud/storage/dao/StoragePoolDaoImpl.java
+++ b/server/src/com/cloud/storage/dao/StoragePoolDaoImpl.java
@@ -28,14 +28,13 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
+import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.springframework.stereotype.Component;
import com.cloud.host.Status;
-import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StoragePoolDetailVO;
import com.cloud.storage.StoragePoolStatus;
-
import com.cloud.utils.db.DB;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
@@ -43,6 +42,8 @@ import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import com.cloud.utils.db.SearchCriteria.Func;
import com.cloud.utils.db.SearchCriteria.Op;
+import com.cloud.utils.db.SearchCriteria2;
+import com.cloud.utils.db.SearchCriteriaService;
import com.cloud.utils.db.Transaction;
import com.cloud.utils.exception.CloudRuntimeException;
@@ -59,8 +60,11 @@ public class StoragePoolDaoImpl extends GenericDaoBase imp
@Inject protected StoragePoolDetailsDao _detailsDao;
- private final String DetailsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and (";
+ private final String DetailsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? and (";
private final String DetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?";
+ private final String ZoneWideDetailsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and storage_pool.scope = ? and (";
+ private final String ZoneWideDetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?";
+
private final String FindPoolTagDetails = "SELECT storage_pool_details.name FROM storage_pool_details WHERE pool_id = ? and value = ?";
protected StoragePoolDaoImpl() {
@@ -77,6 +81,8 @@ public class StoragePoolDaoImpl extends GenericDaoBase imp
DcPodSearch = createSearchBuilder();
DcPodSearch.and("datacenterId", DcPodSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
+ DcPodSearch.and("status", DcPodSearch.entity().getStatus(), SearchCriteria.Op.EQ);
+ DcPodSearch.and("scope", DcPodSearch.entity().getScope(), SearchCriteria.Op.EQ);
DcPodSearch.and().op("nullpod", DcPodSearch.entity().getPodId(), SearchCriteria.Op.NULL);
DcPodSearch.or("podId", DcPodSearch.entity().getPodId(), SearchCriteria.Op.EQ);
DcPodSearch.cp();
@@ -87,6 +93,8 @@ public class StoragePoolDaoImpl extends GenericDaoBase imp
DcPodAnyClusterSearch = createSearchBuilder();
DcPodAnyClusterSearch.and("datacenterId", DcPodAnyClusterSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ);
+ DcPodAnyClusterSearch.and("status", DcPodAnyClusterSearch.entity().getStatus(), SearchCriteria.Op.EQ);
+ DcPodAnyClusterSearch.and("scope", DcPodAnyClusterSearch.entity().getScope(), SearchCriteria.Op.EQ);
DcPodAnyClusterSearch.and().op("nullpod", DcPodAnyClusterSearch.entity().getPodId(), SearchCriteria.Op.NULL);
DcPodAnyClusterSearch.or("podId", DcPodAnyClusterSearch.entity().getPodId(), SearchCriteria.Op.EQ);
DcPodAnyClusterSearch.cp();
@@ -192,11 +200,13 @@ public class StoragePoolDaoImpl extends GenericDaoBase imp
}
@Override
- public List listBy(long datacenterId, long podId, Long clusterId) {
+ public List listBy(long datacenterId, long podId, Long clusterId, ScopeType scope) {
if (clusterId != null) {
SearchCriteria sc = DcPodSearch.create();
sc.setParameters("datacenterId", datacenterId);
sc.setParameters("podId", podId);
+ sc.setParameters("status", Status.Up);
+ sc.setParameters("scope", scope);
sc.setParameters("cluster", clusterId);
return listBy(sc);
@@ -204,6 +214,8 @@ public class StoragePoolDaoImpl extends GenericDaoBase imp
SearchCriteria sc = DcPodAnyClusterSearch.create();
sc.setParameters("datacenterId", datacenterId);
sc.setParameters("podId", podId);
+ sc.setParameters("status", Status.Up);
+ sc.setParameters("scope", scope);
return listBy(sc);
}
}
@@ -242,11 +254,12 @@ public class StoragePoolDaoImpl extends GenericDaoBase imp
@DB
@Override
- public List findPoolsByDetails(long dcId, long podId, Long clusterId, Map details) {
+ public List findPoolsByDetails(long dcId, long podId, Long clusterId, Map details, ScopeType scope) {
StringBuilder sql = new StringBuilder(DetailsSqlPrefix);
if (clusterId != null) {
sql.append("storage_pool.cluster_id = ? OR storage_pool.cluster_id IS NULL) AND (");
}
+
for (Map.Entry detail : details.entrySet()) {
sql.append("((storage_pool_details.name='").append(detail.getKey()).append("') AND (storage_pool_details.value='").append(detail.getValue()).append("')) OR ");
}
@@ -259,6 +272,7 @@ public class StoragePoolDaoImpl extends GenericDaoBase imp
int i = 1;
pstmt.setLong(i++, dcId);
pstmt.setLong(i++, podId);
+ pstmt.setString(i++, scope.toString());
if (clusterId != null) {
pstmt.setLong(i++, clusterId);
}
@@ -283,26 +297,67 @@ public class StoragePoolDaoImpl extends GenericDaoBase imp
}
@Override
- public List findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags, Boolean shared) {
+ public List findPoolsByTags(long dcId, long podId, Long clusterId, String[] tags) {
List storagePools = null;
if (tags == null || tags.length == 0) {
- storagePools = listBy(dcId, podId, clusterId);
+ storagePools = listBy(dcId, podId, clusterId, ScopeType.CLUSTER);
} else {
Map details = tagsToDetails(tags);
- storagePools = findPoolsByDetails(dcId, podId, clusterId, details);
+ storagePools = findPoolsByDetails(dcId, podId, clusterId, details, ScopeType.CLUSTER);
}
-
- if (shared == null) {
- return storagePools;
+
+ return storagePools;
+ }
+
+ @Override
+ public List findLocalStoragePoolsByTags(long dcId, long podId, Long clusterId, String[] tags) {
+ List storagePools = null;
+ if (tags == null || tags.length == 0) {
+ storagePools = listBy(dcId, podId, clusterId, ScopeType.HOST);
} else {
- List filteredStoragePools = new ArrayList(storagePools);
- for (StoragePoolVO pool : storagePools) {
- if (shared != pool.isShared()) {
- filteredStoragePools.remove(pool);
- }
- }
-
- return filteredStoragePools;
+ Map details = tagsToDetails(tags);
+ storagePools = findPoolsByDetails(dcId, podId, clusterId, details, ScopeType.HOST);
+ }
+
+ return storagePools;
+ }
+
+ @Override
+ public List findZoneWideStoragePoolsByTags(long dcId, String[] tags) {
+ List storagePools = null;
+ if (tags == null || tags.length == 0) {
+ SearchCriteriaService sc = SearchCriteria2.create(StoragePoolVO.class);
+ sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dcId);
+ sc.addAnd(sc.getEntity().getStatus(), Op.EQ, Status.Up);
+ sc.addAnd(sc.getEntity().getScope(), Op.EQ, ScopeType.ZONE);
+ return sc.list();
+ } else {
+ Map details = tagsToDetails(tags);
+
+ StringBuilder sql = new StringBuilder(ZoneWideDetailsSqlPrefix);
+
+ for (Map.Entry detail : details.entrySet()) {
+ sql.append("((storage_pool_details.name='").append(detail.getKey()).append("') AND (storage_pool_details.value='").append(detail.getValue()).append("')) OR ");
+ }
+ sql.delete(sql.length() - 4, sql.length());
+ sql.append(ZoneWideDetailsSqlSuffix);
+ Transaction txn = Transaction.currentTxn();
+ PreparedStatement pstmt = null;
+ try {
+ pstmt = txn.prepareAutoCloseStatement(sql.toString());
+ int i = 1;
+ pstmt.setLong(i++, dcId);
+ pstmt.setString(i++, ScopeType.ZONE.toString());
+ pstmt.setInt(i++, details.size());
+ ResultSet rs = pstmt.executeQuery();
+ List pools = new ArrayList();
+ while (rs.next()) {
+ pools.add(toEntityBean(rs, false));
+ }
+ return pools;
+ } catch (SQLException e) {
+ throw new CloudRuntimeException("Unable to execute " + pstmt, e);
+ }
}
}
diff --git a/utils/src/com/cloud/utils/db/Transaction.java b/utils/src/com/cloud/utils/db/Transaction.java
index c1f157ed7a2..c15104455c0 100755
--- a/utils/src/com/cloud/utils/db/Transaction.java
+++ b/utils/src/com/cloud/utils/db/Transaction.java
@@ -97,8 +97,8 @@ public class Transaction {
/* FIXME: We need a better solution for this
* Initialize encryption if we need it for db.properties
*/
- EncryptionSecretKeyChecker enc = new EncryptionSecretKeyChecker();
- enc.check();
+ /*EncryptionSecretKeyChecker enc = new EncryptionSecretKeyChecker();
+ enc.check(); */
}
private final LinkedList _stack;
diff --git a/utils/src/com/cloud/utils/db/TransactionContextBuilder.java b/utils/src/com/cloud/utils/db/TransactionContextBuilder.java
index c8a7f7a0f1b..9b474d56c94 100644
--- a/utils/src/com/cloud/utils/db/TransactionContextBuilder.java
+++ b/utils/src/com/cloud/utils/db/TransactionContextBuilder.java
@@ -20,10 +20,13 @@ import java.lang.reflect.Method;
import org.aopalliance.intercept.MethodInterceptor;
import org.aopalliance.intercept.MethodInvocation;
+import org.apache.log4j.Logger;
import org.aspectj.lang.ProceedingJoinPoint;
+import org.aspectj.lang.Signature;
import org.aspectj.lang.reflect.MethodSignature;
public class TransactionContextBuilder implements MethodInterceptor {
+ private static final Logger s_logger = Logger.getLogger(TransactionContextBuilder.class);
public TransactionContextBuilder() {
}
@@ -31,7 +34,15 @@ public class TransactionContextBuilder implements MethodInterceptor {
MethodSignature methodSignature = (MethodSignature)call.getSignature();
Method targetMethod = methodSignature.getMethod();
if(needToIntercept(targetMethod)) {
- Transaction txn = Transaction.open(call.getSignature().getName());
+ Transaction txn = null;
+ try {
+ Signature s = call.getSignature();
+ String name = s.getName();
+ txn = Transaction.open(name);
+ } catch (Throwable e) {
+ s_logger.debug("Failed to open transaction: " + e.toString());
+ throw e;
+ }
Object ret = null;
try {
ret = call.proceed();