Fix a bunch of bugs related to zone-wide storage

This commit is contained in:
Edison Su 2013-04-03 21:51:17 -07:00
parent bb841d6011
commit f18a1d6f14
17 changed files with 193 additions and 86 deletions

View File

@ -79,9 +79,26 @@ public class StoragePoolResponse extends BaseResponse {
@SerializedName(ApiConstants.STATE) @Param(description="the state of the storage pool")
private StoragePoolStatus state;
@SerializedName(ApiConstants.SCOPE) @Param(description="the scope of the storage pool")
private String scope;
/**
 * Returns the scope of this storage pool as reported by the API
 * (e.g. "HOST", "CLUSTER", "ZONE").
 *
 * @return the storage pool scope string
 */
public String getScope() {
    return this.scope;
}
/**
 * Sets the scope string reported for this storage pool.
 *
 * @param scope the scope to set (e.g. "HOST", "CLUSTER", "ZONE")
 */
public void setScope(String scope) {
    this.scope = scope;
}
@Override
public String getObjectId() {
return this.getId();

View File

@ -1,23 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.cloudstack.engine.subsystem.api.storage;
/**
 * Lifecycle states of a data store (primary storage pool).
 * Order matters only insofar as callers rely on {@code ordinal()};
 * the declared sequence is preserved exactly.
 */
public enum DataStoreStatus {
    Initial,                // just created, not yet set up
    Initialized,            // basic setup done
    Creating,               // provisioning in progress
    Attaching,              // being attached to hosts
    Up,                     // healthy and usable
    PrepareForMaintenance,  // maintenance requested, draining
    ErrorInMaintenance,     // maintenance transition failed
    CancelMaintenance,      // maintenance being cancelled
    Maintenance,            // in maintenance mode
    Removed;                // deleted / no longer available
}

View File

@ -51,6 +51,7 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat
@Override
protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
s_logger.debug("ClusterScopeStoragePoolAllocator looking for storage pool");
List<StoragePool> suitablePools = new ArrayList<StoragePool>();
long dcId = plan.getDataCenterId();

View File

@ -48,7 +48,7 @@ public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAl
@Override
public List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
s_logger.debug("GarbageCollectingStoragePoolAllocator looking for storage pool");
if (!_storagePoolCleanupEnabled) {
s_logger.debug("Storage pool cleanup is not enabled, so GarbageCollectingStoragePoolAllocator is being skipped.");
return null;

View File

@ -69,11 +69,12 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
List<StoragePool> suitablePools = new ArrayList<StoragePool>();
if (s_logger.isDebugEnabled()) {
s_logger.debug("LocalStoragePoolAllocator trying to find storage pool to fit the vm");
}
s_logger.debug("LocalStoragePoolAllocator trying to find storage pool to fit the vm");
if (!dskCh.useLocalStorage()) {
return suitablePools;
}
// data disk and host identified from deploying vm (attach volume case)
if (dskCh.getType() == Volume.Type.DATADISK && plan.getHostId() != null) {
List<StoragePoolHostVO> hostPools = _poolHostDao.listByHostId(plan.getHostId());

View File

@ -55,6 +55,7 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
protected List<StoragePool> select(DiskProfile dskCh,
VirtualMachineProfile<? extends VirtualMachine> vmProfile,
DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
s_logger.debug("ZoneWideStoragePoolAllocator to find storage pool");
List<StoragePool> suitablePools = new ArrayList<StoragePool>();
HypervisorType hypervisor = dskCh.getHypersorType();
if (hypervisor != null) {

View File

@ -1,22 +1,15 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor
license agreements. See the NOTICE file distributed with this work for additional
information regarding copyright ownership. The ASF licenses this file to
you under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License. -->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-plugin-hypervisor-kvm</artifactId>
<name>Apache CloudStack Plugin - Hypervisor KVM</name>
@ -45,29 +38,29 @@
</dependency>
</dependencies>
<build>
<defaultGoal>install</defaultGoal>
<sourceDirectory>src</sourceDirectory>
<testSourceDirectory>test</testSourceDirectory>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>2.5.1</version>
<executions>
<execution>
<id>copy-dependencies</id>
<phase>package</phase>
<goals>
<goal>copy-dependencies</goal>
</goals>
<configuration>
<outputDirectory>${project.build.directory}/dependencies</outputDirectory>
<includeScope>runtime</includeScope>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<defaultGoal>install</defaultGoal>
<sourceDirectory>src</sourceDirectory>
<testSourceDirectory>test</testSourceDirectory>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>2.5.1</version>
<executions>
<execution>
<id>copy-dependencies</id>
<phase>package</phase>
<goals>
<goal>copy-dependencies</goal>
</goals>
<configuration>
<outputDirectory>${project.build.directory}/dependencies</outputDirectory>
<includeScope>runtime</includeScope>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>2.14</version>
@ -77,6 +70,52 @@
</excludes>
</configuration>
</plugin>
<plugin>
<artifactId>maven-assembly-plugin</artifactId>
<version>2.3</version>
<configuration>
<finalName>kvm-agent</finalName>
<appendAssemblyId>false</appendAssemblyId>
<descriptors>
<descriptor>agent-descriptor.xml</descriptor>
</descriptors>
</configuration>
<executions>
<execution>
<id>make-agent</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-resources-plugin</artifactId>
<version>2.6</version>
<executions>
<execution>
<id>copy-resources</id>
<!-- here the phase you need -->
<phase>package</phase>
<goals>
<goal>copy-resources</goal>
</goals>
<configuration>
<outputDirectory>dist</outputDirectory>
<resources>
<resource>
<directory>target</directory>
<includes>
<include>kvm-agent.zip</include>
</includes>
</resource>
</resources>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@ -666,6 +666,9 @@ ServerResource {
if (_localStoragePath == null) {
_localStoragePath = "/var/lib/libvirt/images/";
}
File storagePath = new File(_localStoragePath);
_localStoragePath = storagePath.getAbsolutePath();
_localStorageUUID = (String) params.get("local.storage.uuid");
if (_localStorageUUID == null) {

View File

@ -348,6 +348,7 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl implements
new String(storageHost + hostPath).getBytes()).toString();
}
List<StoragePoolVO> spHandles = primaryDataStoreDao
.findIfDuplicatePoolsExistByUUID(uuid);
if ((spHandles != null) && (spHandles.size() > 0)) {
@ -358,6 +359,7 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl implements
"Another active pool with the same uuid already exists");
}
String poolName = (String) dsInfos.get("name");
parameters.setUuid(uuid);

View File

@ -82,6 +82,7 @@ public class StoragePoolJoinDaoImpl extends GenericDaoBase<StoragePoolJoinVO, Lo
poolResponse.setPodId(pool.getPodUuid());
poolResponse.setPodName(pool.getPodName());
poolResponse.setCreated(pool.getCreated());
poolResponse.setScope(pool.getScope().toString());
long allocatedSize = pool.getUsedCapacity() + pool.getReservedCapacity();

View File

@ -31,6 +31,7 @@ import com.cloud.utils.db.GenericDao;
import org.apache.cloudstack.api.Identity;
import org.apache.cloudstack.api.InternalIdentity;
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
/**
* Storage Pool DB view.
@ -124,6 +125,24 @@ public class StoragePoolJoinVO extends BaseViewVO implements InternalIdentity, I
@Column(name="job_status")
private int jobStatus;
@Column(name = "scope")
@Enumerated(value = EnumType.STRING)
private ScopeType scope;
/**
 * Returns the scope of this storage pool as stored in the
 * {@code scope} column of the storage pool view.
 *
 * @return the pool's {@link ScopeType}, or {@code null} if unset
 */
public ScopeType getScope() {
    return this.scope;
}
/**
 * Sets the scope of this storage pool view entry.
 *
 * @param scope the {@link ScopeType} to set
 */
public void setScope(ScopeType scope) {
    this.scope = scope;
}
@Override
public long getId() {

View File

@ -4001,7 +4001,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
DataCenterVO zone = ApiDBUtils.findZoneById(cluster.getDataCenterId());
return zone.getAllocationState();
}
}
}
@Override
public AllocationState findPodAllocationState(HostPodVO pod){

View File

@ -102,7 +102,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
@Inject protected StorageManager _storageMgr;
@Inject DataStoreManager dataStoreMgr;
@Inject protected ClusterDetailsDao _clusterDetailsDao;
protected List<StoragePoolAllocator> _storagePoolAllocators;
public List<StoragePoolAllocator> getStoragePoolAllocators() {
return _storagePoolAllocators;

View File

@ -51,7 +51,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreRole;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreStatus;
import org.apache.cloudstack.engine.subsystem.api.storage.HostScope;
import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener;
import org.apache.cloudstack.engine.subsystem.api.storage.ImageDataFactory;
@ -381,7 +380,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
if (pools == null
|| pools.size() == 0
|| (pools.size() == 1 && pools.get(0).getStatus()
.equals(DataStoreStatus.Maintenance))) {
.equals(StoragePoolStatus.Maintenance))) {
return false;
} else {
return true;
@ -1000,8 +999,16 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
if (capacities.size() == 0) {
CapacityVO capacity = new CapacityVO(storagePool.getId(), storagePool.getDataCenterId(), storagePool.getPodId(), storagePool.getClusterId(), allocated, totalOverProvCapacity, capacityType);
CapacityState capacityState = _configMgr.findClusterAllocationState(ApiDBUtils.findClusterById(storagePool.getClusterId())) == AllocationState.Disabled ?
AllocationState allocationState = null;
if (storagePool.getScope() == ScopeType.ZONE) {
DataCenterVO dc = ApiDBUtils.findZoneById(storagePool.getDataCenterId());
allocationState = dc.getAllocationState();
} else {
allocationState = _configMgr.findClusterAllocationState(ApiDBUtils.findClusterById(storagePool.getClusterId()));
}
CapacityState capacityState = (allocationState == AllocationState.Disabled) ?
CapacityState.Disabled : CapacityState.Enabled;
capacity.setCapacityState(capacityState);
_capacityDao.persist(capacity);
} else {
@ -1419,9 +1426,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
}
}
if (!primaryStorage.getStatus().equals(DataStoreStatus.Up)
if (!primaryStorage.getStatus().equals(StoragePoolStatus.Up)
&& !primaryStorage.getStatus().equals(
DataStoreStatus.ErrorInMaintenance)) {
StoragePoolStatus.ErrorInMaintenance)) {
throw new InvalidParameterValueException("Primary storage with id "
+ primaryStorageId
+ " is not ready for migration, as the status is:"
@ -1458,9 +1465,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
throw new InvalidParameterValueException(msg);
}
if (primaryStorage.getStatus().equals(DataStoreStatus.Up)
if (primaryStorage.getStatus().equals(StoragePoolStatus.Up)
|| primaryStorage.getStatus().equals(
DataStoreStatus.PrepareForMaintenance)) {
StoragePoolStatus.PrepareForMaintenance)) {
throw new StorageUnavailableException("Primary storage with id "
+ primaryStorageId
+ " is not ready to complete migration, as the status is:"
@ -1519,11 +1526,11 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C
// check if pool is in an inconsistent state
if (pool != null
&& (pool.getStatus().equals(
DataStoreStatus.ErrorInMaintenance)
StoragePoolStatus.ErrorInMaintenance)
|| pool.getStatus()
.equals(DataStoreStatus.PrepareForMaintenance) || pool
.equals(StoragePoolStatus.PrepareForMaintenance) || pool
.getStatus()
.equals(DataStoreStatus.CancelMaintenance))) {
.equals(StoragePoolStatus.CancelMaintenance))) {
_storagePoolWorkDao.removePendingJobsOnMsRestart(
vo.getMsid(), poolId);
pool.setStatus(StoragePoolStatus.ErrorInMaintenance);

View File

@ -742,7 +742,8 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar
return true;
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Primary storage is not ready, wait until it is ready to launch secondary storage vm");
s_logger.debug("Primary storage is not ready, wait until it is ready to launch secondary storage vm. dcId: " + dataCenterId + " system.vm.use.local.storage: " + _useLocalStorage +
"If you want to use local storage to start ssvm, need to set system.vm.use.local.storage to true");
}
}
} else {

View File

@ -62,7 +62,13 @@ public class Upgrade410to420 implements DbUpgrade {
upgradeVmwareLabels(conn);
createPlaceHolderNics(conn);
updateRemoteAccessVpn(conn);
PreparedStatement sql = null;
updateSystemVmTemplates(conn);
updateCluster_details(conn);
updatePrimaryStore(conn);
}
private void updateSystemVmTemplates(Connection conn) {
PreparedStatement sql = null;
try {
sql = conn.prepareStatement("update vm_template set image_data_store_id = 1 where type = 'SYSTEM' or type = 'BUILTIN'");
sql.executeUpdate();
@ -76,8 +82,39 @@ public class Upgrade410to420 implements DbUpgrade {
}
}
}
updateCluster_details(conn);
}
}
/**
 * Part of the 4.1.0 -> 4.2.0 schema upgrade: stamps every existing
 * primary storage pool with the legacy provider name and a scope.
 * Local pool types (Filesystem, LVM) become HOST-scoped; all other
 * pool types become CLUSTER-scoped.
 *
 * @param conn open JDBC connection to the cloud database
 * @throws CloudRuntimeException if either UPDATE statement fails
 */
private void updatePrimaryStore(Connection conn) {
    PreparedStatement sql = null;
    PreparedStatement sql2 = null;
    try {
        // Local storage pool types are host-wide.
        sql = conn.prepareStatement("update storage_pool set storage_provider_name = ? , scope = ? where pool_type = 'Filesystem' or pool_type = 'LVM'");
        sql.setString(1, "ancient primary data store provider");
        sql.setString(2, "HOST");
        sql.executeUpdate();

        // Every other pool type defaults to cluster-wide scope.
        sql2 = conn.prepareStatement("update storage_pool set storage_provider_name = ? , scope = ? where pool_type != 'Filesystem' and pool_type != 'LVM'");
        sql2.setString(1, "ancient primary data store provider");
        sql2.setString(2, "CLUSTER");
        sql2.executeUpdate();
    } catch (SQLException e) {
        // Fixed copy-pasted message (previously claimed a vm template
        // upgrade failure) and preserve the cause for diagnostics.
        throw new CloudRuntimeException("Failed to update storage pool provider name and scope: " + e.toString(), e);
    } finally {
        if (sql != null) {
            try {
                sql.close();
            } catch (SQLException ignored) {
                // best-effort close; the upgrade outcome is unaffected
            }
        }
        if (sql2 != null) {
            try {
                sql2.close();
            } catch (SQLException ignored) {
                // best-effort close; the upgrade outcome is unaffected
            }
        }
    }
}
//update the cluster_details table with default overcommit ratios.
private void updateCluster_details(Connection conn) {

View File

@ -1479,6 +1479,7 @@ CREATE VIEW `cloud`.`storage_pool_view` AS
storage_pool.created,
storage_pool.removed,
storage_pool.capacity_bytes,
storage_pool.scope,
cluster.id cluster_id,
cluster.uuid cluster_uuid,
cluster.name cluster_name,