CLOUDSTACK-1037: Fix cloudmonkey's caching, autocompletion and printing

Signed-off-by: Rohit Yadav <bhaisaab@apache.org>
Author: Rohit Yadav <bhaisaab@apache.org>
Date: 2013-02-01 22:01:07 -08:00
Parent: 5476391b9f
Commit: b5a2e99809
5 changed files with 126 additions and 130 deletions

cloudmonkey/cachemaker.py

@@ -19,9 +19,9 @@
try:
import json
import os
import re
import types
from requester import monkeyrequest
from config import cache_file
except ImportError, e:
import sys
print "ImportError", e
@@ -35,52 +35,79 @@ def getvalue(dictionary, key):
return None
def csv_str_as_list(string):
def splitcsvstring(string):
if string is not None:
return filter(lambda x: x.strip() != '', string.split(','))
else:
return []
def cachegen_from_file(json_file):
def splitverbsubject(string):
idx = 0
for char in string:
if char.islower():
idx += 1
else:
break
return string[:idx].lower(), string[idx:].lower()
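For reference, an illustrative sketch of how the two new helpers behave; the CSV value and API name below are made-up examples, not taken from this commit:

    from cachemaker import splitcsvstring, splitverbsubject

    # splitcsvstring drops empty items from a comma-separated string
    print(splitcsvstring("listVolumes,listTemplates,"))   # ['listVolumes', 'listTemplates']

    # splitverbsubject cuts an API name at the first non-lowercase character
    # and lower-cases both halves, which is how verb/subject buckets are keyed
    print(splitverbsubject("listVirtualMachines"))        # ('list', 'virtualmachines')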
def savecache(apicache, json_file):
"""
Saves apicache dictionary as json_file, returns dictionary as indented str
"""
apicachestr = json.dumps(apicache, indent=2)
with open(json_file, 'w') as cache_file:
cache_file.write(apicachestr)
return apicachestr
def loadcache(json_file):
"""
Loads json file as dictionary, feeds it to monkeycache and spits result
"""
f = open(json_file, 'r')
data = f.read()
f.close()
try:
apis = json.loads(data)
apicache = json.loads(data)
except ValueError, e:
print "Error processing json in cachegen()", e
return cachegen(apis)
print "Error processing json:", json_file, e
return {}
return apicache
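A small round-trip sketch of the two functions above; the path is hypothetical (cloudmonkey itself uses ~/.cloudmonkey/cache):

    from cachemaker import savecache, loadcache

    cache = {'verbs': ['list'], 'asyncapis': []}
    savecache(cache, '/tmp/monkeycache.json')             # writes indented JSON, returns it as a str
    assert loadcache('/tmp/monkeycache.json') == cache    # unparsable JSON would return {}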
def cachegen(apis):
pattern = re.compile("[A-Z]")
def monkeycache(apis):
"""
Feed this a dictionary of api bananas, it spits out processed cache
"""
if isinstance(type(apis), types.NoneType):
return {}
responsekey = filter(lambda x: 'response' in x, apis.keys())
if len(responsekey) == 0:
print "[cachegen] Invalid dictionary, has no response"
print "[monkeycache] Invalid dictionary, has no response"
return None
if len(responsekey) != 1:
print "[cachegen] Multiple responsekeys, chosing first one"
print "[monkeycache] Multiple responsekeys, chosing first one"
responsekey = responsekey[0]
verbs = set()
cache = {}
cache['count'] = getvalue(apis[responsekey], 'count')
cache['asyncapis'] = []
for api in getvalue(apis[responsekey], 'api'):
name = getvalue(api, 'name')
response = getvalue(api, 'response')
idx = pattern.search(name).start()
verb = name[:idx]
subject = name[idx:]
verb, subject = splitverbsubject(name)
apidict = {}
apidict['name'] = name
apidict['description'] = getvalue(api, 'description')
apidict['isasync'] = getvalue(api, 'isasync')
apidict['related'] = csv_str_as_list(getvalue(api, 'related'))
if apidict['isasync']:
cache['asyncapis'].append(name)
apidict['related'] = splitcsvstring(getvalue(api, 'related'))
required = []
apiparams = []
@@ -91,15 +118,16 @@ def cachegen(apis):
apiparam['required'] = (getvalue(param, 'required') is True)
apiparam['length'] = int(getvalue(param, 'length'))
apiparam['type'] = getvalue(param, 'type')
apiparam['related'] = csv_str_as_list(getvalue(param, 'related'))
apiparam['related'] = splitcsvstring(getvalue(param, 'related'))
if apiparam['required']:
required.append(apiparam['name'])
apiparams.append(apiparam)
apidict['requiredparams'] = required
apidict['params'] = apiparams
apidict['response'] = getvalue(api, 'response')
cache[verb] = {subject: apidict}
if verb not in cache:
cache[verb] = {}
cache[verb][subject] = apidict
verbs.add(verb)
cache['verbs'] = list(verbs)
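To illustrate the cache layout built here, a contrived listApis-style response (one fake API with no params) and the lookups it enables; real discovery output is much larger:

    from cachemaker import monkeycache

    response = {'listapisresponse': {'count': 1, 'api': [
        {'name': 'listUsers', 'description': 'Lists user accounts',
         'isasync': False, 'related': '', 'params': []}]}}

    cache = monkeycache(response)
    # cache['verbs']                  -> ['list']
    # cache['asyncapis']              -> []
    # cache['list']['users']['name']  -> 'listUsers'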
@@ -108,7 +136,7 @@ def cachegen(apis):
def main(json_file):
"""
cachegen.py creates a precache datastore of all available apis of
cachemaker.py creates a precache datastore of all available apis of
CloudStack and dumps the precache dictionary in an
importable python module. This way we cheat on the runtime overhead of
completing commands and help docs. This reduces the overall search and
@@ -116,7 +144,7 @@ def main(json_file):
"""
f = open("precache.py", "w")
f.write("""# -*- coding: utf-8 -*-
# Auto-generated code by cachegen.py
# Auto-generated code by cachemaker.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
@@ -133,13 +161,13 @@ def main(json_file):
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.""")
f.write("\nprecache = %s" % cachegen_from_file(json_file))
f.write("\napicache = %s" % loadcache(json_file))
f.close()
if __name__ == "__main__":
json_file = 'listapis.json'
if os.path.exists(json_file):
main(json_file)
print "[cachemaker] Pre-caching using user's cloudmonkey cache", cache_file
if os.path.exists(cache_file):
main(cache_file)
else:
pass
#print "[ERROR] cli:cachegen is unable to locate %s" % json_file
print "[cachemaker] Unable to cache apis, file not found", cache_file
print "[cachemaker] Run cloudmonkey sync to generate cache"

cloudmonkey/cloudmonkey.py

@@ -24,28 +24,32 @@ try:
import logging
import os
import pdb
import re
import shlex
import sys
import types
from urllib2 import HTTPError, URLError
from httplib import BadStatusLine
from config import __version__, config_file
from config import precached_verbs, read_config, write_config
from config import __version__, cache_file
from config import read_config, write_config
from printer import monkeyprint
from requester import monkeyrequest
from cachemaker import loadcache, savecache, monkeycache
from cachemaker import splitverbsubject
from prettytable import PrettyTable
from marvin.cloudstackConnection import cloudConnection
from marvin.cloudstackException import cloudstackAPIException
from marvin.cloudstackAPI import *
from marvin import cloudstackAPI
except ImportError, e:
print "Import error in %s : %s" % (__name__, e)
import sys
sys.exit()
try:
from precache import apicache
except ImportError:
apicache = {}
# Fix autocompletion issue, can be put in .pythonstartup
try:
import readline
@@ -60,23 +64,21 @@ else:
log_fmt = '%(asctime)s - %(filename)s:%(lineno)s - [%(levelname)s] %(message)s'
logger = logging.getLogger(__name__)
completions = cloudstackAPI.__all__
class CloudMonkeyShell(cmd.Cmd, object):
intro = ("☁ Apache CloudStack 🐵 cloudmonkey " + __version__ +
". Type help or ? to list commands.\n")
ruler = "="
apicache = {}
# datastructure {'verb': {cmd': ['api', [params], doc, required=[]]}}
cache_verbs = precached_verbs
cache_file = cache_file
## datastructure {'verb': {cmd': ['api', [params], doc, required=[]]}}
#cache_verbs = apicache
config_options = []
def __init__(self, pname, verbs):
def __init__(self, pname):
self.program_name = pname
self.verbs = verbs
self.config_options = read_config(self.get_attr, self.set_attr)
self.loadcache()
self.prompt = self.prompt.strip() + " " # Cosmetic fix for prompt
logging.basicConfig(filename=self.log_file,
@@ -111,8 +113,27 @@ class CloudMonkeyShell(cmd.Cmd, object):
except KeyboardInterrupt:
print("^C")
def loadcache(self):
if os.path.exists(self.cache_file):
self.apicache = loadcache(self.cache_file)
else:
self.apicache = apicache
self.verbs = apicache['verbs']
def monkeyprint(self, *args):
monkeyprint((self.color == 'true'), *args)
output = ""
try:
for arg in args:
if isinstance(type(arg), types.NoneType):
continue
output += str(arg)
except Exception, e:
print e
if self.color == 'true':
monkeyprint(output)
else:
print output
def print_result(self, result, result_filter=None):
if result is None or len(result) == 0:
@@ -186,6 +207,9 @@ class CloudMonkeyShell(cmd.Cmd, object):
if self.pipe_runner(args):
return
apiname = args.partition(' ')[0]
verb, subject = splitverbsubject(apiname)
lexp = shlex.shlex(args.strip())
lexp.whitespace = " "
lexp.whitespace_split = True
@@ -196,7 +220,6 @@ class CloudMonkeyShell(cmd.Cmd, object):
if next_val is None:
break
args.append(next_val)
api_name = args[0]
args_dict = dict(map(lambda x: [x.partition("=")[0],
x.partition("=")[2]],
@@ -207,22 +230,15 @@ class CloudMonkeyShell(cmd.Cmd, object):
map(lambda x: x.strip(),
args_dict.pop('filter').split(',')))
for attribute in args_dict.keys():
setattr(api_cmd, attribute, args_dict[attribute])
missing_args = filter(lambda x: x not in args_dict.keys(),
self.apicache[verb][subject]['requiredparams'])
#command = api_cmd()
#missing_args = filter(lambda x: x not in args_dict.keys(),
# command.required)
if len(missing_args) > 0:
self.monkeyprint("Missing arguments: ", ' '.join(missing_args))
return
#if len(missing_args) > 0:
# self.monkeyprint("Missing arguments: ", ' '.join(missing_args))
# return
isAsync = False
#if "isAsync" in dir(command):
# isAsync = (command.isAsync == "true")
result = self.make_request(api_name, args_dict, isAsync)
result = self.make_request(apiname, args_dict,
apiname in self.apicache['asyncapis'])
if result is None:
return
try:
@@ -248,17 +264,19 @@ class CloudMonkeyShell(cmd.Cmd, object):
search_string = ""
if separator != " ": # Complete verb subjects
autocompletions = self.cache_verbs[verb].keys()
autocompletions = self.apicache[verb].keys()
search_string = subject
else: # Complete subject params
autocompletions = map(lambda x: x + "=",
self.cache_verbs[verb][subject][1])
map(lambda x: x['name'],
self.apicache[verb][subject]['params']))
search_string = text
if self.tabularize == "true" and subject != "":
autocompletions.append("filter=")
return [s for s in autocompletions if s.startswith(search_string)]
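A stripped-down sketch of where those completion candidates come from; the cache contents below are assumed, not real discovery data:

    apicache = {'list': {'users': {'params': [{'name': 'id'}, {'name': 'account'}]}}}

    # Completing the subject right after a verb uses the subject keys
    subjects = apicache['list'].keys()                      # ['users']
    # Completing parameters once the subject is known builds "name=" candidates
    params = [p['name'] + '=' for p in apicache['list']['users']['params']]
    # gives ['id=', 'account=']; 'filter=' is appended when tabular output is on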
def do_sync(self, args):
"""
Asks cloudmonkey to discover and sync apis available on user specified
@@ -266,9 +284,9 @@
it rolls back to the last datastore or the api precached datastore.
"""
response = self.make_request("listApis")
f = open('test.json', "w")
f.write(json.dumps(response))
f.close()
self.apicache = monkeycache(response)
savecache(self.apicache, self.cache_file)
self.loadcache()
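In effect, sync now feeds the discovery response through the cache pipeline instead of dumping it to test.json; a condensed sketch of the same steps outside the shell (the response payload and path are stand-ins):

    from cachemaker import monkeycache, savecache, loadcache

    response = {'listapisresponse': {'count': 0, 'api': []}}   # stand-in for make_request("listApis")
    cache_file = '/tmp/monkeycache.json'                       # the shell uses ~/.cloudmonkey/cache

    apicache = monkeycache(response)     # normalize into the verb/subject tree
    savecache(apicache, cache_file)      # persist for the next session
    apicache = loadcache(cache_file)     # reload so completions see fresh data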
def do_api(self, args):
"""
@@ -282,11 +300,6 @@ class CloudMonkeyShell(cmd.Cmd, object):
else:
self.monkeyprint("Please use a valid syntax")
def complete_api(self, text, line, begidx, endidx):
mline = line.partition(" ")[2]
offs = len(mline) - len(text)
return [s[offs:] for s in completions if s.startswith(mline)]
def do_set(self, args):
"""
Set config for cloudmonkey. For example, options can be:
@@ -387,9 +400,12 @@ class CloudMonkeyShell(cmd.Cmd, object):
def main():
pattern = re.compile("[A-Z]")
verbs = list(set([x[:pattern.search(x).start()] for x in completions
if pattern.search(x) is not None]).difference(['cloudstack']))
verbs = []
if os.path.exists(cache_file):
verbs = loadcache(cache_file)['verbs']
elif 'verbs' in apicache:
verbs = apicache['verbs']
for verb in verbs:
def add_grammar(verb):
def grammar_closure(self, args):
@@ -397,9 +413,9 @@ def main():
return
try:
args_partition = args.partition(" ")
res = self.cache_verbs[verb][args_partition[0]]
cmd = res[0]
helpdoc = res[2]
api = self.apicache[verb][args_partition[0]]
cmd = api['name']
helpdoc = api['description']
args = args_partition[2]
except KeyError, e:
self.monkeyprint("Error: invalid %s api arg" % verb, e)
@@ -412,10 +428,10 @@ def main():
grammar_handler = add_grammar(verb)
grammar_handler.__doc__ = "%ss resources" % verb.capitalize()
grammar_handler.__name__ = 'do_' + verb
grammar_handler.__name__ = 'do_' + str(verb)
setattr(CloudMonkeyShell, grammar_handler.__name__, grammar_handler)
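The closure above is what gives the shell its verb commands (do_list, do_create, and so on) without hand-written methods; a self-contained sketch of the same pattern using a toy cache:

    import cmd

    class MiniShell(cmd.Cmd):
        apicache = {'list': {'users': {'name': 'listUsers',
                                       'description': 'Lists user accounts'}}}

    def add_grammar(verb):
        def grammar_closure(self, args):
            subject = args.partition(' ')[0]
            api = self.apicache[verb][subject]
            print("%s: %s" % (api['name'], api['description']))
        return grammar_closure

    for verb in MiniShell.apicache:                 # just 'list' in this toy cache
        handler = add_grammar(verb)
        handler.__doc__ = "%ss resources" % verb.capitalize()
        handler.__name__ = 'do_' + str(verb)
        setattr(MiniShell, handler.__name__, handler)

    MiniShell().onecmd('list users')                # prints: listUsers: Lists user accounts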
shell = CloudMonkeyShell(sys.argv[0], verbs)
shell = CloudMonkeyShell(sys.argv[0])
if len(sys.argv) > 1:
shell.onecmd(' '.join(sys.argv[1:]))
else:

cloudmonkey/config.py

@@ -26,9 +26,8 @@ try:
from ConfigParser import ConfigParser, SafeConfigParser
from os.path import expanduser
from precache import precached_verbs
except ImportError, e:
precached_verbs = {}
print "ImportError", e
param_type = ['boolean', 'date', 'float', 'integer', 'short', 'list',
'long', 'object', 'map', 'string', 'tzdate', 'uuid']
@@ -37,12 +36,12 @@ iterable_type = ['set', 'list', 'object']
config_dir = expanduser('~/.cloudmonkey')
config_file = expanduser(config_dir + '/config')
cache_file = expanduser(config_dir + '/cache')
# cloudmonkey config fields
config_fields = {'core': {}, 'ui': {}, 'server': {}, 'user': {}}
# core
config_fields['core']['cache_file'] = expanduser(config_dir + '/cache')
config_fields['core']['history_file'] = expanduser(config_dir + '/history')
config_fields['core']['log_file'] = expanduser(config_dir + '/log')
@@ -106,10 +105,10 @@ def read_config(get_attr, set_attr):
try:
set_attr(key, config.get(section, key))
except Exception:
missing_keys.appned(key)
missing_keys.append(key)
if len(missing_keys) > 0:
print "Please fix `%s` in %s" % (key, config_file)
print "Please fix `%s` in %s" % (', '.join(missing_keys), config_file)
sys.exit()
return config_options
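For context, read_config walks the sections and keys declared in config_fields and copies each value onto the shell via set_attr; a hedged sketch of reading the same core keys directly, assuming the config file has already been written out by write_config:

    from ConfigParser import SafeConfigParser    # Python 2, as imported in this module
    from os.path import expanduser

    parser = SafeConfigParser()
    parser.read(expanduser('~/.cloudmonkey/config'))
    for key in ('cache_file', 'history_file', 'log_file'):
        print(parser.get('core', key))           # same keys as config_fields['core']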

cloudmonkey/printer.py

@@ -25,7 +25,6 @@ try:
from pygments.token import *
import sys
import types
except ImportError, e:
print e
@@ -113,21 +112,9 @@ class MonkeyFormatter(Formatter):
outfile.write(value)
def monkeyprint(color=True, *args):
def monkeyprint(text):
fmter = MonkeyFormatter()
lexer = MonkeyLexer()
lexer.encoding = 'utf-8'
fmter.encoding = 'utf-8'
output = ""
try:
for arg in args:
if isinstance(type(arg), types.NoneType):
continue
output += str(arg)
except Exception, e:
print e
if color:
highlight(output, lexer, fmter, sys.stdout)
else:
print output
highlight(text, lexer, fmter, sys.stdout)
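After this change the printer only colorizes: building the output string and deciding whether to use color moved into the shell's own monkeyprint wrapper (see the cloudmonkey.py hunk earlier in this commit). A minimal usage sketch, with arbitrary sample text:

    from printer import monkeyprint

    # Runs the text through MonkeyLexer/MonkeyFormatter and writes it to stdout
    monkeyprint("name = admin\nstate = enabled\n")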

pom.xml

@@ -32,45 +32,11 @@
<build>
<defaultGoal>install</defaultGoal>
<plugins>
<plugin>
<artifactId>maven-antrun-plugin</artifactId>
<version>1.7</version>
<executions>
<execution>
<id>generate-resource</id>
<phase>generate-resources</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<delete dir="${basedir}/cloudmonkey/marvin"/>
</target>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<version>1.2.1</version>
<executions>
<execution>
<id>compile</id>
<phase>compile</phase>
<goals>
<goal>exec</goal>
</goals>
<configuration>
<workingDirectory>${basedir}</workingDirectory>
<executable>cp</executable>
<arguments>
<argument>-rv</argument>
<argument>${basedir}/../marvin/marvin</argument>
<argument>${basedir}/cloudmonkey</argument>
</arguments>
</configuration>
</execution>
<execution>
<id>cachemaker</id>
<phase>compile</phase>