Explorar o código

AMBARI-6662. API required to provide temporal min/max/avg/sum/rate for a Flume metric (ncole)

Nate Cole hai 11 anos
pai
achega
58347de3d9

+ 1 - 1
ambari-agent/src/main/python/ambari_agent/AmbariConfig.py

@@ -31,7 +31,7 @@ secured_url_port=8441
 
 
 [agent]
 [agent]
 prefix=/tmp/ambari-agent
 prefix=/tmp/ambari-agent
-tmp_dir=/tmp/ambari-agent/tmp # For test purposes
+tmp_dir=/tmp/ambari-agent/tmp
 data_cleanup_interval=86400
 data_cleanup_interval=86400
 data_cleanup_max_age=2592000
 data_cleanup_max_age=2592000
 ping_port=8670
 ping_port=8670

+ 0 - 25
ambari-server/src/main/java/org/apache/ambari/server/controller/nagios/NagiosPropertyProvider.java

@@ -155,7 +155,6 @@ public class NagiosPropertyProvider extends BaseProvider implements PropertyProv
           } catch (Exception e) {
           } catch (Exception e) {
             LOG.error("Could not load Nagios alerts: " + e.getMessage());
             LOG.error("Could not load Nagios alerts: " + e.getMessage());
           }
           }
-          alerts.addAll(convertAlerts(clusterName));
           CLUSTER_ALERTS.put(clusterName, alerts);
           CLUSTER_ALERTS.put(clusterName, alerts);
         }
         }
       }
       }
@@ -163,30 +162,6 @@ public class NagiosPropertyProvider extends BaseProvider implements PropertyProv
   }
   }
 
 
   
   
-  /**
-   * Convert Alert from cluster to NagiosAlert
-   * @param clusterName the cluster name
-   * @return Collection of NagiosAlerts
-   * @throws AmbariException 
-   */
-  public List<NagiosAlert> convertAlerts(String clusterName) {
-    Cluster cluster;
-    try {
-      cluster = clusters.getCluster(clusterName);
-    } catch (AmbariException ex) {
-      return new ArrayList<NagiosAlert>();
-    }
-    Collection<Alert> clusterAlerts = cluster.getAlerts();
-    List<NagiosAlert> results = new ArrayList<NagiosAlert>();
-    if (clusterAlerts != null) {
-      for (Alert alert : clusterAlerts) {
-        NagiosAlert a = new NagiosAlert(alert);
-        results.add(a);
-      }
-    }
-    return results;
-  }
-  
   /**
   /**
    * Use only for testing to remove all cached alerts.
    * Use only for testing to remove all cached alerts.
    */
    */

+ 128 - 0
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/metrics.json

@@ -288,7 +288,71 @@
             "metric":"(\\w+).SOURCE.(\\w+).EventAcceptedCount",
             "metric":"(\\w+).SOURCE.(\\w+).EventAcceptedCount",
             "pointInTime":true,
             "pointInTime":true,
             "temporal":true
             "temporal":true
+          },
+
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/min": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._min",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/max": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._max",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/avg": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._avg",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/sum": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._sum",
+            "pointInTime":false,
+            "temporal":true
+          },
+
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/avg": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._avg",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/max": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._max",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/min": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._min",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/sum": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._sum",
+            "pointInTime":false,
+            "temporal":true
+          },
+          
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/avg": {
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._avg",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/max": {
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._max",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/min": {
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._min",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/sum": {
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._sum",
+            "pointInTime":false,
+            "temporal":true
           }
           }
+
         }
         }
       }
       }
     ],
     ],
@@ -580,7 +644,71 @@
             "metric":"(\\w+).SOURCE.(\\w+).EventAcceptedCount",
             "metric":"(\\w+).SOURCE.(\\w+).EventAcceptedCount",
             "pointInTime":true,
             "pointInTime":true,
             "temporal":true
             "temporal":true
+          },
+
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/avg": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._avg",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/max": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._max",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/min": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._min",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventTakeSuccessCount/rate/sum": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventTakeSuccessCount._rate._sum",
+            "pointInTime":false,
+            "temporal":true
+          },
+
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/avg": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._avg",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/max": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._max",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/min": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._min",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")EventPutSuccessCount/rate/sum": {
+            "metric":"(\\w+).CHANNEL.(\\w+).EventPutSuccessCount._rate._sum",
+            "pointInTime":false,
+            "temporal":true
+          },
+          
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/avg": {
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._avg",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/max": {
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._max",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/min": {
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._min",
+            "pointInTime":false,
+            "temporal":true
+          },
+          "metrics/flume/$1.substring(0)/CHANNEL/$2.replaceAll(\"[^-]+\",\"\")ChannelSize/sum": {
+            "metric":"(\\w+).CHANNEL.(\\w+).ChannelSize._sum",
+            "pointInTime":false,
+            "temporal":true
           }
           }
+          
         }
         }
       }
       }
     ]
     ]

+ 1 - 1
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/FLUME/package/scripts/flume.py

@@ -82,7 +82,7 @@ def flume(action = None):
         Execute(flume_cmd, wait_for_finish=False)
         Execute(flume_cmd, wait_for_finish=False)
 
 
         # sometimes startup spawns a couple of threads - so only the first line may count
         # sometimes startup spawns a couple of threads - so only the first line may count
-        pid_cmd = format('pgrep -o -u {flume_user} -f ^{java_home} > {flume_agent_pid_file}')
+        pid_cmd = format('pgrep -o -u {flume_user} -f ^{java_home}.*{agent}.* > {flume_agent_pid_file}')
         Execute(pid_cmd, logoutput=True, tries=10, try_sleep=1)
         Execute(pid_cmd, logoutput=True, tries=10, try_sleep=1)
 
 
     pass
     pass

+ 155 - 17
ambari-server/src/main/resources/stacks/HDP/2.0.6/services/GANGLIA/package/templates/rrd.py.j2

@@ -19,22 +19,20 @@ limitations under the License.
 '''
 '''
 
 
 import cgi
 import cgi
+import glob
 import os
 import os
+import re
 import rrdtool
 import rrdtool
 import sys
 import sys
 import time
 import time
-import re
 import urlparse
 import urlparse
 
 
 # place this script in /var/www/cgi-bin of the Ganglia collector
 # place this script in /var/www/cgi-bin of the Ganglia collector
 # requires 'yum install rrdtool-python' on the Ganglia collector
 # requires 'yum install rrdtool-python' on the Ganglia collector
-
-
-def printMetric(clusterName, hostName, metricName, file, cf, start, end,
-                resolution, pointInTime):
-  if clusterName.endswith("rrds"):
-    clusterName = ""
-
+'''
+  Loads rrd file info
+'''
+def loadRRDData(file, cf, start, end, resolution):
   args = [file, cf, "--daemon", "unix:{{ganglia_runtime_dir}}/rrdcached.limited.sock"]
   args = [file, cf, "--daemon", "unix:{{ganglia_runtime_dir}}/rrdcached.limited.sock"]
 
 
   if start is not None:
   if start is not None:
@@ -48,7 +46,139 @@ def printMetric(clusterName, hostName, metricName, file, cf, start, end,
   if resolution is not None:
   if resolution is not None:
     args.extend(["-r", resolution])
     args.extend(["-r", resolution])
 
 
-  rrdMetric = rrdtool.fetch(args)
+  return rrdtool.fetch(args)
+
+'''
+  Collects metrics across several matching filenames.
+'''
+def collectStatMetrics(clusterName, hostName, metricName, files, cf, start, end, resolution):
+  if clusterName[0] is not '/':
+    clusterName.insert(0, '/')
+
+  metricParts = metricName.split('.')
+
+  # already know there's at least one
+  metricStat = metricParts[-1]
+  metricName = '.'.join(metricParts[:-1])
+
+  isRate = False
+  if len(metricParts) > 1 and metricParts[-2] == '_rate':
+    isRate = True
+    metricName = '.'.join(metricParts[:-2])
+
+  pattern = re.compile(metricName + '\.rrd$')
+  matchedFiles = filter(pattern.match, files)
+
+  parentPath = os.path.join(*clusterName)
+
+  actualFiles = []
+  for matchedFile in matchedFiles:
+    if hostName != "__SummaryInfo__":
+      osFiles = glob.glob(os.path.join(parentPath, hostName, matchedFile))
+    else:
+      osFiles = glob.glob(os.path.join(parentPath, '*', matchedFile))
+
+    for f in osFiles:
+      if -1 == f.find("__SummaryInfo__"):
+        actualFiles.append(f)
+
+  if len(actualFiles) == 0:
+    return
+
+  '''
+  [
+    {
+      "step_value": update each iteration
+      "count": increase by 1 each iteration
+      "sum": increase by value each iteration
+      "avg": update each iteration as sum/count
+      "min": update each iteration if step_value < old min OR min is missing (first time)
+      "max": update each iteration if step_value > old max OR max is missing (first time)
+    }
+  ]
+  '''
+
+  timestamp = None
+  stepsize = None
+  concreteMetricName = None
+  vals = None # values across all files
+
+  for file in actualFiles:
+    rrdMetric = loadRRDData(file, cf, start, end, resolution)
+    
+    if timestamp is None and stepsize is None and concreteMetricName is None:
+      timestamp = rrdMetric[0][0]
+      stepsize = rrdMetric[0][2]
+      suffix = metricStat if not isRate else '_rate.' + metricStat
+      concreteMetricName = file.split(os.sep).pop().replace('rrd', suffix)
+
+    metricValues = rrdMetric[2]
+
+    if vals is None:
+      vals = [None] * len(metricValues)
+
+    i = 0
+    for tuple in metricValues:
+      if vals[i] is None:
+        vals[i] = {}
+        vals[i]['count'] = 0
+        vals[i]['_sum'] = 0
+        vals[i]['_avg'] = 0
+        vals[i]['_min'] = 0
+        vals[i]['_max'] = 0
+
+      rawValue = tuple[0]
+      vals[i]['step_value'] = rawValue
+      if rawValue is None:
+        i += 1
+        continue
+
+      if isRate:
+        if 0 == i:
+          rawValue = 0.0
+        elif vals[i-1]['step_value'] is None:
+          rawValue = 0.0
+        else:
+          rawValue = (rawValue - vals[i-1]['step_value']) / stepsize
+      
+      vals[i]['count'] += 1 
+      vals[i]['_sum'] += rawValue
+
+      vals[i]['_avg'] = vals[i]['_sum']/vals[i]['count']
+
+      if rawValue < vals[i]['_min']:
+        vals[i]['_min'] = rawValue
+
+      if rawValue > vals[i]['_max']:
+        vals[i]['_max'] = rawValue
+      
+      i += 1
+
+  sys.stdout.write("sum\n")
+  sys.stdout.write(clusterName[len(clusterName)-1] + "\n")
+  sys.stdout.write(hostName + "\n")
+  sys.stdout.write(concreteMetricName + "\n")
+  sys.stdout.write(str(timestamp) + "\n")
+  sys.stdout.write(str(stepsize) + "\n")
+
+  for val in vals:
+    if val['step_value'] is None:
+      sys.stdout.write("[~n]")
+    else:
+      sys.stdout.write(str(val[metricStat]))
+    sys.stdout.write("\n")
+
+  sys.stdout.write("[~EOM]\n")
+
+  return
+
+def printMetric(clusterName, hostName, metricName, file, cf, start, end,
+                resolution, pointInTime):
+  if clusterName.endswith("rrds"):
+    clusterName = ""
+ 
+  rrdMetric = loadRRDData(file, cf, start, end, resolution)
+
   # ds_name
   # ds_name
   sys.stdout.write(rrdMetric[1][0])
   sys.stdout.write(rrdMetric[1][0])
   sys.stdout.write("\n")
   sys.stdout.write("\n")
@@ -198,14 +328,22 @@ for cluster in clusterParts:
                       os.path.join(path, file), cf, start, end, resolution,
                       os.path.join(path, file), cf, start, end, resolution,
                       pointInTime)
                       pointInTime)
         else:
         else:
-          #Regex as metric name
-          metricRegex = metric + '\.rrd$'
-          p = re.compile(metricRegex)
-          matchedFiles = filter(p.match, files)
-          for matchedFile in matchedFiles:
-            printMetric(pathParts[-2], pathParts[-1], matchedFile[:-4],
-                        os.path.join(path, matchedFile), cf, start, end,
-                        resolution, pointInTime)
+          need_stats = False
+          parts = metric.split(".")
+          if len(parts) > 0 and parts[-1] in ['_min', '_max', '_avg', '_sum']:
+              need_stats = True
+
+          if need_stats and not pointInTime:
+            collectStatMetrics(pathParts[:-1], pathParts[-1], metric, files, cf, start, end, resolution)
+          else:
+            #Regex as metric name
+            metricRegex = metric + '\.rrd$'
+            p = re.compile(metricRegex)
+            matchedFiles = filter(p.match, files)
+            for matchedFile in matchedFiles:
+              printMetric(pathParts[-2], pathParts[-1], matchedFile[:-4],
+                         os.path.join(path, matchedFile), cf, start, end,
+                         resolution, pointInTime)
 
 
 sys.stdout.write("[~EOF]\n")
 sys.stdout.write("[~EOF]\n")
 # write end time
 # write end time

+ 0 - 18
ambari-server/src/test/java/org/apache/ambari/server/controller/nagios/NagiosPropertyProviderTest.java

@@ -442,24 +442,6 @@ public class NagiosPropertyProviderTest {
     Assert.assertTrue(summary.get("CRITICAL").equals(Integer.valueOf(1)));
     Assert.assertTrue(summary.get("CRITICAL").equals(Integer.valueOf(1)));
   }
   }
 
 
-  @Test
-  public void testConvertAlerts() throws Exception {
-    Injector inj = Guice.createInjector(new GuiceModule());
-    
-    Clusters clusters = inj.getInstance(Clusters.class);
-    Cluster cluster = createMock(Cluster.class);
-    expect(cluster.getAlerts()).andReturn(Collections.<Alert>emptySet()).anyTimes();
-    expect(clusters.getCluster("c1")).andReturn(cluster);
-    replay(clusters, cluster);
-    TestStreamProvider streamProvider = new TestStreamProvider("nagios_alerts.txt");
-    NagiosPropertyProvider npp = new NagiosPropertyProvider(Resource.Type.Service,
-    streamProvider, "ServiceInfo/cluster_name", "ServiceInfo/service_name");
-    List<NagiosAlert> list = npp.convertAlerts("c1");
-    Assert.assertNotNull(list);
-    Assert.assertEquals(0, list.size());
-  }  
-  
-  
   @Test
   @Test
   public void testNagiosServiceAlertsWithPassive() throws Exception {
   public void testNagiosServiceAlertsWithPassive() throws Exception {
     Injector inj = Guice.createInjector(new GuiceModule());
     Injector inj = Guice.createInjector(new GuiceModule());

+ 3 - 3
ambari-server/src/test/java/org/apache/ambari/server/upgrade/UpgradeCatalog160Test.java

@@ -234,17 +234,17 @@ public class UpgradeCatalog160Test {
     assertNull(column.getDefaultValue());
     assertNull(column.getDefaultValue());
     assertTrue(column.isNullable());
     assertTrue(column.isNullable());
   }
   }
-  
+
   @Test
   @Test
   public void testGetSourceVersion() {
   public void testGetSourceVersion() {
     final DBAccessor dbAccessor     = createNiceMock(DBAccessor.class);
     final DBAccessor dbAccessor     = createNiceMock(DBAccessor.class);
     UpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
     UpgradeCatalog upgradeCatalog = getUpgradeCatalog(dbAccessor);
     Assert.assertEquals("1.5.1", upgradeCatalog.getSourceVersion());
     Assert.assertEquals("1.5.1", upgradeCatalog.getSourceVersion());
-  }  
+  }
   /**
   /**
    * Checks that the restart_require column was created correct when using a
    * Checks that the restart_require column was created correct when using a
    * non-Postgres DB (MySQL, Oracle, etc).
    * non-Postgres DB (MySQL, Oracle, etc).
-   * 
+   *
    * @param restartRequiredColumnCapture
    * @param restartRequiredColumnCapture
    */
    */
   private void assertRestartRequiredColumn(
   private void assertRestartRequiredColumn(

+ 3 - 3
ambari-server/src/test/python/stacks/2.0.6/FLUME/test_flume.py

@@ -57,7 +57,7 @@ class TestFlumeHandler(RMFTestCase):
       '-Dflume.monitoring.hosts=c6401.ambari.apache.org:8655"'),
       '-Dflume.monitoring.hosts=c6401.ambari.apache.org:8655"'),
       wait_for_finish = False)
       wait_for_finish = False)
 
 
-    self.assertResourceCalled('Execute', 'pgrep -o -u flume -f ^/usr/jdk64/jdk1.7.0_45 > /var/run/flume/a1.pid',
+    self.assertResourceCalled('Execute', 'pgrep -o -u flume -f ^/usr/jdk64/jdk1.7.0_45.*a1.* > /var/run/flume/a1.pid',
       logoutput = True,
       logoutput = True,
       tries = 10,
       tries = 10,
       try_sleep = 1)
       try_sleep = 1)
@@ -205,7 +205,7 @@ class TestFlumeHandler(RMFTestCase):
       '-Dflume.monitoring.hosts=c6401.ambari.apache.org:8655"'),
       '-Dflume.monitoring.hosts=c6401.ambari.apache.org:8655"'),
       wait_for_finish = False)
       wait_for_finish = False)
 
 
-    self.assertResourceCalled('Execute', 'pgrep -o -u flume -f ^/usr/jdk64/jdk1.7.0_45 > /var/run/flume/b1.pid',
+    self.assertResourceCalled('Execute', 'pgrep -o -u flume -f ^/usr/jdk64/jdk1.7.0_45.*b1.* > /var/run/flume/b1.pid',
       logoutput = True,
       logoutput = True,
       tries = 10,
       tries = 10,
       try_sleep = 1)
       try_sleep = 1)
@@ -233,7 +233,7 @@ class TestFlumeHandler(RMFTestCase):
       '-Dflume.monitoring.hosts=c6401.ambari.apache.org:8655"'),
       '-Dflume.monitoring.hosts=c6401.ambari.apache.org:8655"'),
       wait_for_finish = False)
       wait_for_finish = False)
 
 
-    self.assertResourceCalled('Execute', 'pgrep -o -u flume -f ^/usr/jdk64/jdk1.7.0_45 > /var/run/flume/b1.pid',
+    self.assertResourceCalled('Execute', 'pgrep -o -u flume -f ^/usr/jdk64/jdk1.7.0_45.*b1.* > /var/run/flume/b1.pid',
       logoutput = True,
       logoutput = True,
       tries = 10,
       tries = 10,
       try_sleep = 1)
       try_sleep = 1)