/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hdfs.h"
#include "hdfsJniHelper.h"

/* Some frequently used Java class paths */
#define HADOOP_CONF     "org/apache/hadoop/conf/Configuration"
#define HADOOP_PATH     "org/apache/hadoop/fs/Path"
#define HADOOP_LOCALFS  "org/apache/hadoop/fs/LocalFileSystem"
#define HADOOP_FS       "org/apache/hadoop/fs/FileSystem"
#define HADOOP_BLK_LOC  "org/apache/hadoop/fs/BlockLocation"
#define HADOOP_DFS      "org/apache/hadoop/hdfs/DistributedFileSystem"
#define HADOOP_ISTRM    "org/apache/hadoop/fs/FSDataInputStream"
#define HADOOP_OSTRM    "org/apache/hadoop/fs/FSDataOutputStream"
#define HADOOP_STAT     "org/apache/hadoop/fs/FileStatus"
#define HADOOP_FSPERM   "org/apache/hadoop/fs/permission/FsPermission"
#define HADOOP_UNIX_USER_GROUP_INFO "org/apache/hadoop/security/UnixUserGroupInformation"
#define HADOOP_USER_GROUP_INFO "org/apache/hadoop/security/UserGroupInformation"
#define JAVA_NET_ISA    "java/net/InetSocketAddress"
#define JAVA_NET_URI    "java/net/URI"
#define JAVA_STRING     "java/lang/String"
#define JAVA_VOID       "V"

/* Macros for constructing JNI method signatures */
#define JPARAM(X)            "L" X ";"
#define JARRPARAM(X)         "[L" X ";"
#define JMETHOD1(X, R)       "(" X ")" R
#define JMETHOD2(X, Y, R)    "(" X Y ")" R
#define JMETHOD3(X, Y, Z, R) "(" X Y Z ")" R
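
/*
 * For example (an illustrative expansion, using only the macros and class
 * paths defined above):
 *
 *   JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_ISTRM))
 *
 * concatenates, at compile time, into the JNI signature string
 *
 *   "(Lorg/apache/hadoop/fs/Path;I)Lorg/apache/hadoop/fs/FSDataInputStream;"
 *
 * i.e. a method taking a Path and an int and returning an FSDataInputStream,
 * which is exactly the "open" signature used in hdfsOpenFile below.
 */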

/**
 * hdfsJniEnv: A wrapper struct to be used as 'value'
 * while saving thread -> JNIEnv* mappings
 */
typedef struct
{
    JNIEnv* env;
} hdfsJniEnv;

/**
 * Helper function to destroy a local reference of java.lang.Object
 * @param env: The JNIEnv pointer.
 * @param jObject: The local reference of the java.lang.Object object
 * @return None.
 */
static void destroyLocalReference(JNIEnv *env, jobject jObject)
{
    if (jObject)
        (*env)->DeleteLocalRef(env, jObject);
}

/**
 * Helper function to create an org.apache.hadoop.fs.Path object.
 * @param env: The JNIEnv pointer.
 * @param path: The file path for which to construct the
 * org.apache.hadoop.fs.Path object.
 * @return Returns a jobject on success and NULL on error.
 */
static jobject constructNewObjectOfPath(JNIEnv *env, const char *path)
{
    //Construct a java.lang.String object
    jstring jPathString = (*env)->NewStringUTF(env, path);

    //Construct the org.apache.hadoop.fs.Path object
    jobject jPath =
        constructNewObjectOfClass(env, NULL, "org/apache/hadoop/fs/Path",
                                  "(Ljava/lang/String;)V", jPathString);
    if (jPath == NULL) {
        fprintf(stderr, "Can't construct instance of class "
                "org.apache.hadoop.fs.Path for %s\n", path);
        errno = EINTERNAL;
        return NULL;
    }

    //Destroy the local reference to the java.lang.String object
    destroyLocalReference(env, jPathString);

    return jPath;
}

/**
 * Helper function to translate an exception into a meaningful errno value.
 * @param exc: The exception.
 * @param env: The JNIEnv pointer.
 * @param method: The name of the method that threw the exception. This
 * may be a format string to be used in conjunction with additional arguments.
 * @return Returns a meaningful errno value if possible, or EINTERNAL if not.
 */
static int errnoFromException(jthrowable exc, JNIEnv *env,
                              const char *method, ...)
{
    va_list ap;
    int errnum = 0;
    char *excClass = NULL;

    if (exc == NULL)
        goto default_error;

    if ((excClass = classNameOfObject((jobject) exc, env)) == NULL) {
        errnum = EINTERNAL;
        goto done;
    }

    if (!strcmp(excClass, "org.apache.hadoop.security."
                "AccessControlException")) {
        errnum = EACCES;
        goto done;
    }

    if (!strcmp(excClass, "org.apache.hadoop.hdfs.protocol."
                "QuotaExceededException")) {
        errnum = EDQUOT;
        goto done;
    }

    if (!strcmp(excClass, "java.io.FileNotFoundException")) {
        errnum = ENOENT;
        goto done;
    }

    //TODO: interpret more exceptions; maybe examine exc.getMessage()

default_error:
    //Can't tell what went wrong, so just punt
    (*env)->ExceptionDescribe(env);
    fprintf(stderr, "Call to ");
    va_start(ap, method);
    vfprintf(stderr, method, ap);
    va_end(ap);
    fprintf(stderr, " failed!\n");
    errnum = EINTERNAL;

done:
    (*env)->ExceptionClear(env);

    if (excClass != NULL)
        free(excClass);

    return errnum;
}

hdfsFS hdfsConnect(const char* host, tPort port) {
    // connect with NULL as user name/groups
    return hdfsConnectAsUser(host, port, NULL, NULL, 0);
}
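
/*
 * Example usage (an illustrative sketch, not part of this file; "default"
 * makes the library use the default file system from the Hadoop
 * configuration on the CLASSPATH, and the explicit host:port form is shown
 * as an alternative):
 *
 *   hdfsFS fs = hdfsConnect("default", 0);
 *   // or: hdfsFS fs = hdfsConnect("namenode.example.com", 9000);
 *   if (!fs) {
 *       // errno describes the failure
 *   }
 *   ...
 *   hdfsDisconnect(fs);
 */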

hdfsFS hdfsConnectAsUser(const char* host, tPort port, const char *user, const char **groups, int groups_size)
{
    // JAVA EQUIVALENT:
    //  FileSystem fs = FileSystem.get(new Configuration());
    //  return fs;

    JNIEnv *env = 0;
    jobject jConfiguration = NULL;
    jobject jFS = NULL;
    jobject jURI = NULL;
    jstring jURIString = NULL;
    jvalue jVal;
    jthrowable jExc = NULL;
    char *cURI = 0;
    jobject gFsRef = NULL;

    //Get the JNIEnv* corresponding to current thread
    env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return NULL;
    }

    //Create the org.apache.hadoop.conf.Configuration object
    jConfiguration =
        constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
    if (jConfiguration == NULL) {
        fprintf(stderr, "Can't construct instance of class "
                "org.apache.hadoop.conf.Configuration\n");
        errno = EINTERNAL;
        return NULL;
    }

    if (user != NULL) {
        if (groups == NULL || groups_size <= 0) {
            fprintf(stderr, "ERROR: groups must not be empty/null\n");
            errno = EINVAL;
            return NULL;
        }

        jstring jUserString = (*env)->NewStringUTF(env, user);
        jarray jGroups = constructNewArrayString(env, &jExc, groups, groups_size);
        if (jGroups == NULL) {
            errno = EINTERNAL;
            fprintf(stderr, "ERROR: could not construct groups array\n");
            return NULL;
        }

        jobject jUgi;
        if ((jUgi = constructNewObjectOfClass(env, &jExc, HADOOP_UNIX_USER_GROUP_INFO, JMETHOD2(JPARAM(JAVA_STRING), JARRPARAM(JAVA_STRING), JAVA_VOID), jUserString, jGroups)) == NULL) {
            fprintf(stderr, "ERROR: could not construct UnixUserGroupInformation object\n");
            errno = errnoFromException(jExc, env, HADOOP_UNIX_USER_GROUP_INFO,
                                       "init");
            destroyLocalReference(env, jConfiguration);
            destroyLocalReference(env, jUserString);
            if (jGroups != NULL) {
                destroyLocalReference(env, jGroups);
            }
            return NULL;
        }

#define USE_UUGI
#ifdef USE_UUGI

        // UnixUserGroupInformation.UGI_PROPERTY_NAME
        jstring jAttrString = (*env)->NewStringUTF(env, "hadoop.job.ugi");

        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_UNIX_USER_GROUP_INFO, "saveToConf",
                         JMETHOD3(JPARAM(HADOOP_CONF), JPARAM(JAVA_STRING), JPARAM(HADOOP_UNIX_USER_GROUP_INFO), JAVA_VOID),
                         jConfiguration, jAttrString, jUgi) != 0) {
            errno = errnoFromException(jExc, env, HADOOP_UNIX_USER_GROUP_INFO,
                                       "saveToConf");
            destroyLocalReference(env, jConfiguration);
            destroyLocalReference(env, jUserString);
            if (jGroups != NULL) {
                destroyLocalReference(env, jGroups);
            }
            destroyLocalReference(env, jUgi);
            return NULL;
        }

        destroyLocalReference(env, jUserString);
        destroyLocalReference(env, jGroups);
        destroyLocalReference(env, jUgi);
    }
#else

    // What does "current" mean in the context of libhdfs? Does it mean the
    // last hdfs connection we used? That's why this code cannot be
    // activated. We know the above use of the conf object works well with
    // multiple connections.
    if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_USER_GROUP_INFO, "setCurrentUGI",
                     JMETHOD1(JPARAM(HADOOP_USER_GROUP_INFO), JAVA_VOID),
                     jUgi) != 0) {
        errno = errnoFromException(jExc, env, HADOOP_USER_GROUP_INFO,
                                   "setCurrentUGI");
        destroyLocalReference(env, jConfiguration);
        destroyLocalReference(env, jUserString);
        if (jGroups != NULL) {
            destroyLocalReference(env, jGroups);
        }
        destroyLocalReference(env, jUgi);
        return NULL;
    }

    destroyLocalReference(env, jUserString);
    destroyLocalReference(env, jGroups);
    destroyLocalReference(env, jUgi);
    }
#endif

    //Check what type of FileSystem the caller wants...
    if (host == NULL) {
        // fs = FileSystem::getLocal(conf);
        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS, "getLocal",
                         JMETHOD1(JPARAM(HADOOP_CONF),
                                  JPARAM(HADOOP_LOCALFS)),
                         jConfiguration) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FileSystem::getLocal");
            goto done;
        }
        jFS = jVal.l;
    }
    else if (!strcmp(host, "default") && port == 0) {
        //fs = FileSystem::get(conf);
        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL,
                         HADOOP_FS, "get",
                         JMETHOD1(JPARAM(HADOOP_CONF),
                                  JPARAM(HADOOP_FS)),
                         jConfiguration) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FileSystem::get");
            goto done;
        }
        jFS = jVal.l;
    }
    else {
        // fs = FileSystem::get(URI, conf);
        cURI = malloc(strlen(host) + 16);
        if (cURI == NULL) {
            errno = ENOMEM;
            goto done;
        }
        sprintf(cURI, "hdfs://%s:%d", host, (int)(port));

        jURIString = (*env)->NewStringUTF(env, cURI);
        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, JAVA_NET_URI,
                         "create", "(Ljava/lang/String;)Ljava/net/URI;",
                         jURIString) != 0) {
            errno = errnoFromException(jExc, env, "java.net.URI::create");
            goto done;
        }
        jURI = jVal.l;

        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS, "get",
                         JMETHOD2(JPARAM(JAVA_NET_URI),
                                  JPARAM(HADOOP_CONF), JPARAM(HADOOP_FS)),
                         jURI, jConfiguration) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FileSystem::get(URI, Configuration)");
            goto done;
        }
        jFS = jVal.l;
    }

done:
    //Release unnecessary local references
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jURIString);
    destroyLocalReference(env, jURI);
    if (cURI) free(cURI);

    /* Create a global reference for this fs */
    if (jFS) {
        gFsRef = (*env)->NewGlobalRef(env, jFS);
        destroyLocalReference(env, jFS);
    }

    return gFsRef;
}

int hdfsDisconnect(hdfsFS fs)
{
    // JAVA EQUIVALENT:
    //  fs.close()

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -2;
    }

    //Parameters
    jobject jFS = (jobject)fs;

    //Caught exception
    jthrowable jExc = NULL;

    //Sanity check
    if (fs == NULL) {
        errno = EBADF;
        return -1;
    }

    if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "close", "()V") != 0) {
        errno = errnoFromException(jExc, env, "FileSystem::close");
        return -1;
    }

    //Release unnecessary references
    (*env)->DeleteGlobalRef(env, fs);

    return 0;
}

hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
                      int bufferSize, short replication, tSize blockSize)
{
    /*
      JAVA EQUIVALENT:
       File f = new File(path);
       FSData{Input|Output}Stream f{is|os} = fs.create(f);
       return f{is|os};
    */

    /* Get the JNIEnv* corresponding to current thread */
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return NULL;
    }

    jobject jFS = (jobject)fs;

    if (flags & O_RDWR) {
        fprintf(stderr, "ERROR: cannot open an hdfs file in O_RDWR mode\n");
        errno = ENOTSUP;
        return NULL;
    }

    if ((flags & O_CREAT) && (flags & O_EXCL)) {
        fprintf(stderr, "WARN: hdfs does not truly support O_CREAT && O_EXCL\n");
    }

    /* The hadoop java api/signature */
    const char* method = ((flags & O_WRONLY) == 0) ? "open" :
                         (flags & O_APPEND) ? "append" : "create";
    const char* signature = ((flags & O_WRONLY) == 0) ?
        JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_ISTRM)) :
        (flags & O_APPEND) ?
        JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_OSTRM)) :
        JMETHOD2(JPARAM(HADOOP_PATH), "ZISJ", JPARAM(HADOOP_OSTRM));

    /* Return value */
    hdfsFile file = NULL;

    /* Create an object of org.apache.hadoop.fs.Path */
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }

    /* Get the Configuration object from the FileSystem object */
    jvalue jVal;
    jobject jConfiguration = NULL;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "getConf", JMETHOD1("", JPARAM(HADOOP_CONF))) != 0) {
        errno = errnoFromException(jExc, env, "get configuration object "
                                   "from filesystem");
        destroyLocalReference(env, jPath);
        return NULL;
    }
    jConfiguration = jVal.l;

    jint jBufferSize = bufferSize;
    jshort jReplication = replication;
    jlong jBlockSize = blockSize;
    jstring jStrBufferSize = (*env)->NewStringUTF(env, "io.file.buffer.size");
    jstring jStrReplication = (*env)->NewStringUTF(env, "dfs.replication");
    jstring jStrBlockSize = (*env)->NewStringUTF(env, "dfs.block.size");

    //bufferSize
    if (!bufferSize) {
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
                         HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
                         jStrBufferSize, 4096) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                       "Configuration::getInt");
            goto done;
        }
        jBufferSize = jVal.i;
    }

    if ((flags & O_WRONLY) && (flags & O_APPEND) == 0) {
        //replication
        if (!replication) {
            if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
                             HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
                             jStrReplication, 1) != 0) {
                errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                           "Configuration::getInt");
                goto done;
            }
            jReplication = jVal.i;
        }

        //blockSize
        if (!blockSize) {
            if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
                             HADOOP_CONF, "getLong", "(Ljava/lang/String;J)J",
                             jStrBlockSize, 67108864)) {
                errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                           "Configuration::getLong");
                goto done;
            }
            jBlockSize = jVal.j;
        }
    }

    /* Create and return either the FSDataInputStream or
       FSDataOutputStream reference (jobject jStream) */

    // READ?
    if ((flags & O_WRONLY) == 0) {
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                         method, signature, jPath, jBufferSize)) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FileSystem::%s(%s)", method,
                                       signature);
            goto done;
        }
    } else if ((flags & O_WRONLY) && (flags & O_APPEND)) {
        // WRITE/APPEND?
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                         method, signature, jPath)) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FileSystem::%s(%s)", method,
                                       signature);
            goto done;
        }
    } else {
        // WRITE/CREATE
        jboolean jOverWrite = 1;
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                         method, signature, jPath, jOverWrite,
                         jBufferSize, jReplication, jBlockSize)) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FileSystem::%s(%s)", method,
                                       signature);
            goto done;
        }
    }

    file = malloc(sizeof(struct hdfsFile_internal));
    if (!file) {
        errno = ENOMEM;
        goto done;  //release the local references before returning NULL
    }
    file->file = (*env)->NewGlobalRef(env, jVal.l);
    file->type = (((flags & O_WRONLY) == 0) ? INPUT : OUTPUT);
    destroyLocalReference(env, jVal.l);

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jStrBufferSize);
    destroyLocalReference(env, jStrReplication);
    destroyLocalReference(env, jStrBlockSize);
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jPath);

    return file;
}
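
/*
 * Example usage (an illustrative sketch; "/tmp/test.txt" is a hypothetical
 * path and error handling is elided; passing 0 for bufferSize, replication,
 * and blockSize selects the configured defaults):
 *
 *   hdfsFile f = hdfsOpenFile(fs, "/tmp/test.txt", O_WRONLY | O_CREAT,
 *                             0, 0, 0);
 *   hdfsWrite(fs, f, "hello\n", 6);
 *   hdfsFlush(fs, f);
 *   hdfsCloseFile(fs, f);
 */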

int hdfsCloseFile(hdfsFS fs, hdfsFile file)
{
    // JAVA EQUIVALENT:
    //  file.close()

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -2;
    }

    //Parameters
    jobject jStream = (jobject)(file ? file->file : NULL);

    //Caught exception
    jthrowable jExc = NULL;

    //Sanity check
    if (!file || file->type == UNINITIALIZED) {
        errno = EBADF;
        return -1;
    }

    //The interface whose 'close' method is to be called
    const char* interface = (file->type == INPUT) ?
        HADOOP_ISTRM : HADOOP_OSTRM;

    if (invokeMethod(env, NULL, &jExc, INSTANCE, jStream, interface,
                     "close", "()V") != 0) {
        errno = errnoFromException(jExc, env, "%s::close", interface);
        return -1;
    }

    //De-allocate memory
    free(file);
    (*env)->DeleteGlobalRef(env, jStream);

    return 0;
}

int hdfsExists(hdfsFS fs, const char *path)
{
    JNIEnv *env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -2;
    }

    jobject jPath = constructNewObjectOfPath(env, path);
    jvalue jVal;
    jthrowable jExc = NULL;
    jobject jFS = (jobject)fs;

    if (jPath == NULL) {
        return -1;
    }

    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"),
                     jPath) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::exists");
        destroyLocalReference(env, jPath);
        return -1;
    }
    destroyLocalReference(env, jPath);

    return jVal.z ? 0 : -1;
}
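
/*
 * Example usage (illustrative sketch; note the return convention:
 * 0 means the path exists, -1 means it does not or the check failed):
 *
 *   if (hdfsExists(fs, "/tmp/test.txt") == 0) {
 *       // path exists
 *   }
 */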

tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
{
    // JAVA EQUIVALENT:
    //  byte [] bR = new byte[length];
    //  fis.read(bR);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jInputStream = (jobject)(f ? f->file : NULL);

    jbyteArray jbRarray;
    jint noReadBytes = 0;
    jvalue jVal;
    jthrowable jExc = NULL;

    //Sanity check
    if (!f || f->type == UNINITIALIZED) {
        errno = EBADF;
        return -1;
    }

    //Error checking... make sure that this file is 'readable'
    if (f->type != INPUT) {
        fprintf(stderr, "Cannot read from a non-InputStream object!\n");
        errno = EINVAL;
        return -1;
    }

    //Read the requisite bytes
    jbRarray = (*env)->NewByteArray(env, length);
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
                     "read", "([B)I", jbRarray) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FSDataInputStream::read");
        noReadBytes = -1;
    }
    else {
        noReadBytes = jVal.i;
        if (noReadBytes > 0) {
            (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer);
        } else {
            //This is a valid case: there aren't any bytes left to read!
            if (noReadBytes == 0 || noReadBytes < -1) {
                fprintf(stderr, "WARN: FSDataInputStream.read returned an "
                        "invalid value (%d); libhdfs returns 0 (EOF)\n",
                        noReadBytes);
            }
            noReadBytes = 0;
        }
        errno = 0;
    }

    destroyLocalReference(env, jbRarray);
    return noReadBytes;
}
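
/*
 * Example usage (illustrative sketch; the path is hypothetical and error
 * handling is elided):
 *
 *   char buf[4096];
 *   hdfsFile f = hdfsOpenFile(fs, "/tmp/test.txt", O_RDONLY, 0, 0, 0);
 *   tSize n;
 *   while ((n = hdfsRead(fs, f, buf, sizeof(buf))) > 0) {
 *       // consume n bytes from buf; hdfsRead returns 0 at EOF, -1 on error
 *   }
 *   hdfsCloseFile(fs, f);
 */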

tSize hdfsPread(hdfsFS fs, hdfsFile f, tOffset position,
                void* buffer, tSize length)
{
    // JAVA EQUIVALENT:
    //  byte [] bR = new byte[length];
    //  fis.read(pos, bR, 0, length);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jInputStream = (jobject)(f ? f->file : NULL);

    jbyteArray jbRarray;
    jint noReadBytes = 0;
    jvalue jVal;
    jthrowable jExc = NULL;

    //Sanity check
    if (!f || f->type == UNINITIALIZED) {
        errno = EBADF;
        return -1;
    }

    //Error checking... make sure that this file is 'readable'
    if (f->type != INPUT) {
        fprintf(stderr, "Cannot read from a non-InputStream object!\n");
        errno = EINVAL;
        return -1;
    }

    //Read the requisite bytes
    jbRarray = (*env)->NewByteArray(env, length);
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
                     "read", "(J[BII)I", position, jbRarray, 0, length) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FSDataInputStream::read");
        noReadBytes = -1;
    }
    else {
        noReadBytes = jVal.i;
        if (noReadBytes > 0) {
            (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer);
        } else {
            //This is a valid case: there aren't any bytes left to read!
            if (noReadBytes == 0 || noReadBytes < -1) {
                fprintf(stderr, "WARN: FSDataInputStream.read returned an "
                        "invalid value (%d); libhdfs returns 0 (EOF)\n",
                        noReadBytes);
            }
            noReadBytes = 0;
        }
        errno = 0;
    }

    destroyLocalReference(env, jbRarray);
    return noReadBytes;
}

tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
{
    // JAVA EQUIVALENT
    //  byte b[] = str.getBytes();
    //  fso.write(b);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jOutputStream = (jobject)(f ? f->file : 0);
    jbyteArray jbWarray;

    //Caught exception
    jthrowable jExc = NULL;

    //Sanity check
    if (!f || f->type == UNINITIALIZED) {
        errno = EBADF;
        return -1;
    }

    if (length < 0) {
        errno = EINVAL;
        return -1;
    }

    //Error checking... make sure that this file is 'writable'
    if (f->type != OUTPUT) {
        fprintf(stderr, "Cannot write into a non-OutputStream object!\n");
        errno = EINVAL;
        return -1;
    }

    // A 'length' of zero is a valid use-case according to POSIX!
    if (length != 0) {
        //Write the requisite bytes into the file
        jbWarray = (*env)->NewByteArray(env, length);
        (*env)->SetByteArrayRegion(env, jbWarray, 0, length, buffer);
        if (invokeMethod(env, NULL, &jExc, INSTANCE, jOutputStream,
                         HADOOP_OSTRM, "write",
                         "([B)V", jbWarray) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FSDataOutputStream::write");
            length = -1;
        }
        destroyLocalReference(env, jbWarray);
    }

    //Return the number of bytes successfully written (the libc convention),
    //i.e. 'length' itself
    return length;
}

int hdfsSeek(hdfsFS fs, hdfsFile f, tOffset desiredPos)
{
    // JAVA EQUIVALENT
    //  fis.seek(pos);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jInputStream = (jobject)(f ? f->file : 0);

    //Caught exception
    jthrowable jExc = NULL;

    //Sanity check
    if (!f || f->type != INPUT) {
        errno = EBADF;
        return -1;
    }

    if (invokeMethod(env, NULL, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
                     "seek", "(J)V", desiredPos) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FSDataInputStream::seek");
        return -1;
    }

    return 0;
}

tOffset hdfsTell(hdfsFS fs, hdfsFile f)
{
    // JAVA EQUIVALENT
    //  pos = f.getPos();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jStream = (jobject)(f ? f->file : 0);

    //Sanity check
    if (!f || f->type == UNINITIALIZED) {
        errno = EBADF;
        return -1;
    }

    const char* interface = (f->type == INPUT) ?
        HADOOP_ISTRM : HADOOP_OSTRM;

    jlong currentPos = -1;
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStream,
                     interface, "getPos", "()J") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FSDataInputStream::getPos");
        return -1;
    }
    currentPos = jVal.j;

    return (tOffset)currentPos;
}
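
/*
 * Example usage (illustrative sketch; assumes 'f' is an open INPUT stream):
 *
 *   if (hdfsSeek(fs, f, 1024) == 0) {
 *       tOffset pos = hdfsTell(fs, f);   // pos is now 1024
 *   }
 */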

int hdfsFlush(hdfsFS fs, hdfsFile f)
{
    // JAVA EQUIVALENT
    //  fos.flush();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jOutputStream = (jobject)(f ? f->file : 0);

    //Caught exception
    jthrowable jExc = NULL;

    //Sanity check
    if (!f || f->type != OUTPUT) {
        errno = EBADF;
        return -1;
    }

    if (invokeMethod(env, NULL, &jExc, INSTANCE, jOutputStream,
                     HADOOP_OSTRM, "flush", "()V") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FSDataOutputStream::flush");
        return -1;
    }

    return 0;
}

int hdfsAvailable(hdfsFS fs, hdfsFile f)
{
    // JAVA EQUIVALENT
    //  fis.available();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jInputStream = (jobject)(f ? f->file : 0);

    //Caught exception
    jthrowable jExc = NULL;

    //Sanity check
    if (!f || f->type != INPUT) {
        errno = EBADF;
        return -1;
    }

    jint available = -1;
    jvalue jVal;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream,
                     HADOOP_ISTRM, "available", "()I") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FSDataInputStream::available");
        return -1;
    }
    available = jVal.i;

    return available;
}

int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
{
    //JAVA EQUIVALENT
    //  FileUtil::copy(srcFS, srcPath, dstFS, dstPath,
    //                 deleteSource = false, conf)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jSrcFS = (jobject)srcFS;
    jobject jDstFS = (jobject)dstFS;

    jobject jSrcPath = NULL;
    jobject jDstPath = NULL;

    jSrcPath = constructNewObjectOfPath(env, src);
    if (jSrcPath == NULL) {
        return -1;
    }

    jDstPath = constructNewObjectOfPath(env, dst);
    if (jDstPath == NULL) {
        destroyLocalReference(env, jSrcPath);
        return -1;
    }

    int retval = 0;

    //Create the org.apache.hadoop.conf.Configuration object
    jobject jConfiguration =
        constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
    if (jConfiguration == NULL) {
        fprintf(stderr, "Can't construct instance of class "
                "org.apache.hadoop.conf.Configuration\n");
        errno = EINTERNAL;
        destroyLocalReference(env, jSrcPath);
        destroyLocalReference(env, jDstPath);
        return -1;
    }

    //FileUtil::copy
    jboolean deleteSource = 0; //Only copy
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, STATIC,
                     NULL, "org/apache/hadoop/fs/FileUtil", "copy",
                     "(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;ZLorg/apache/hadoop/conf/Configuration;)Z",
                     jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
                     jConfiguration) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileUtil::copy");
        retval = -1;
        goto done;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jSrcPath);
    destroyLocalReference(env, jDstPath);

    return retval;
}

int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
{
    //JAVA EQUIVALENT
    //  FileUtil::copy(srcFS, srcPath, dstFS, dstPath,
    //                 deleteSource = true, conf)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jSrcFS = (jobject)srcFS;
    jobject jDstFS = (jobject)dstFS;

    jobject jSrcPath = NULL;
    jobject jDstPath = NULL;

    jSrcPath = constructNewObjectOfPath(env, src);
    if (jSrcPath == NULL) {
        return -1;
    }

    jDstPath = constructNewObjectOfPath(env, dst);
    if (jDstPath == NULL) {
        destroyLocalReference(env, jSrcPath);
        return -1;
    }

    int retval = 0;

    //Create the org.apache.hadoop.conf.Configuration object
    jobject jConfiguration =
        constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
    if (jConfiguration == NULL) {
        fprintf(stderr, "Can't construct instance of class "
                "org.apache.hadoop.conf.Configuration\n");
        errno = EINTERNAL;
        destroyLocalReference(env, jSrcPath);
        destroyLocalReference(env, jDstPath);
        return -1;
    }

    //FileUtil::copy
    jboolean deleteSource = 1; //Delete src after copy
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, STATIC, NULL,
                     "org/apache/hadoop/fs/FileUtil", "copy",
                     "(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;ZLorg/apache/hadoop/conf/Configuration;)Z",
                     jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
                     jConfiguration) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileUtil::copy(move)");
        retval = -1;
        goto done;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jSrcPath);
    destroyLocalReference(env, jDstPath);

    return retval;
}

int hdfsDelete(hdfsFS fs, const char* path)
{
    // JAVA EQUIVALENT:
    //  File f = new File(path);
    //  bool retval = fs.delete(f);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //Delete the file
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "delete", "(Lorg/apache/hadoop/fs/Path;)Z",
                     jPath) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::delete");
        destroyLocalReference(env, jPath);
        return -1;
    }

    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return (jVal.z) ? 0 : -1;
}

int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath)
{
    // JAVA EQUIVALENT:
    //  Path old = new Path(oldPath);
    //  Path new = new Path(newPath);
    //  fs.rename(old, new);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    //Create objects of org.apache.hadoop.fs.Path
    jobject jOldPath = NULL;
    jobject jNewPath = NULL;

    jOldPath = constructNewObjectOfPath(env, oldPath);
    if (jOldPath == NULL) {
        return -1;
    }

    jNewPath = constructNewObjectOfPath(env, newPath);
    if (jNewPath == NULL) {
        destroyLocalReference(env, jOldPath);
        return -1;
    }

    //Rename the file
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, "rename",
                     JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_PATH), "Z"),
                     jOldPath, jNewPath) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::rename");
        destroyLocalReference(env, jOldPath);
        destroyLocalReference(env, jNewPath);
        return -1;
    }

    //Delete unnecessary local references
    destroyLocalReference(env, jOldPath);
    destroyLocalReference(env, jNewPath);

    return (jVal.z) ? 0 : -1;
}

char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
{
    // JAVA EQUIVALENT:
    //  Path p = fs.getWorkingDirectory();
    //  return p.toString()

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return NULL;
    }

    jobject jFS = (jobject)fs;
    jobject jPath = NULL;
    jvalue jVal;
    jthrowable jExc = NULL;

    //FileSystem::getWorkingDirectory()
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS,
                     HADOOP_FS, "getWorkingDirectory",
                     "()Lorg/apache/hadoop/fs/Path;") != 0 ||
        jVal.l == NULL) {
        errno = errnoFromException(jExc, env, "FileSystem::"
                                   "getWorkingDirectory");
        return NULL;
    }
    jPath = jVal.l;

    //Path::toString()
    jstring jPathString;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPath,
                     "org/apache/hadoop/fs/Path", "toString",
                     "()Ljava/lang/String;") != 0) {
        errno = errnoFromException(jExc, env, "Path::toString");
        destroyLocalReference(env, jPath);
        return NULL;
    }
    jPathString = jVal.l;

    const char *jPathChars = (const char*)
        ((*env)->GetStringUTFChars(env, jPathString, NULL));

    //Copy to user-provided buffer; strncpy does not NUL-terminate when the
    //source is longer than the buffer, so terminate explicitly
    strncpy(buffer, jPathChars, bufferSize);
    if (bufferSize > 0) {
        buffer[bufferSize - 1] = '\0';
    }

    //Delete unnecessary local references
    (*env)->ReleaseStringUTFChars(env, jPathString, jPathChars);

    destroyLocalReference(env, jPathString);
    destroyLocalReference(env, jPath);

    return buffer;
}
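
/*
 * Example usage (illustrative sketch; the 8192-byte buffer size is an
 * arbitrary choice):
 *
 *   char cwd[8192];
 *   if (hdfsGetWorkingDirectory(fs, cwd, sizeof(cwd)) != NULL) {
 *       fprintf(stderr, "working directory: %s\n", cwd);
 *   }
 */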

int hdfsSetWorkingDirectory(hdfsFS fs, const char* path)
{
    // JAVA EQUIVALENT:
    //  fs.setWorkingDirectory(Path(path));

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;
    int retval = 0;
    jthrowable jExc = NULL;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //FileSystem::setWorkingDirectory()
    if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "setWorkingDirectory",
                     "(Lorg/apache/hadoop/fs/Path;)V", jPath) != 0) {
        errno = errnoFromException(jExc, env, "FileSystem::"
                                   "setWorkingDirectory");
        retval = -1;
    }

    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return retval;
}

int hdfsCreateDirectory(hdfsFS fs, const char* path)
{
    // JAVA EQUIVALENT:
    //  fs.mkdirs(new Path(path));

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //Create the directory
    jvalue jVal;
    jVal.z = 0;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "mkdirs", "(Lorg/apache/hadoop/fs/Path;)Z",
                     jPath) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::mkdirs");
        goto done;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return (jVal.z) ? 0 : -1;
}

int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication)
{
    // JAVA EQUIVALENT:
    //  fs.setReplication(new Path(path), replication);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //Set the replication factor
    jvalue jVal;
    jVal.z = 0;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "setReplication", "(Lorg/apache/hadoop/fs/Path;S)Z",
                     jPath, replication) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::setReplication");
        goto done;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return (jVal.z) ? 0 : -1;
}

int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group)
{
    // JAVA EQUIVALENT:
    //  fs.setOwner(path, owner, group)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    if (owner == NULL && group == NULL) {
        fprintf(stderr, "Both owner and group cannot be null in chown\n");
        errno = EINVAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //Either argument may be null; do not hand NULL to NewStringUTF
    jstring jOwnerString = owner ? (*env)->NewStringUTF(env, owner) : NULL;
    jstring jGroupString = group ? (*env)->NewStringUTF(env, group) : NULL;

    //Set the owner and/or group
    int ret = 0;
    jthrowable jExc = NULL;
    if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "setOwner", JMETHOD3(JPARAM(HADOOP_PATH), JPARAM(JAVA_STRING), JPARAM(JAVA_STRING), JAVA_VOID),
                     jPath, jOwnerString, jGroupString) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::setOwner");
        ret = -1;
        goto done;
    }

done:
    destroyLocalReference(env, jPath);
    destroyLocalReference(env, jOwnerString);
    destroyLocalReference(env, jGroupString);
    return ret;
}

int hdfsChmod(hdfsFS fs, const char* path, short mode)
{
    // JAVA EQUIVALENT:
    //  fs.setPermission(path, FsPermission)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    // construct jPermObj = new FsPermission(mode)
    jshort jmode = mode;
    jobject jPermObj =
        constructNewObjectOfClass(env, NULL, HADOOP_FSPERM, "(S)V", jmode);
    if (jPermObj == NULL) {
        return -2;
    }

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        destroyLocalReference(env, jPermObj);
        return -3;
    }

    //Set the permission
    int ret = 0;
    jthrowable jExc = NULL;
    if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "setPermission", JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_FSPERM), JAVA_VOID),
                     jPath, jPermObj) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::setPermission");
        ret = -1;
        goto done;
    }

done:
    destroyLocalReference(env, jPath);
    destroyLocalReference(env, jPermObj);
    return ret;
}

int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime)
{
    // JAVA EQUIVALENT:
    //  fs.setTimes(src, mtime, atime)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        fprintf(stderr, "could not construct path object\n");
        return -2;
    }

    //Convert from seconds (libhdfs) to milliseconds (Java)
    jlong jmtime = mtime * (jlong)1000;
    jlong jatime = atime * (jlong)1000;

    int ret = 0;
    jthrowable jExc = NULL;
    if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "setTimes", JMETHOD3(JPARAM(HADOOP_PATH), "J", "J", JAVA_VOID),
                     jPath, jmtime, jatime) != 0) {
        fprintf(stderr, "call to setTimes failed\n");
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::setTimes");
        ret = -1;
        goto done;
    }

done:
    destroyLocalReference(env, jPath);
    return ret;
}
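
/*
 * Example usage (illustrative sketch; the path, the owner name, and the
 * 0644 mode are hypothetical; time() needs <time.h>):
 *
 *   hdfsChmod(fs, "/tmp/test.txt", 0644);
 *   hdfsChown(fs, "/tmp/test.txt", "alice", NULL);          // owner only
 *   hdfsUtime(fs, "/tmp/test.txt", time(NULL), time(NULL)); // touch
 */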

char***
hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length)
{
    // JAVA EQUIVALENT:
    //  fs.getFileBlockLocations(new Path(path), start, length);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return NULL;
    }

    jobject jFS = (jobject)fs;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }

    jvalue jFSVal;
    jthrowable jFSExc = NULL;
    if (invokeMethod(env, &jFSVal, &jFSExc, INSTANCE, jFS,
                     HADOOP_FS, "getFileStatus",
                     "(Lorg/apache/hadoop/fs/Path;)"
                     "Lorg/apache/hadoop/fs/FileStatus;",
                     jPath) != 0) {
        errno = errnoFromException(jFSExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::getFileStatus");
        destroyLocalReference(env, jPath);
        return NULL;
    }
    jobject jFileStatus = jFSVal.l;

    //org.apache.hadoop.fs.FileSystem::getFileBlockLocations
    char*** blockHosts = NULL;
    jobjectArray jBlockLocations;
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS,
                     HADOOP_FS, "getFileBlockLocations",
                     "(Lorg/apache/hadoop/fs/FileStatus;JJ)"
                     "[Lorg/apache/hadoop/fs/BlockLocation;",
                     jFileStatus, start, length) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::getFileBlockLocations");
        destroyLocalReference(env, jPath);
        destroyLocalReference(env, jFileStatus);
        return NULL;
    }
    jBlockLocations = jVal.l;

    //Figure out the number of entries in jBlockLocations
    //Allocate memory and add NULL at the end
    jsize jNumFileBlocks = (*env)->GetArrayLength(env, jBlockLocations);
    blockHosts = malloc(sizeof(char**) * (jNumFileBlocks + 1));
    if (blockHosts == NULL) {
        errno = ENOMEM;
        goto done;
    }
    blockHosts[jNumFileBlocks] = NULL;
    if (jNumFileBlocks == 0) {
        errno = 0;
        goto done;
    }

    //Now parse each block to get hostnames
    int i = 0;
    for (i = 0; i < jNumFileBlocks; ++i) {
        jobject jFileBlock =
            (*env)->GetObjectArrayElement(env, jBlockLocations, i);

        jvalue jVal;
        jobjectArray jFileBlockHosts;
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFileBlock, HADOOP_BLK_LOC,
                         "getHosts", "()[Ljava/lang/String;") ||
            jVal.l == NULL) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "BlockLocation::getHosts");
            destroyLocalReference(env, jPath);
            destroyLocalReference(env, jFileStatus);
            destroyLocalReference(env, jBlockLocations);
            return NULL;
        }

        jFileBlockHosts = jVal.l;
        //Figure out the number of hosts in jFileBlockHosts
        //Allocate memory and add NULL at the end
        jsize jNumBlockHosts = (*env)->GetArrayLength(env, jFileBlockHosts);
        blockHosts[i] = malloc(sizeof(char*) * (jNumBlockHosts + 1));
        if (blockHosts[i] == NULL) {
            int x = 0;
            for (x = 0; x < i; ++x) {
                free(blockHosts[x]);
            }
            free(blockHosts);
            blockHosts = NULL;  //do not return a dangling pointer
            errno = ENOMEM;
            goto done;
        }
        blockHosts[i][jNumBlockHosts] = NULL;

        //Now parse each hostname
        int j = 0;
        const char *hostName;
        for (j = 0; j < jNumBlockHosts; ++j) {
            jstring jHost =
                (*env)->GetObjectArrayElement(env, jFileBlockHosts, j);
            hostName =
                (const char*)((*env)->GetStringUTFChars(env, jHost, NULL));
            blockHosts[i][j] = strdup(hostName);
            (*env)->ReleaseStringUTFChars(env, jHost, hostName);
            destroyLocalReference(env, jHost);
        }

        destroyLocalReference(env, jFileBlockHosts);
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);
    destroyLocalReference(env, jFileStatus);
    destroyLocalReference(env, jBlockLocations);

    return blockHosts;
}
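
/*
 * Example usage (illustrative sketch; the path and the 0/1 offset-length
 * pair are hypothetical):
 *
 *   char ***hosts = hdfsGetHosts(fs, "/tmp/test.txt", 0, 1);
 *   if (hosts) {
 *       int i, j;
 *       for (i = 0; hosts[i]; i++) {
 *           for (j = 0; hosts[i][j]; j++) {
 *               fprintf(stderr, "block %d replica on %s\n", i, hosts[i][j]);
 *           }
 *       }
 *       hdfsFreeHosts(hosts);
 *   }
 */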

void hdfsFreeHosts(char ***blockHosts)
{
    int i, j;
    for (i = 0; blockHosts[i]; i++) {
        for (j = 0; blockHosts[i][j]; j++) {
            free(blockHosts[i][j]);
        }
        free(blockHosts[i]);
    }
    free(blockHosts);
}

tOffset hdfsGetDefaultBlockSize(hdfsFS fs)
{
    // JAVA EQUIVALENT:
    //  fs.getDefaultBlockSize();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    //FileSystem::getDefaultBlockSize()
    tOffset blockSize = -1;
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "getDefaultBlockSize", "()J") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::getDefaultBlockSize");
        return -1;
    }
    blockSize = jVal.j;

    return blockSize;
}

tOffset hdfsGetCapacity(hdfsFS fs)
{
    // JAVA EQUIVALENT:
    //  fs.getRawCapacity();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    if (!((*env)->IsInstanceOf(env, jFS,
                               globalClassReference(HADOOP_DFS, env)))) {
        fprintf(stderr, "hdfsGetCapacity works only on a "
                "DistributedFileSystem!\n");
        return -1;
    }

    //FileSystem::getRawCapacity()
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS,
                     "getRawCapacity", "()J") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::getRawCapacity");
        return -1;
    }

    return jVal.j;
}
tOffset hdfsGetUsed(hdfsFS fs)
{
    // JAVA EQUIVALENT:
    //  fs.getRawUsed();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    if (!((*env)->IsInstanceOf(env, jFS,
                               globalClassReference(HADOOP_DFS, env)))) {
        fprintf(stderr, "hdfsGetUsed works only on a "
                "DistributedFileSystem!\n");
        return -1;
    }

    //DistributedFileSystem::getRawUsed()
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS,
                     "getRawUsed", "()J") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "DistributedFileSystem::getRawUsed");
        return -1;
    }
    return jVal.j;
}
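/*
 * Hedged usage sketch for the two DistributedFileSystem-only statistics
 * calls (assumes "fs" was opened against an HDFS cluster rather than a
 * local or other FileSystem implementation, since both calls refuse
 * anything that is not a DistributedFileSystem):
 *
 *   tOffset cap  = hdfsGetCapacity(fs);
 *   tOffset used = hdfsGetUsed(fs);
 *   if (cap != -1 && used != -1) {
 *       printf("raw usage: %lld of %lld bytes\n",
 *              (long long)used, (long long)cap);
 *   }
 */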
static int
getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
{
    jvalue jVal;
    jthrowable jExc = NULL;

    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
                     HADOOP_STAT, "isDir", "()Z") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileStatus::isDir");
        return -1;
    }
    fileInfo->mKind = jVal.z ? kObjectKindDirectory : kObjectKindFile;

    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
                     HADOOP_STAT, "getReplication", "()S") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileStatus::getReplication");
        return -1;
    }
    fileInfo->mReplication = jVal.s;

    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
                     HADOOP_STAT, "getBlockSize", "()J") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileStatus::getBlockSize");
        return -1;
    }
    fileInfo->mBlockSize = jVal.j;

    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
                     HADOOP_STAT, "getModificationTime", "()J") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileStatus::getModificationTime");
        return -1;
    }
    fileInfo->mLastMod = (tTime) (jVal.j / 1000);

    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
                     HADOOP_STAT, "getAccessTime", "()J") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileStatus::getAccessTime");
        return -1;
    }
    fileInfo->mLastAccess = (tTime) (jVal.j / 1000);

    if (fileInfo->mKind == kObjectKindFile) {
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
                         HADOOP_STAT, "getLen", "()J") != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FileStatus::getLen");
            return -1;
        }
        fileInfo->mSize = jVal.j;
    }

    jobject jPath;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
                     "getPath", "()Lorg/apache/hadoop/fs/Path;") ||
            jVal.l == NULL) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileStatus::getPath");
        return -1;
    }
    jPath = jVal.l;

    jstring jPathName;
    const char *cPathName;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPath, HADOOP_PATH,
                     "toString", "()Ljava/lang/String;")) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "Path::toString");
        destroyLocalReference(env, jPath);
        return -1;
    }
    jPathName = jVal.l;
    cPathName = (const char*)
        ((*env)->GetStringUTFChars(env, jPathName, NULL));
    fileInfo->mName = strdup(cPathName);
    (*env)->ReleaseStringUTFChars(env, jPathName, cPathName);
    destroyLocalReference(env, jPath);
    destroyLocalReference(env, jPathName);

    jstring jUserName;
    const char* cUserName;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
                     "getOwner", "()Ljava/lang/String;")) {
        fprintf(stderr, "Call to org.apache.hadoop.fs."
                "FileStatus::getOwner failed!\n");
        errno = EINTERNAL;
        return -1;
    }
    jUserName = jVal.l;
    cUserName = (const char*)
        ((*env)->GetStringUTFChars(env, jUserName, NULL));
    fileInfo->mOwner = strdup(cUserName);
    (*env)->ReleaseStringUTFChars(env, jUserName, cUserName);
    destroyLocalReference(env, jUserName);

    jstring jGroupName;
    const char* cGroupName;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
                     "getGroup", "()Ljava/lang/String;")) {
        fprintf(stderr, "Call to org.apache.hadoop.fs."
                "FileStatus::getGroup failed!\n");
        errno = EINTERNAL;
        return -1;
    }
    jGroupName = jVal.l;
    cGroupName = (const char*)
        ((*env)->GetStringUTFChars(env, jGroupName, NULL));
    fileInfo->mGroup = strdup(cGroupName);
    (*env)->ReleaseStringUTFChars(env, jGroupName, cGroupName);
    destroyLocalReference(env, jGroupName);

    jobject jPermission;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
                     "getPermission",
                     "()Lorg/apache/hadoop/fs/permission/FsPermission;") ||
            jVal.l == NULL) {
        fprintf(stderr, "Call to org.apache.hadoop.fs."
                "FileStatus::getPermission failed!\n");
        errno = EINTERNAL;
        return -1;
    }
    jPermission = jVal.l;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPermission, HADOOP_FSPERM,
                     "toShort", "()S") != 0) {
        fprintf(stderr, "Call to org.apache.hadoop.fs.permission."
                "FsPermission::toShort failed!\n");
        destroyLocalReference(env, jPermission);
        errno = EINTERNAL;
        return -1;
    }
    fileInfo->mPermissions = jVal.s;
    destroyLocalReference(env, jPermission);

    return 0;
}
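/*
 * Note: FileStatus reports modification and access times in milliseconds
 * since the epoch; the division by 1000 above stores them in mLastMod and
 * mLastAccess as whole seconds. A hypothetical round-trip back to
 * Java-style millis would simply multiply:
 *
 *   jlong javaMtimeMillis = (jlong)fileInfo->mLastMod * 1000;
 */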
static int
getFileInfo(JNIEnv *env, jobject jFS, jobject jPath, hdfsFileInfo *fileInfo)
{
    // JAVA EQUIVALENT:
    //  fs.isDirectory(f)
    //  fs.getModificationTime()
    //  fs.getAccessTime()
    //  fs.getLength(f)
    //  f.getPath()
    //  f.getOwner()
    //  f.getGroup()
    //  f.getPermission().toShort()

    jobject jStat;
    jvalue jVal;
    jthrowable jExc = NULL;

    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"),
                     jPath) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::exists");
        return -1;
    }
    if (jVal.z == 0) {
        errno = ENOENT;
        return -1;
    }

    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "getFileStatus",
                     JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_STAT)),
                     jPath) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::getFileStatus");
        return -1;
    }
    jStat = jVal.l;
    int ret = getFileInfoFromStat(env, jStat, fileInfo);
    destroyLocalReference(env, jStat);
    return ret;
}
hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries)
{
    // JAVA EQUIVALENT:
    //  Path p(path);
    //  Path []pathList = fs.listPaths(p)
    //  foreach path in pathList
    //    getFileInfo(path)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return NULL;
    }

    jobject jFS = (jobject)fs;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }

    hdfsFileInfo *pathList = NULL;

    jobjectArray jPathList = NULL;
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS,
                     "listStatus",
                     JMETHOD1(JPARAM(HADOOP_PATH), JARRPARAM(HADOOP_STAT)),
                     jPath) != 0 || jVal.l == NULL) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::listStatus");
        destroyLocalReference(env, jPath);
        return NULL;
    }
    jPathList = jVal.l;

    //Figure out the number of entries in that directory
    jsize jPathListSize = (*env)->GetArrayLength(env, jPathList);
    *numEntries = jPathListSize;
    if (jPathListSize == 0) {
        errno = 0;
        goto done;
    }

    //Allocate memory
    pathList = calloc(jPathListSize, sizeof(hdfsFileInfo));
    if (pathList == NULL) {
        errno = ENOMEM;
        goto done;
    }

    //Save path information in pathList
    jsize i;
    jobject tmpStat;
    for (i = 0; i < jPathListSize; ++i) {
        tmpStat = (*env)->GetObjectArrayElement(env, jPathList, i);
        if (getFileInfoFromStat(env, tmpStat, &pathList[i])) {
            hdfsFreeFileInfo(pathList, jPathListSize);
            destroyLocalReference(env, tmpStat);
            pathList = NULL;
            *numEntries = 0;
            goto done;
        }
        destroyLocalReference(env, tmpStat);
    }

    done:

    //Delete unnecessary local references
    destroyLocalReference(env, jPath);
    destroyLocalReference(env, jPathList);

    return pathList;
}
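/*
 * Minimal usage sketch (illustrative; assumes a connected "fs" handle and
 * an existing directory). The returned array has *numEntries elements and
 * must be released with hdfsFreeFileInfo:
 *
 *   int n = 0;
 *   hdfsFileInfo *entries = hdfsListDirectory(fs, "/user/foo", &n);
 *   if (entries) {
 *       int k;
 *       for (k = 0; k < n; ++k) {
 *           printf("%c %s\n",
 *                  (entries[k].mKind == kObjectKindDirectory) ? 'd' : '-',
 *                  entries[k].mName);
 *       }
 *       hdfsFreeFileInfo(entries, n);
 *   }
 */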
hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path)
{
    // JAVA EQUIVALENT:
    //  File f(path);
    //  fs.isDirectory(f)
    //  fs.lastModified() ??
    //  fs.getLength(f)
    //  f.getPath()

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return NULL;
    }

    jobject jFS = (jobject)fs;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }

    hdfsFileInfo *fileInfo = calloc(1, sizeof(hdfsFileInfo));
    if (fileInfo == NULL) {
        errno = ENOMEM;
        goto done;
    }
    if (getFileInfo(env, jFS, jPath, fileInfo)) {
        hdfsFreeFileInfo(fileInfo, 1);
        fileInfo = NULL;
        goto done;
    }

    done:

    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return fileInfo;
}
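/*
 * Hedged usage sketch (assumes "fs" is valid and the path exists; on
 * failure the call returns NULL with errno set, e.g. ENOENT):
 *
 *   hdfsFileInfo *info = hdfsGetPathInfo(fs, "/user/foo/file.txt");
 *   if (info) {
 *       printf("%s: %lld bytes, mode %03o, owner %s\n", info->mName,
 *              (long long)info->mSize, info->mPermissions, info->mOwner);
 *       hdfsFreeFileInfo(info, 1);
 *   }
 */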
void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)
{
    //Free the name, owner, and group strings duplicated into each entry
    int i;
    for (i = 0; i < numEntries; ++i) {
        if (hdfsFileInfo[i].mName) {
            free(hdfsFileInfo[i].mName);
        }
        if (hdfsFileInfo[i].mOwner) {
            free(hdfsFileInfo[i].mOwner);
        }
        if (hdfsFileInfo[i].mGroup) {
            free(hdfsFileInfo[i].mGroup);
        }
    }

    //Free entire block
    free(hdfsFileInfo);
}
/**
 * vim: ts=4: sw=4: et:
 */