/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "hdfs.h"
#include "hdfsJniHelper.h"

/* Some frequently used Java paths */
#define HADOOP_CONF     "org/apache/hadoop/conf/Configuration"
#define HADOOP_PATH     "org/apache/hadoop/fs/Path"
#define HADOOP_LOCALFS  "org/apache/hadoop/fs/LocalFileSystem"
#define HADOOP_FS       "org/apache/hadoop/fs/FileSystem"
#define HADOOP_BLK_LOC  "org/apache/hadoop/fs/BlockLocation"
#define HADOOP_DFS      "org/apache/hadoop/hdfs/DistributedFileSystem"
#define HADOOP_ISTRM    "org/apache/hadoop/fs/FSDataInputStream"
#define HADOOP_OSTRM    "org/apache/hadoop/fs/FSDataOutputStream"
#define HADOOP_STAT     "org/apache/hadoop/fs/FileStatus"
#define HADOOP_FSPERM   "org/apache/hadoop/fs/permission/FsPermission"
#define HADOOP_UNIX_USER_GROUP_INFO "org/apache/hadoop/security/UnixUserGroupInformation"
#define HADOOP_USER_GROUP_INFO "org/apache/hadoop/security/UserGroupInformation"
#define JAVA_NET_ISA    "java/net/InetSocketAddress"
#define JAVA_NET_URI    "java/net/URI"
#define JAVA_STRING     "java/lang/String"
#define JAVA_VOID       "V"

/* Macros for constructing method signatures */
#define JPARAM(X)            "L" X ";"
#define JARRPARAM(X)         "[L" X ";"
#define JMETHOD1(X, R)       "(" X ")" R
#define JMETHOD2(X, Y, R)    "(" X Y ")" R
#define JMETHOD3(X, Y, Z, R) "(" X Y Z ")" R
/**
 * hdfsJniEnv: A wrapper struct to be used as 'value'
 * while saving thread -> JNIEnv* mappings
 */
typedef struct
{
    JNIEnv* env;
} hdfsJniEnv;

/**
 * Helper function to destroy a local reference of java.lang.Object
 * @param env: The JNIEnv pointer.
 * @param jObject: The local reference of the java.lang.Object to delete.
 * @return None.
 */
static void destroyLocalReference(JNIEnv *env, jobject jObject)
{
    if (jObject)
        (*env)->DeleteLocalRef(env, jObject);
}
/**
 * Helper function to create an org.apache.hadoop.fs.Path object.
 * @param env: The JNIEnv pointer.
 * @param path: The file-path for which to construct the
 * org.apache.hadoop.fs.Path object.
 * @return Returns a jobject on success and NULL on error.
 */
static jobject constructNewObjectOfPath(JNIEnv *env, const char *path)
{
    //Construct a java.lang.String object
    jstring jPathString = (*env)->NewStringUTF(env, path);
    if (jPathString == NULL) {
        errno = EINTERNAL;
        return NULL;
    }

    //Construct the org.apache.hadoop.fs.Path object
    jobject jPath =
        constructNewObjectOfClass(env, NULL, "org/apache/hadoop/fs/Path",
                                  "(Ljava/lang/String;)V", jPathString);

    //The String is no longer needed once the Path has been constructed
    destroyLocalReference(env, jPathString);

    if (jPath == NULL) {
        fprintf(stderr, "Can't construct instance of class "
                "org.apache.hadoop.fs.Path for %s\n", path);
        errno = EINTERNAL;
        return NULL;
    }

    return jPath;
}
/**
 * Helper function to translate an exception into a meaningful errno value.
 * @param exc: The exception.
 * @param env: The JNIEnv Pointer.
 * @param method: The name of the method that threw the exception. This
 * may be a format string to be used in conjunction with additional arguments.
 * @return Returns a meaningful errno value if possible, or EINTERNAL if not.
 */
static int errnoFromException(jthrowable exc, JNIEnv *env,
                              const char *method, ...)
{
    va_list ap;
    int errnum = 0;
    char *excClass = NULL;

    if (exc == NULL)
        goto default_error;

    if ((excClass = classNameOfObject((jobject) exc, env)) == NULL) {
        errnum = EINTERNAL;
        goto done;
    }

    if (!strcmp(excClass, "org.apache.hadoop.fs.permission."
                "AccessControlException")) {
        errnum = EACCES;
        goto done;
    }

    //TODO: interpret more exceptions; maybe examine exc.getMessage()

default_error:
    //Can't tell what went wrong, so just punt
    (*env)->ExceptionDescribe(env);
    fprintf(stderr, "Call to ");
    va_start(ap, method);
    vfprintf(stderr, method, ap);
    va_end(ap);
    fprintf(stderr, " failed!\n");
    errnum = EINTERNAL;

done:
    (*env)->ExceptionClear(env);
    if (excClass != NULL)
        free(excClass);
    return errnum;
}
hdfsFS hdfsConnect(const char* host, tPort port) {
    // connect with NULL as user name/groups
    return hdfsConnectAsUser(host, port, NULL, NULL, 0);
}
hdfsFS hdfsConnectAsUser(const char* host, tPort port, const char *user,
                         const char **groups, int groups_size)
{
    // JAVA EQUIVALENT:
    //  FileSystem fs = FileSystem.get(new Configuration());
    //  return fs;

    JNIEnv *env = 0;
    jobject jConfiguration = NULL;
    jobject jFS = NULL;
    jobject jURI = NULL;
    jstring jURIString = NULL;
    jvalue jVal;
    jthrowable jExc = NULL;
    char *cURI = 0;
    jobject gFsRef = NULL;

    //Get the JNIEnv* corresponding to current thread
    env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return NULL;
    }

    //Create the org.apache.hadoop.conf.Configuration object
    jConfiguration =
        constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
    if (jConfiguration == NULL) {
        fprintf(stderr, "Can't construct instance of class "
                "org.apache.hadoop.conf.Configuration\n");
        errno = EINTERNAL;
        return NULL;
    }

    if (user != NULL) {
        if (groups == NULL || groups_size <= 0) {
            fprintf(stderr, "ERROR: groups must not be empty/null\n");
            errno = EINVAL;
            return NULL;
        }

        jstring jUserString = (*env)->NewStringUTF(env, user);
        jarray jGroups = constructNewArrayString(env, &jExc, groups,
                                                 groups_size);
        if (jGroups == NULL) {
            errno = EINTERNAL;
            fprintf(stderr, "ERROR: could not construct groups array\n");
            return NULL;
        }

        jobject jUgi;
        if ((jUgi = constructNewObjectOfClass(env, &jExc,
                        HADOOP_UNIX_USER_GROUP_INFO,
                        JMETHOD2(JPARAM(JAVA_STRING), JARRPARAM(JAVA_STRING),
                                 JAVA_VOID),
                        jUserString, jGroups)) == NULL) {
            fprintf(stderr, "failed to construct hadoop user unix group "
                    "info object\n");
            errno = errnoFromException(jExc, env, HADOOP_UNIX_USER_GROUP_INFO,
                                       "init");
            destroyLocalReference(env, jConfiguration);
            destroyLocalReference(env, jUserString);
            if (jGroups != NULL) {
                destroyLocalReference(env, jGroups);
            }
            return NULL;
        }
#define USE_UUGI
#ifdef USE_UUGI
        // UnixUserGroupInformation.UGI_PROPERTY_NAME
        jstring jAttrString = (*env)->NewStringUTF(env, "hadoop.job.ugi");

        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL,
                         HADOOP_UNIX_USER_GROUP_INFO, "saveToConf",
                         JMETHOD3(JPARAM(HADOOP_CONF), JPARAM(JAVA_STRING),
                                  JPARAM(HADOOP_UNIX_USER_GROUP_INFO),
                                  JAVA_VOID),
                         jConfiguration, jAttrString, jUgi) != 0) {
            errno = errnoFromException(jExc, env, HADOOP_UNIX_USER_GROUP_INFO,
                                       "saveToConf");
            destroyLocalReference(env, jConfiguration);
            destroyLocalReference(env, jAttrString);
            destroyLocalReference(env, jUserString);
            if (jGroups != NULL) {
                destroyLocalReference(env, jGroups);
            }
            destroyLocalReference(env, jUgi);
            return NULL;
        }

        destroyLocalReference(env, jAttrString);
        destroyLocalReference(env, jUserString);
        destroyLocalReference(env, jGroups);
        destroyLocalReference(env, jUgi);
    }
#else
        // What does "current" mean in the context of libhdfs? Does it mean
        // the last hdfs connection we used? That is why this code cannot be
        // activated: we know the above use of the conf object works well
        // with multiple connections.
        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL,
                         HADOOP_USER_GROUP_INFO, "setCurrentUGI",
                         JMETHOD1(JPARAM(HADOOP_USER_GROUP_INFO), JAVA_VOID),
                         jUgi) != 0) {
            errno = errnoFromException(jExc, env, HADOOP_USER_GROUP_INFO,
                                       "setCurrentUGI");
            destroyLocalReference(env, jConfiguration);
            destroyLocalReference(env, jUserString);
            if (jGroups != NULL) {
                destroyLocalReference(env, jGroups);
            }
            destroyLocalReference(env, jUgi);
            return NULL;
        }

        destroyLocalReference(env, jUserString);
        destroyLocalReference(env, jGroups);
        destroyLocalReference(env, jUgi);
    }
#endif

    //Check what type of FileSystem the caller wants...
    if (host == NULL) {
        // fs = FileSystem::getLocal(conf);
        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS,
                         "getLocal",
                         JMETHOD1(JPARAM(HADOOP_CONF),
                                  JPARAM(HADOOP_LOCALFS)),
                         jConfiguration) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FileSystem::getLocal");
            goto done;
        }
        jFS = jVal.l;
    }
    else if (!strcmp(host, "default") && port == 0) {
        // fs = FileSystem::get(conf);
        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL,
                         HADOOP_FS, "get",
                         JMETHOD1(JPARAM(HADOOP_CONF),
                                  JPARAM(HADOOP_FS)),
                         jConfiguration) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FileSystem::get");
            goto done;
        }
        jFS = jVal.l;
    }
    else {
        // fs = FileSystem::get(URI, conf);
        cURI = malloc(strlen(host) + 16);
        if (cURI == NULL) {
            errno = ENOMEM;
            goto done;
        }
        sprintf(cURI, "hdfs://%s:%d", host, (int)(port));

        jURIString = (*env)->NewStringUTF(env, cURI);
        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, JAVA_NET_URI,
                         "create", "(Ljava/lang/String;)Ljava/net/URI;",
                         jURIString) != 0) {
            errno = errnoFromException(jExc, env, "java.net.URI::create");
            goto done;
        }
        jURI = jVal.l;

        if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS, "get",
                         JMETHOD2(JPARAM(JAVA_NET_URI),
                                  JPARAM(HADOOP_CONF), JPARAM(HADOOP_FS)),
                         jURI, jConfiguration) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FileSystem::get(URI, Configuration)");
            goto done;
        }
        jFS = jVal.l;
    }

done:
    // Release unnecessary local references
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jURIString);
    destroyLocalReference(env, jURI);
    if (cURI) free(cURI);

    /* Create a global reference for this fs */
    if (jFS) {
        gFsRef = (*env)->NewGlobalRef(env, jFS);
        destroyLocalReference(env, jFS);
    }

    return gFsRef;
}
int hdfsDisconnect(hdfsFS fs)
{
    // JAVA EQUIVALENT:
    //  fs.close()

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -2;
    }

    //Parameters
    jobject jFS = (jobject)fs;

    //Caught exception
    jthrowable jExc = NULL;

    //Sanity check
    if (fs == NULL) {
        errno = EBADF;
        return -1;
    }

    if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "close", "()V") != 0) {
        errno = errnoFromException(jExc, env, "FileSystem::close");
        return -1;
    }

    //Release unnecessary references
    (*env)->DeleteGlobalRef(env, fs);

    return 0;
}
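
/* Usage sketch (illustrative only, not part of the library): pairing
 * hdfsConnect with hdfsDisconnect. A NULL host selects the local
 * filesystem, host "default" with port 0 selects the filesystem named in
 * the Hadoop configuration, and an explicit host/port pair is turned into
 * an hdfs:// URI. The host "namenode" below is a placeholder.
 *
 *     hdfsFS fs = hdfsConnect("namenode", 9000);
 *     if (fs == NULL) {
 *         fprintf(stderr, "hdfsConnect failed, errno=%d\n", errno);
 *         return 1;
 *     }
 *     // ... use fs ...
 *     hdfsDisconnect(fs);  // closes the FileSystem, drops the global ref
 */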
hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
                      int bufferSize, short replication, tSize blockSize)
{
    /*
      JAVA EQUIVALENT:
       File f = new File(path);
       FSData{Input|Output}Stream f{is|os} = fs.create(f);
       return f{is|os};
    */

    /* Get the JNIEnv* corresponding to current thread */
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return NULL;
    }

    jobject jFS = (jobject)fs;

    if (flags & O_RDWR) {
        fprintf(stderr, "ERROR: cannot open an hdfs file in O_RDWR mode\n");
        errno = ENOTSUP;
        return NULL;
    }

    /* The hadoop java api/signature */
    const char* method = ((flags & O_WRONLY) == 0) ? "open" : "create";
    const char* signature = ((flags & O_WRONLY) == 0) ?
        JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_ISTRM)) :
        JMETHOD2(JPARAM(HADOOP_PATH), "ZISJ", JPARAM(HADOOP_OSTRM));

    /* Return value */
    hdfsFile file = NULL;

    /* Create an object of org.apache.hadoop.fs.Path */
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }

    /* Get the Configuration object from the FileSystem object */
    jvalue jVal;
    jobject jConfiguration = NULL;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "getConf", JMETHOD1("", JPARAM(HADOOP_CONF))) != 0) {
        errno = errnoFromException(jExc, env, "get configuration object "
                                   "from filesystem");
        destroyLocalReference(env, jPath);
        return NULL;
    }
    jConfiguration = jVal.l;

    jint jBufferSize = bufferSize;
    jshort jReplication = replication;
    jlong jBlockSize = blockSize;
    jstring jStrBufferSize = (*env)->NewStringUTF(env, "io.file.buffer.size");
    jstring jStrReplication = (*env)->NewStringUTF(env, "dfs.replication");
    jstring jStrBlockSize = (*env)->NewStringUTF(env, "dfs.block.size");

    //bufferSize
    if (!bufferSize) {
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
                         HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
                         jStrBufferSize, 4096) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                       "Configuration::getInt");
            goto done;
        }
        jBufferSize = jVal.i;
    }

    if (flags & O_WRONLY) {
        //replication
        if (!replication) {
            if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
                             HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
                             jStrReplication, 1) != 0) {
                errno = errnoFromException(jExc, env,
                                           "org.apache.hadoop.conf."
                                           "Configuration::getInt");
                goto done;
            }
            jReplication = jVal.i;
        }

        //blockSize
        if (!blockSize) {
            if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
                             HADOOP_CONF, "getLong", "(Ljava/lang/String;J)J",
                             jStrBlockSize, 67108864)) {
                errno = errnoFromException(jExc, env,
                                           "org.apache.hadoop.conf."
                                           "Configuration::getLong");
                goto done;
            }
            jBlockSize = jVal.j;
        }
    }

    /* Create either the FSDataInputStream ("open") or the
       FSDataOutputStream ("create"); the stream lands in jVal.l */
    if ((flags & O_WRONLY) == 0) {
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                         method, signature, jPath, jBufferSize)) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FileSystem::%s(%s)", method,
                                       signature);
            goto done;
        }
    }
    else {
        jboolean jOverWrite = 1;
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                         method, signature, jPath, jOverWrite,
                         jBufferSize, jReplication, jBlockSize)) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FileSystem::%s(%s)", method,
                                       signature);
            goto done;
        }
    }

    file = malloc(sizeof(struct hdfsFile_internal));
    if (!file) {
        errno = ENOMEM;
        goto done;
    }
    file->file = (*env)->NewGlobalRef(env, jVal.l);
    file->type = (((flags & O_WRONLY) == 0) ? INPUT : OUTPUT);
    destroyLocalReference(env, jVal.l);

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jStrBufferSize);
    destroyLocalReference(env, jStrReplication);
    destroyLocalReference(env, jStrBlockSize);
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jPath);

    return file;
}
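
/* Usage sketch (illustrative only): opening a file for writing and then for
 * reading on an already-connected fs handle. Passing 0 for bufferSize,
 * replication, and blockSize makes the function fall back to the
 * Configuration defaults resolved above; the path is a placeholder.
 *
 *     hdfsFile out = hdfsOpenFile(fs, "/tmp/testfile.txt",
 *                                 O_WRONLY, 0, 0, 0);
 *     // ... hdfsWrite(fs, out, ...) ...
 *     hdfsCloseFile(fs, out);
 *
 *     hdfsFile in = hdfsOpenFile(fs, "/tmp/testfile.txt",
 *                                O_RDONLY, 0, 0, 0);
 *     // ... hdfsRead(fs, in, ...) ...
 *     hdfsCloseFile(fs, in);
 */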
int hdfsCloseFile(hdfsFS fs, hdfsFile file)
{
    // JAVA EQUIVALENT:
    //  file.close()

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -2;
    }

    //Parameters
    jobject jStream = (jobject)(file ? file->file : NULL);

    //Caught exception
    jthrowable jExc = NULL;

    //Sanity check
    if (!file || file->type == UNINITIALIZED) {
        errno = EBADF;
        return -1;
    }

    //The interface whose 'close' method is to be called
    const char* interface = (file->type == INPUT) ?
        HADOOP_ISTRM : HADOOP_OSTRM;

    if (invokeMethod(env, NULL, &jExc, INSTANCE, jStream, interface,
                     "close", "()V") != 0) {
        errno = errnoFromException(jExc, env, "%s::close", interface);
        return -1;
    }

    //De-allocate memory
    free(file);
    (*env)->DeleteGlobalRef(env, jStream);

    return 0;
}
int hdfsExists(hdfsFS fs, const char *path)
{
    JNIEnv *env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -2;
    }

    jobject jPath = constructNewObjectOfPath(env, path);
    jvalue jVal;
    jthrowable jExc = NULL;
    jobject jFS = (jobject)fs;

    if (jPath == NULL) {
        return -1;
    }

    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"),
                     jPath) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::exists");
        destroyLocalReference(env, jPath);
        return -1;
    }
    destroyLocalReference(env, jPath);

    return jVal.z ? 0 : -1;
}
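
/* Note the return convention above: 0 means the path exists, -1 means it
 * does not (or the check itself failed, in which case errno is set). A
 * minimal caller-side sketch:
 *
 *     if (hdfsExists(fs, "/tmp/testfile.txt") == 0) {
 *         // path is present
 *     }
 */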
tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
{
    // JAVA EQUIVALENT:
    //  byte [] bR = new byte[length];
    //  fis.read(bR);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jInputStream = (jobject)(f ? f->file : NULL);

    jbyteArray jbRarray;
    jint noReadBytes = 0;
    jvalue jVal;
    jthrowable jExc = NULL;

    //Sanity check
    if (!f || f->type == UNINITIALIZED) {
        errno = EBADF;
        return -1;
    }

    //Error checking... make sure that this file is 'readable'
    if (f->type != INPUT) {
        fprintf(stderr, "Cannot read from a non-InputStream object!\n");
        errno = EINVAL;
        return -1;
    }

    //Read the requisite bytes
    jbRarray = (*env)->NewByteArray(env, length);
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
                     "read", "([B)I", jbRarray) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FSDataInputStream::read");
        noReadBytes = -1;
    }
    else {
        noReadBytes = jVal.i;
        if (noReadBytes > 0) {
            (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer);
        } else {
            //This is a valid case: there aren't any bytes left to read!
            noReadBytes = 0;
        }
        errno = 0;
    }

    destroyLocalReference(env, jbRarray);
    return noReadBytes;
}
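
/* Usage sketch (illustrative only): hdfsRead may return fewer bytes than
 * requested, so callers typically loop until it returns 0 (EOF) or -1
 * (error, with errno set):
 *
 *     char buf[4096];
 *     tSize n;
 *     while ((n = hdfsRead(fs, in, buf, sizeof(buf))) > 0) {
 *         // consume n bytes of buf
 *     }
 *     if (n == -1) {
 *         fprintf(stderr, "read failed, errno=%d\n", errno);
 *     }
 */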
tSize hdfsPread(hdfsFS fs, hdfsFile f, tOffset position,
                void* buffer, tSize length)
{
    // JAVA EQUIVALENT:
    //  byte [] bR = new byte[length];
    //  fis.read(pos, bR, 0, length);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jInputStream = (jobject)(f ? f->file : NULL);

    jbyteArray jbRarray;
    jint noReadBytes = 0;
    jvalue jVal;
    jthrowable jExc = NULL;

    //Sanity check
    if (!f || f->type == UNINITIALIZED) {
        errno = EBADF;
        return -1;
    }

    //Error checking... make sure that this file is 'readable'
    if (f->type != INPUT) {
        fprintf(stderr, "Cannot read from a non-InputStream object!\n");
        errno = EINVAL;
        return -1;
    }

    //Read the requisite bytes
    jbRarray = (*env)->NewByteArray(env, length);
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
                     "read", "(J[BII)I", position, jbRarray, 0,
                     length) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FSDataInputStream::read");
        noReadBytes = -1;
    }
    else {
        noReadBytes = jVal.i;
        if (noReadBytes > 0) {
            (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer);
        } else {
            //This is a valid case: there aren't any bytes left to read!
            noReadBytes = 0;
        }
        errno = 0;
    }

    destroyLocalReference(env, jbRarray);
    return noReadBytes;
}
tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
{
    // JAVA EQUIVALENT
    //  byte b[] = str.getBytes();
    //  fso.write(b);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jOutputStream = (jobject)(f ? f->file : 0);
    jbyteArray jbWarray;

    //Caught exception
    jthrowable jExc = NULL;

    //Sanity check
    if (!f || f->type == UNINITIALIZED) {
        errno = EBADF;
        return -1;
    }

    if (length < 0) {
        errno = EINVAL;
        return -1;
    }

    //Error checking... make sure that this file is 'writable'
    if (f->type != OUTPUT) {
        fprintf(stderr, "Cannot write into a non-OutputStream object!\n");
        errno = EINVAL;
        return -1;
    }

    // 'length' equals 'zero' is a valid use-case according to POSIX!
    if (length != 0) {
        //Write the requisite bytes into the file
        jbWarray = (*env)->NewByteArray(env, length);
        (*env)->SetByteArrayRegion(env, jbWarray, 0, length, buffer);
        if (invokeMethod(env, NULL, &jExc, INSTANCE, jOutputStream,
                         HADOOP_OSTRM, "write",
                         "([B)V", jbWarray) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FSDataOutputStream::write");
            length = -1;
        }
        destroyLocalReference(env, jbWarray);
    }

    //Return no. of bytes successfully written (libc way)
    //i.e. 'length' itself! ;-)
    return length;
}
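
/* Usage sketch (illustrative only): on success hdfsWrite reports the full
 * requested length as written, so a simple whole-buffer write looks like:
 *
 *     const char* msg = "hello, hdfs\n";
 *     tSize written = hdfsWrite(fs, out, msg, strlen(msg));
 *     if (written == -1) {
 *         fprintf(stderr, "write failed, errno=%d\n", errno);
 *     }
 */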
int hdfsSeek(hdfsFS fs, hdfsFile f, tOffset desiredPos)
{
    // JAVA EQUIVALENT
    //  fis.seek(pos);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jInputStream = (jobject)(f ? f->file : 0);

    //Caught exception
    jthrowable jExc = NULL;

    //Sanity check
    if (!f || f->type != INPUT) {
        errno = EBADF;
        return -1;
    }

    if (invokeMethod(env, NULL, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
                     "seek", "(J)V", desiredPos) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FSDataInputStream::seek");
        return -1;
    }

    return 0;
}
tOffset hdfsTell(hdfsFS fs, hdfsFile f)
{
    // JAVA EQUIVALENT
    //  pos = f.getPos();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jStream = (jobject)(f ? f->file : 0);

    //Sanity check
    if (!f || f->type == UNINITIALIZED) {
        errno = EBADF;
        return -1;
    }

    const char* interface = (f->type == INPUT) ?
        HADOOP_ISTRM : HADOOP_OSTRM;

    jlong currentPos = -1;
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStream,
                     interface, "getPos", "()J") != 0) {
        errno = errnoFromException(jExc, env, "%s::getPos", interface);
        return -1;
    }
    currentPos = jVal.j;

    return (tOffset)currentPos;
}
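
/* Usage sketch (illustrative only): hdfsSeek is valid on input streams
 * only, while hdfsTell works on either stream kind:
 *
 *     if (hdfsSeek(fs, in, 128) == 0) {
 *         tOffset pos = hdfsTell(fs, in);   // pos should now be 128
 *     }
 */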
int hdfsFlush(hdfsFS fs, hdfsFile f)
{
    // JAVA EQUIVALENT
    //  fos.flush();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jOutputStream = (jobject)(f ? f->file : 0);

    //Caught exception
    jthrowable jExc = NULL;

    //Sanity check
    if (!f || f->type != OUTPUT) {
        errno = EBADF;
        return -1;
    }

    if (invokeMethod(env, NULL, &jExc, INSTANCE, jOutputStream,
                     HADOOP_OSTRM, "flush", "()V") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FSDataOutputStream::flush");
        return -1;
    }

    return 0;
}
int hdfsAvailable(hdfsFS fs, hdfsFile f)
{
    // JAVA EQUIVALENT
    //  fis.available();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jInputStream = (jobject)(f ? f->file : 0);

    //Caught exception
    jthrowable jExc = NULL;

    //Sanity check
    if (!f || f->type != INPUT) {
        errno = EBADF;
        return -1;
    }

    jint available = -1;
    jvalue jVal;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream,
                     HADOOP_ISTRM, "available", "()I") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FSDataInputStream::available");
        return -1;
    }
    available = jVal.i;

    return available;
}
int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
{
    //JAVA EQUIVALENT
    //  FileUtil::copy(srcFS, srcPath, dstFS, dstPath,
    //                 deleteSource = false, conf)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jSrcFS = (jobject)srcFS;
    jobject jDstFS = (jobject)dstFS;

    jobject jSrcPath = NULL;
    jobject jDstPath = NULL;

    jSrcPath = constructNewObjectOfPath(env, src);
    if (jSrcPath == NULL) {
        return -1;
    }

    jDstPath = constructNewObjectOfPath(env, dst);
    if (jDstPath == NULL) {
        destroyLocalReference(env, jSrcPath);
        return -1;
    }

    int retval = 0;

    //Create the org.apache.hadoop.conf.Configuration object
    jobject jConfiguration =
        constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
    if (jConfiguration == NULL) {
        fprintf(stderr, "Can't construct instance of class "
                "org.apache.hadoop.conf.Configuration\n");
        errno = EINTERNAL;
        destroyLocalReference(env, jSrcPath);
        destroyLocalReference(env, jDstPath);
        return -1;
    }

    //FileUtil::copy
    jboolean deleteSource = 0; //Only copy
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, STATIC,
                     NULL, "org/apache/hadoop/fs/FileUtil", "copy",
                     "(Lorg/apache/hadoop/fs/FileSystem;"
                     "Lorg/apache/hadoop/fs/Path;"
                     "Lorg/apache/hadoop/fs/FileSystem;"
                     "Lorg/apache/hadoop/fs/Path;"
                     "ZLorg/apache/hadoop/conf/Configuration;)Z",
                     jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
                     jConfiguration) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileUtil::copy");
        retval = -1;
        goto done;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jSrcPath);
    destroyLocalReference(env, jDstPath);

    return retval;
}
int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
{
    //JAVA EQUIVALENT
    //  FileUtil::copy(srcFS, srcPath, dstFS, dstPath,
    //                 deleteSource = true, conf)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    //Parameters
    jobject jSrcFS = (jobject)srcFS;
    jobject jDstFS = (jobject)dstFS;

    jobject jSrcPath = NULL;
    jobject jDstPath = NULL;

    jSrcPath = constructNewObjectOfPath(env, src);
    if (jSrcPath == NULL) {
        return -1;
    }

    jDstPath = constructNewObjectOfPath(env, dst);
    if (jDstPath == NULL) {
        destroyLocalReference(env, jSrcPath);
        return -1;
    }

    int retval = 0;

    //Create the org.apache.hadoop.conf.Configuration object
    jobject jConfiguration =
        constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
    if (jConfiguration == NULL) {
        fprintf(stderr, "Can't construct instance of class "
                "org.apache.hadoop.conf.Configuration\n");
        errno = EINTERNAL;
        destroyLocalReference(env, jSrcPath);
        destroyLocalReference(env, jDstPath);
        return -1;
    }

    //FileUtil::copy
    jboolean deleteSource = 1; //Delete src after copy
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, STATIC, NULL,
                     "org/apache/hadoop/fs/FileUtil", "copy",
                     "(Lorg/apache/hadoop/fs/FileSystem;"
                     "Lorg/apache/hadoop/fs/Path;"
                     "Lorg/apache/hadoop/fs/FileSystem;"
                     "Lorg/apache/hadoop/fs/Path;"
                     "ZLorg/apache/hadoop/conf/Configuration;)Z",
                     jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
                     jConfiguration) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileUtil::copy(move)");
        retval = -1;
        goto done;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jSrcPath);
    destroyLocalReference(env, jDstPath);

    return retval;
}
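
/* Usage sketch (illustrative only): both calls route through
 * FileUtil::copy, with hdfsMove additionally deleting the source. They can
 * also cross filesystems, e.g. from a local handle to an HDFS handle:
 *
 *     hdfsFS lfs = hdfsConnect(NULL, 0);          // local filesystem
 *     hdfsCopy(lfs, "/tmp/a.txt", fs, "/a.txt");  // copy, keep source
 *     hdfsMove(lfs, "/tmp/b.txt", fs, "/b.txt");  // copy, delete source
 */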
int hdfsDelete(hdfsFS fs, const char* path)
{
    // JAVA EQUIVALENT:
    //  Path p = new Path(path);
    //  bool retval = fs.delete(p);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //Delete the file
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "delete", "(Lorg/apache/hadoop/fs/Path;)Z",
                     jPath) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::delete");
        destroyLocalReference(env, jPath);
        return -1;
    }

    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return (jVal.z) ? 0 : -1;
}
int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath)
{
    // JAVA EQUIVALENT:
    //  Path old = new Path(oldPath);
    //  Path new = new Path(newPath);
    //  fs.rename(old, new);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    //Create objects of org.apache.hadoop.fs.Path
    jobject jOldPath = NULL;
    jobject jNewPath = NULL;

    jOldPath = constructNewObjectOfPath(env, oldPath);
    if (jOldPath == NULL) {
        return -1;
    }

    jNewPath = constructNewObjectOfPath(env, newPath);
    if (jNewPath == NULL) {
        destroyLocalReference(env, jOldPath);
        return -1;
    }

    //Rename the file
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, "rename",
                     JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_PATH), "Z"),
                     jOldPath, jNewPath) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::rename");
        destroyLocalReference(env, jOldPath);
        destroyLocalReference(env, jNewPath);
        return -1;
    }

    //Delete unnecessary local references
    destroyLocalReference(env, jOldPath);
    destroyLocalReference(env, jNewPath);

    return (jVal.z) ? 0 : -1;
}
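
/* Usage sketch (illustrative only): rename and delete both map the Java
 * boolean result onto the 0/-1 convention used throughout this file:
 *
 *     if (hdfsRename(fs, "/a.txt", "/b.txt") == 0) {
 *         hdfsDelete(fs, "/b.txt");
 *     }
 */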
char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
{
    // JAVA EQUIVALENT:
    //  Path p = fs.getWorkingDirectory();
    //  return p.toString()

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return NULL;
    }

    jobject jFS = (jobject)fs;
    jobject jPath = NULL;
    jvalue jVal;
    jthrowable jExc = NULL;

    //FileSystem::getWorkingDirectory()
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS,
                     HADOOP_FS, "getWorkingDirectory",
                     "()Lorg/apache/hadoop/fs/Path;") != 0 ||
        jVal.l == NULL) {
        errno = errnoFromException(jExc, env, "FileSystem::"
                                   "getWorkingDirectory");
        return NULL;
    }
    jPath = jVal.l;

    //Path::toString()
    jstring jPathString;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPath,
                     "org/apache/hadoop/fs/Path", "toString",
                     "()Ljava/lang/String;") != 0) {
        errno = errnoFromException(jExc, env, "Path::toString");
        destroyLocalReference(env, jPath);
        return NULL;
    }
    jPathString = jVal.l;

    const char *jPathChars = (const char*)
        ((*env)->GetStringUTFChars(env, jPathString, NULL));

    //Copy to user-provided buffer; strncpy alone does not guarantee
    //NUL-termination, so terminate explicitly
    strncpy(buffer, jPathChars, bufferSize - 1);
    buffer[bufferSize - 1] = '\0';

    //Delete unnecessary local references
    (*env)->ReleaseStringUTFChars(env, jPathString, jPathChars);
    destroyLocalReference(env, jPathString);
    destroyLocalReference(env, jPath);

    return buffer;
}
int hdfsSetWorkingDirectory(hdfsFS fs, const char* path)
{
    // JAVA EQUIVALENT:
    //  fs.setWorkingDirectory(Path(path));

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;
    int retval = 0;
    jthrowable jExc = NULL;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //FileSystem::setWorkingDirectory()
    if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "setWorkingDirectory",
                     "(Lorg/apache/hadoop/fs/Path;)V", jPath) != 0) {
        errno = errnoFromException(jExc, env, "FileSystem::"
                                   "setWorkingDirectory");
        retval = -1;
    }

    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return retval;
}
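
/* Usage sketch (illustrative only): the caller owns the buffer handed to
 * hdfsGetWorkingDirectory and gets the same pointer back on success:
 *
 *     char cwd[256];
 *     if (hdfsSetWorkingDirectory(fs, "/user/test") == 0 &&
 *         hdfsGetWorkingDirectory(fs, cwd, sizeof(cwd)) != NULL) {
 *         printf("cwd: %s\n", cwd);  // e.g. hdfs://host:port/user/test
 *     }
 */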
int hdfsCreateDirectory(hdfsFS fs, const char* path)
{
    // JAVA EQUIVALENT:
    //  fs.mkdirs(new Path(path));

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //Create the directory
    jvalue jVal;
    jVal.z = 0;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "mkdirs", "(Lorg/apache/hadoop/fs/Path;)Z",
                     jPath) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::mkdirs");
        goto done;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return (jVal.z) ? 0 : -1;
}
int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication)
{
    // JAVA EQUIVALENT:
    //  fs.setReplication(new Path(path), replication);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //Set the replication factor
    jvalue jVal;
    jVal.z = 0;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "setReplication", "(Lorg/apache/hadoop/fs/Path;S)Z",
                     jPath, replication) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::setReplication");
        goto done;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return (jVal.z) ? 0 : -1;
}
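
/* Usage sketch (illustrative only, paths are placeholders): creating a
 * directory tree and then changing the replication factor of a file:
 *
 *     hdfsCreateDirectory(fs, "/user/test/data");
 *     hdfsSetReplication(fs, "/user/test/data/file.txt", 3);
 */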
int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group)
{
    // JAVA EQUIVALENT:
    //  fs.setOwner(path, owner, group)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    if (owner == NULL && group == NULL) {
        fprintf(stderr, "Both owner and group cannot be null in chown\n");
        errno = EINVAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return -1;
    }

    //NewStringUTF must not be handed a NULL pointer; a NULL owner or group
    //is passed through to setOwner as a null jstring instead
    jstring jOwnerString = owner ? (*env)->NewStringUTF(env, owner) : NULL;
    jstring jGroupString = group ? (*env)->NewStringUTF(env, group) : NULL;

    //Set the owner and/or group
    int ret = 0;
    jthrowable jExc = NULL;
    if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "setOwner",
                     JMETHOD3(JPARAM(HADOOP_PATH), JPARAM(JAVA_STRING),
                              JPARAM(JAVA_STRING), JAVA_VOID),
                     jPath, jOwnerString, jGroupString) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::setOwner");
        ret = -1;
        goto done;
    }

done:
    destroyLocalReference(env, jPath);
    destroyLocalReference(env, jOwnerString);
    destroyLocalReference(env, jGroupString);
    return ret;
}
int hdfsChmod(hdfsFS fs, const char* path, short mode)
{
    // JAVA EQUIVALENT:
    //  fs.setPermission(path, FsPermission)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    // construct jPermObj = new FsPermission(mode);
    jshort jmode = mode;
    jobject jPermObj =
        constructNewObjectOfClass(env, NULL, HADOOP_FSPERM, "(S)V", jmode);
    if (jPermObj == NULL) {
        return -2;
    }

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        destroyLocalReference(env, jPermObj);
        return -3;
    }

    //Set the permission
    int ret = 0;
    jthrowable jExc = NULL;
    if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "setPermission",
                     JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_FSPERM),
                              JAVA_VOID),
                     jPath, jPermObj) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::setPermission");
        ret = -1;
        goto done;
    }

done:
    destroyLocalReference(env, jPath);
    destroyLocalReference(env, jPermObj);
    return ret;
}
int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime)
{
    // JAVA EQUIVALENT:
    //  fs.setTimes(src, mtime, atime)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        fprintf(stderr, "could not construct path object\n");
        return -2;
    }

    //setTimes takes milliseconds
    jlong jmtime = mtime * (jlong)1000;
    jlong jatime = atime * (jlong)1000;

    int ret = 0;
    jthrowable jExc = NULL;
    if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "setTimes",
                     JMETHOD3(JPARAM(HADOOP_PATH), "J", "J", JAVA_VOID),
                     jPath, jmtime, jatime) != 0) {
        fprintf(stderr, "call to setTimes failed\n");
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::setTimes");
        ret = -1;
        goto done;
    }

done:
    destroyLocalReference(env, jPath);
    return ret;
}
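
/* Usage sketch (illustrative only; names are placeholders and <time.h> is
 * assumed for time()): the metadata setters share the 0/-1 convention.
 * hdfsChmod takes raw permission bits and hdfsUtime takes seconds, which
 * are converted to Java milliseconds internally:
 *
 *     hdfsChown(fs, "/a.txt", "hdfsuser", "hdfsgroup");
 *     hdfsChmod(fs, "/a.txt", 0644);
 *     hdfsUtime(fs, "/a.txt", time(NULL), time(NULL));
 */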
char***
hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length)
{
    // JAVA EQUIVALENT:
    //  fs.getFileBlockLocations(new Path(path), start, length);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return NULL;
    }

    jobject jFS = (jobject)fs;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }

    jvalue jFSVal;
    jthrowable jFSExc = NULL;
    if (invokeMethod(env, &jFSVal, &jFSExc, INSTANCE, jFS,
                     HADOOP_FS, "getFileStatus",
                     "(Lorg/apache/hadoop/fs/Path;)"
                     "Lorg/apache/hadoop/fs/FileStatus;",
                     jPath) != 0) {
        errno = errnoFromException(jFSExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::getFileStatus");
        destroyLocalReference(env, jPath);
        return NULL;
    }
    jobject jFileStatus = jFSVal.l;

    //org.apache.hadoop.fs.FileSystem::getFileBlockLocations
    char*** blockHosts = NULL;
    jobjectArray jBlockLocations;
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS,
                     HADOOP_FS, "getFileBlockLocations",
                     "(Lorg/apache/hadoop/fs/FileStatus;JJ)"
                     "[Lorg/apache/hadoop/fs/BlockLocation;",
                     jFileStatus, start, length) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::getFileBlockLocations");
        destroyLocalReference(env, jPath);
        destroyLocalReference(env, jFileStatus);
        return NULL;
    }
    jBlockLocations = jVal.l;

    //Figure out no of entries in jBlockLocations
    //Allocate memory and add NULL at the end
    jsize jNumFileBlocks = (*env)->GetArrayLength(env, jBlockLocations);
    blockHosts = malloc(sizeof(char**) * (jNumFileBlocks + 1));
    if (blockHosts == NULL) {
        errno = ENOMEM;
        goto done;
    }
    blockHosts[jNumFileBlocks] = NULL;

    if (jNumFileBlocks == 0) {
        errno = 0;
        goto done;
    }

    //Now parse each block to get hostnames
    int i = 0;
    for (i = 0; i < jNumFileBlocks; ++i) {
        jobject jFileBlock =
            (*env)->GetObjectArrayElement(env, jBlockLocations, i);

        jvalue jVal;
        jobjectArray jFileBlockHosts;
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFileBlock,
                         HADOOP_BLK_LOC, "getHosts",
                         "()[Ljava/lang/String;") ||
            jVal.l == NULL) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "BlockLocation::getHosts");
            destroyLocalReference(env, jPath);
            destroyLocalReference(env, jFileStatus);
            destroyLocalReference(env, jBlockLocations);
            return NULL;
        }
        jFileBlockHosts = jVal.l;

        //Figure out no of hosts in jFileBlockHosts
        //Allocate memory and add NULL at the end
        jsize jNumBlockHosts = (*env)->GetArrayLength(env, jFileBlockHosts);
        blockHosts[i] = malloc(sizeof(char*) * (jNumBlockHosts + 1));
        if (blockHosts[i] == NULL) {
            int x = 0;
            for (x = 0; x < i; ++x) {
                free(blockHosts[x]);
            }
            free(blockHosts);
            //Don't return the freed pointer from the done label
            blockHosts = NULL;
            errno = ENOMEM;
            goto done;
        }
        blockHosts[i][jNumBlockHosts] = NULL;

        //Now parse each hostname
        int j = 0;
        const char *hostName;
        for (j = 0; j < jNumBlockHosts; ++j) {
            jstring jHost =
                (*env)->GetObjectArrayElement(env, jFileBlockHosts, j);

            hostName =
                (const char*)((*env)->GetStringUTFChars(env, jHost, NULL));
            blockHosts[i][j] = strdup(hostName);

            (*env)->ReleaseStringUTFChars(env, jHost, hostName);
            destroyLocalReference(env, jHost);
        }

        destroyLocalReference(env, jFileBlockHosts);
        destroyLocalReference(env, jFileBlock);
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);
    destroyLocalReference(env, jFileStatus);
    destroyLocalReference(env, jBlockLocations);

    return blockHosts;
}
void hdfsFreeHosts(char ***blockHosts)
{
    int i, j;
    for (i = 0; blockHosts[i]; i++) {
        for (j = 0; blockHosts[i][j]; j++) {
            free(blockHosts[i][j]);
        }
        free(blockHosts[i]);
    }
    free(blockHosts);
}
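
/* Usage sketch (illustrative only): hdfsGetHosts returns a NULL-terminated
 * array of NULL-terminated host lists, one list per block in the requested
 * range, which must be released with hdfsFreeHosts:
 *
 *     char*** hosts = hdfsGetHosts(fs, "/a.txt", 0, 1);
 *     if (hosts) {
 *         int i, j;
 *         for (i = 0; hosts[i]; ++i)
 *             for (j = 0; hosts[i][j]; ++j)
 *                 printf("block %d replica on %s\n", i, hosts[i][j]);
 *         hdfsFreeHosts(hosts);
 *     }
 */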
tOffset hdfsGetDefaultBlockSize(hdfsFS fs)
{
    // JAVA EQUIVALENT:
    //  fs.getDefaultBlockSize();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    //FileSystem::getDefaultBlockSize()
    tOffset blockSize = -1;
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "getDefaultBlockSize", "()J") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::getDefaultBlockSize");
        return -1;
    }
    blockSize = jVal.j;

    return blockSize;
}
tOffset hdfsGetCapacity(hdfsFS fs)
{
    // JAVA EQUIVALENT:
    //  fs.getRawCapacity();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    if (!((*env)->IsInstanceOf(env, jFS,
                               globalClassReference(HADOOP_DFS, env)))) {
        fprintf(stderr, "hdfsGetCapacity works only on a "
                "DistributedFileSystem!\n");
        return -1;
    }

    //DistributedFileSystem::getRawCapacity()
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS,
                     "getRawCapacity", "()J") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.hdfs."
                                   "DistributedFileSystem::getRawCapacity");
        return -1;
    }
    return jVal.j;
}
tOffset hdfsGetUsed(hdfsFS fs)
{
    // JAVA EQUIVALENT:
    //  fs.getRawUsed();

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }

    jobject jFS = (jobject)fs;

    if (!((*env)->IsInstanceOf(env, jFS,
                               globalClassReference(HADOOP_DFS, env)))) {
        fprintf(stderr, "hdfsGetUsed works only on a "
                "DistributedFileSystem!\n");
        return -1;
    }

    //DistributedFileSystem::getRawUsed()
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS,
                     "getRawUsed", "()J") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.hdfs."
                                   "DistributedFileSystem::getRawUsed");
        return -1;
    }
    return jVal.j;
}
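
/* Usage sketch (illustrative only): both calls require a handle backed by
 * a DistributedFileSystem and return -1 otherwise:
 *
 *     tOffset cap  = hdfsGetCapacity(fs);
 *     tOffset used = hdfsGetUsed(fs);
 *     if (cap != -1 && used != -1)
 *         printf("%lld of %lld bytes used\n",
 *                (long long)used, (long long)cap);
 */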

static int
getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
{
    jvalue jVal;
    jthrowable jExc = NULL;

    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
                     HADOOP_STAT, "isDir", "()Z") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileStatus::isDir");
        return -1;
    }
    fileInfo->mKind = jVal.z ? kObjectKindDirectory : kObjectKindFile;

    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
                     HADOOP_STAT, "getReplication", "()S") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileStatus::getReplication");
        return -1;
    }
    fileInfo->mReplication = jVal.s;

    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
                     HADOOP_STAT, "getBlockSize", "()J") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileStatus::getBlockSize");
        return -1;
    }
    fileInfo->mBlockSize = jVal.j;

    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
                     HADOOP_STAT, "getModificationTime", "()J") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileStatus::getModificationTime");
        return -1;
    }
    //Java reports milliseconds since the epoch; tTime is in seconds
    fileInfo->mLastMod = (tTime) (jVal.j / 1000);

    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
                     HADOOP_STAT, "getAccessTime", "()J") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileStatus::getAccessTime");
        return -1;
    }
    fileInfo->mLastAccess = (tTime) (jVal.j / 1000);

    if (fileInfo->mKind == kObjectKindFile) {
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
                         HADOOP_STAT, "getLen", "()J") != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                       "FileStatus::getLen");
            return -1;
        }
        fileInfo->mSize = jVal.j;
    }

    jobject jPath;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
                     "getPath", "()Lorg/apache/hadoop/fs/Path;") != 0 ||
            jVal.l == NULL) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileStatus::getPath");
        return -1;
    }
    jPath = jVal.l;

    jstring     jPathName;
    const char *cPathName;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPath, HADOOP_PATH,
                     "toString", "()Ljava/lang/String;") != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "Path::toString");
        destroyLocalReference(env, jPath);
        return -1;
    }
    jPathName = jVal.l;
    cPathName = (const char*) ((*env)->GetStringUTFChars(env, jPathName, NULL));
    fileInfo->mName = strdup(cPathName);
    (*env)->ReleaseStringUTFChars(env, jPathName, cPathName);
    destroyLocalReference(env, jPath);
    destroyLocalReference(env, jPathName);

    jstring     jUserName;
    const char *cUserName;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
                     "getOwner", "()Ljava/lang/String;") != 0) {
        fprintf(stderr, "Call to org.apache.hadoop.fs."
                "FileStatus::getOwner failed!\n");
        errno = EINTERNAL;
        return -1;
    }
    jUserName = jVal.l;
    cUserName = (const char*) ((*env)->GetStringUTFChars(env, jUserName, NULL));
    fileInfo->mOwner = strdup(cUserName);
    (*env)->ReleaseStringUTFChars(env, jUserName, cUserName);
    destroyLocalReference(env, jUserName);

    jstring     jGroupName;
    const char *cGroupName;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
                     "getGroup", "()Ljava/lang/String;") != 0) {
        fprintf(stderr, "Call to org.apache.hadoop.fs."
                "FileStatus::getGroup failed!\n");
        errno = EINTERNAL;
        return -1;
    }
    jGroupName = jVal.l;
    cGroupName = (const char*) ((*env)->GetStringUTFChars(env, jGroupName, NULL));
    fileInfo->mGroup = strdup(cGroupName);
    (*env)->ReleaseStringUTFChars(env, jGroupName, cGroupName);
    destroyLocalReference(env, jGroupName);

    jobject jPermission;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
                     "getPermission",
                     "()Lorg/apache/hadoop/fs/permission/FsPermission;") != 0 ||
            jVal.l == NULL) {
        fprintf(stderr, "Call to org.apache.hadoop.fs."
                "FileStatus::getPermission failed!\n");
        errno = EINTERNAL;
        return -1;
    }
    jPermission = jVal.l;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPermission, HADOOP_FSPERM,
                     "toShort", "()S") != 0) {
        fprintf(stderr, "Call to org.apache.hadoop.fs.permission."
                "FsPermission::toShort failed!\n");
        errno = EINTERNAL;
        //Release the FsPermission local reference before bailing out
        destroyLocalReference(env, jPermission);
        return -1;
    }
    fileInfo->mPermissions = jVal.s;
    destroyLocalReference(env, jPermission);

    return 0;
}
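
/*
 * Note (sketch, assuming FsPermission::toShort() keeps the conventional
 * POSIX bit layout): callers can decode mPermissions with ordinary octal
 * masks, e.g. to print the owner bits:
 *
 *   short perm = fileInfo->mPermissions;
 *   fprintf(stderr, "owner: %c%c%c\n",
 *           (perm & 0400) ? 'r' : '-',
 *           (perm & 0200) ? 'w' : '-',
 *           (perm & 0100) ? 'x' : '-');
 */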

static int
getFileInfo(JNIEnv *env, jobject jFS, jobject jPath, hdfsFileInfo *fileInfo)
{
    // JAVA EQUIVALENT:
    //  fs.isDirectory(f)
    //  fs.getModificationTime()
    //  fs.getAccessTime()
    //  fs.getLength(f)
    //  f.getPath()
    //  f.getOwner()
    //  f.getGroup()
    //  f.getPermission().toShort()

    jobject jStat;
    jvalue  jVal;
    jthrowable jExc = NULL;

    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"),
                     jPath) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::exists");
        return -1;
    }
    if (jVal.z == 0) {
        errno = ENOENT;
        return -1;
    }

    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "getFileStatus",
                     JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_STAT)),
                     jPath) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::getFileStatus");
        return -1;
    }
    jStat = jVal.l;

    int ret = getFileInfoFromStat(env, jStat, fileInfo);
    destroyLocalReference(env, jStat);
    return ret;
}

hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries)
{
    // JAVA EQUIVALENT:
    //  Path p(path);
    //  Path []pathList = fs.listPaths(p)
    //  foreach path in pathList
    //    getFileInfo(path)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return NULL;
    }

    jobject jFS = (jobject)fs;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }

    hdfsFileInfo *pathList = NULL;
    jobjectArray jPathList = NULL;
    jvalue jVal;
    jthrowable jExc = NULL;
    //listStatus is declared on FileSystem, so resolve it against HADOOP_FS;
    //the call then works for any FileSystem implementation
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, "listStatus",
                     JMETHOD1(JPARAM(HADOOP_PATH), JARRPARAM(HADOOP_STAT)),
                     jPath) != 0) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "FileSystem::listStatus");
        destroyLocalReference(env, jPath);
        return NULL;
    }
    jPathList = jVal.l;

    //Figure out the number of entries in that directory
    jsize jPathListSize = (*env)->GetArrayLength(env, jPathList);
    *numEntries = jPathListSize;
    if (jPathListSize == 0) {
        errno = 0;
        goto done;
    }

    //Allocate memory
    pathList = calloc(jPathListSize, sizeof(hdfsFileInfo));
    if (pathList == NULL) {
        errno = ENOMEM;
        goto done;
    }

    //Save path information in pathList
    jsize i;
    jobject tmpStat;
    for (i = 0; i < jPathListSize; ++i) {
        tmpStat = (*env)->GetObjectArrayElement(env, jPathList, i);
        if (getFileInfoFromStat(env, tmpStat, &pathList[i])) {
            hdfsFreeFileInfo(pathList, jPathListSize);
            destroyLocalReference(env, tmpStat);
            pathList = NULL;
            *numEntries = 0;
            goto done;
        }
        destroyLocalReference(env, tmpStat);
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);
    destroyLocalReference(env, jPathList);

    return pathList;
}
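
/*
 * Usage sketch (illustrative): the array returned by hdfsListDirectory()
 * must be released with hdfsFreeFileInfo() and the entry count it reported.
 * The path "/user" is hypothetical.
 *
 *   int n = 0;
 *   hdfsFileInfo *entries = hdfsListDirectory(fs, "/user", &n);
 *   if (entries != NULL) {
 *       int i;
 *       for (i = 0; i < n; i++) {
 *           fprintf(stderr, "%c %s\n",
 *                   (entries[i].mKind == kObjectKindDirectory) ? 'd' : '-',
 *                   entries[i].mName);
 *       }
 *       hdfsFreeFileInfo(entries, n);
 *   }
 */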

hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path)
{
    // JAVA EQUIVALENT:
    //  Path p(path);
    //  fs.isDirectory(f)
    //  fs.lastModified() ??
    //  fs.getLength(f)
    //  f.getPath()

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return NULL;
    }

    jobject jFS = (jobject)fs;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }

    hdfsFileInfo *fileInfo = calloc(1, sizeof(hdfsFileInfo));
    if (fileInfo == NULL) {
        errno = ENOMEM;
        goto done;
    }
    if (getFileInfo(env, jFS, jPath, fileInfo)) {
        hdfsFreeFileInfo(fileInfo, 1);
        fileInfo = NULL;
        goto done;
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jPath);

    return fileInfo;
}
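
/*
 * Usage sketch (illustrative): a stat()-like probe for a single path; the
 * result must be freed with hdfsFreeFileInfo(..., 1).
 *
 *   hdfsFileInfo *info = hdfsGetPathInfo(fs, "/tmp/sample");
 *   if (info != NULL) {
 *       fprintf(stderr, "%s: %lld bytes, replication %d\n",
 *               info->mName, (long long)info->mSize, info->mReplication);
 *       hdfsFreeFileInfo(info, 1);
 *   } else if (errno == ENOENT) {
 *       fprintf(stderr, "no such path\n");
 *   }
 */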

void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)
{
    //Free the strdup'ed name, owner and group strings of each entry
    int i;
    for (i = 0; i < numEntries; ++i) {
        if (hdfsFileInfo[i].mName) {
            free(hdfsFileInfo[i].mName);
        }
        if (hdfsFileInfo[i].mOwner) {
            free(hdfsFileInfo[i].mOwner);
        }
        if (hdfsFileInfo[i].mGroup) {
            free(hdfsFileInfo[i].mGroup);
        }
    }

    //Free entire block
    free(hdfsFileInfo);
}

/**
 * vim: ts=4: sw=4: et:
 */