/* test_libhdfs_ops.c */
  1. /**
  2. * Licensed to the Apache Software Foundation (ASF) under one
  3. * or more contributor license agreements. See the NOTICE file
  4. * distributed with this work for additional information
  5. * regarding copyright ownership. The ASF licenses this file
  6. * to you under the Apache License, Version 2.0 (the
  7. * "License"); you may not use this file except in compliance
  8. * with the License. You may obtain a copy of the License at
  9. *
  10. * http://www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an "AS IS" BASIS,
  14. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. */
#include "expect.h"
#include "hdfs/hdfs.h"
#include "hdfs_test.h"
#include "native_mini_dfs.h"
#include "platform.h"

#include <errno.h>
#include <inttypes.h>
#include <jni.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
  30. void permission_disp(short permissions, char *rtr) {
  31. int i;
  32. short permissionsId;
  33. char* perm;
  34. rtr[9] = '\0';
  35. for(i=2;i>=0;i--)
  36. {
  37. permissionsId = permissions >> (i * 3) & (short)7;
  38. switch(permissionsId) {
  39. case 7:
  40. perm = "rwx"; break;
  41. case 6:
  42. perm = "rw-"; break;
  43. case 5:
  44. perm = "r-x"; break;
  45. case 4:
  46. perm = "r--"; break;
  47. case 3:
  48. perm = "-wx"; break;
  49. case 2:
  50. perm = "-w-"; break;
  51. case 1:
  52. perm = "--x"; break;
  53. case 0:
  54. perm = "---"; break;
  55. default:
  56. perm = "???";
  57. }
  58. strncpy(rtr, perm, 3);
  59. rtr+=3;
  60. }
  61. }
/**
 * Shutdown and free the given mini cluster, and then exit with the provided
 * exit_code. This method is meant to be called with a non-zero exit code,
 * which is why we ignore the return status of calling MiniDFSCluster#shutdown
 * since the process is going to fail anyway.
 *
 * Note: the order matters — the cluster must be shut down before its handle
 * is freed, and exit() must come last. Does not return.
 */
void shutdown_and_exit(struct NativeMiniDfsCluster* cl, int exit_code) {
    nmdShutdown(cl);
    nmdFree(cl);
    exit(exit_code);
}
  72. int main(int argc, char **argv) {
  73. const char *writePath = "/tmp/testfile.txt";
  74. const char *fileContents = "Hello, World!";
  75. const char *readPath = "/tmp/testfile.txt";
  76. const char *srcPath = "/tmp/testfile.txt";
  77. const char *dstPath = "/tmp/testfile2.txt";
  78. const char *slashTmp = "/tmp";
  79. const char *newDirectory = "/tmp/newdir";
  80. const char *newOwner = "root";
  81. const char *tuser = "nobody";
  82. const char *appendPath = "/tmp/appends";
  83. const char *userPath = "/tmp/usertestfile.txt";
  84. char buffer[32], buffer2[256], rdbuffer[32];
  85. tSize num_written_bytes, num_read_bytes;
  86. hdfsFS fs, lfs;
  87. hdfsFile writeFile, readFile, localFile, appendFile, userFile;
  88. tOffset currentPos, seekPos;
  89. int exists, totalResult, result, numEntries, i, j;
  90. const char *resp;
  91. hdfsFileInfo *fileInfo, *fileList, *finfo;
  92. char *buffer3;
  93. char permissions[10];
  94. char ***hosts;
  95. short newPerm = 0666;
  96. tTime newMtime, newAtime;
  97. // Create and start the mini cluster
  98. struct NativeMiniDfsCluster* cl;
  99. struct NativeMiniDfsConf conf = {
  100. 1, /* doFormat */
  101. };
  102. cl = nmdCreate(&conf);
  103. EXPECT_NONNULL(cl);
  104. EXPECT_ZERO(nmdWaitClusterUp(cl));
  105. tPort port;
  106. port = (tPort) nmdGetNameNodePort(cl);
  107. // Create a hdfs connection to the mini cluster
  108. struct hdfsBuilder *bld;
  109. bld = hdfsNewBuilder();
  110. EXPECT_NONNULL(bld);
  111. hdfsBuilderSetForceNewInstance(bld);
  112. hdfsBuilderSetNameNode(bld, "localhost");
  113. hdfsBuilderSetNameNodePort(bld, port);
  114. // The HDFS append tests require setting this property otherwise the tests fail with:
  115. //
  116. // IOException: Failed to replace a bad datanode on the existing pipeline due to no more good datanodes being
  117. // available to try. The current failed datanode replacement policy is DEFAULT, and a client may configure this
  118. // via 'dfs.client.block.write.replace-datanode-on-failure.policy' in its configuration.
  119. //
  120. // It seems that when operating against a mini DFS cluster, some HDFS append tests require setting this property
  121. // (for example, see TestFileAppend#testMultipleAppends)
  122. hdfsBuilderConfSetStr(bld, "dfs.client.block.write.replace-datanode-on-failure.enable", "false");
  123. fs = hdfsBuilderConnect(bld);
  124. if(!fs) {
  125. fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
  126. shutdown_and_exit(cl, -1);
  127. }
  128. lfs = hdfsConnectNewInstance(NULL, 0);
  129. if(!lfs) {
  130. fprintf(stderr, "Oops! Failed to connect to 'local' hdfs!\n");
  131. shutdown_and_exit(cl, -1);
  132. }
  133. {
  134. //Write tests
  135. writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
  136. if(!writeFile) {
  137. fprintf(stderr, "Failed to open %s for writing!\n", writePath);
  138. shutdown_and_exit(cl, -1);
  139. }
  140. fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
  141. num_written_bytes =
  142. hdfsWrite(fs, writeFile, (void*)fileContents,
  143. (tSize)(strlen(fileContents)+1));
  144. if (num_written_bytes != strlen(fileContents) + 1) {
  145. fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
  146. (int)(strlen(fileContents) + 1), (int)num_written_bytes);
  147. shutdown_and_exit(cl, -1);
  148. }
  149. fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
  150. currentPos = -1;
  151. if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
  152. fprintf(stderr,
  153. "Failed to get current file position correctly! Got %" PRId64 "!\n",
  154. currentPos);
  155. shutdown_and_exit(cl, -1);
  156. }
  157. fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
  158. if (hdfsFlush(fs, writeFile)) {
  159. fprintf(stderr, "Failed to 'flush' %s\n", writePath);
  160. shutdown_and_exit(cl, -1);
  161. }
  162. fprintf(stderr, "Flushed %s successfully!\n", writePath);
  163. if (hdfsHFlush(fs, writeFile)) {
  164. fprintf(stderr, "Failed to 'hflush' %s\n", writePath);
  165. shutdown_and_exit(cl, -1);
  166. }
  167. fprintf(stderr, "HFlushed %s successfully!\n", writePath);
  168. hdfsCloseFile(fs, writeFile);
  169. }
  170. {
  171. //Read tests
  172. exists = hdfsExists(fs, readPath);
  173. if (exists) {
  174. fprintf(stderr, "Failed to validate existence of %s\n", readPath);
  175. shutdown_and_exit(cl, -1);
  176. }
  177. readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
  178. if (!readFile) {
  179. fprintf(stderr, "Failed to open %s for reading!\n", readPath);
  180. shutdown_and_exit(cl, -1);
  181. }
  182. if (!hdfsFileIsOpenForRead(readFile)) {
  183. fprintf(stderr, "hdfsFileIsOpenForRead: we just opened a file "
  184. "with O_RDONLY, and it did not show up as 'open for "
  185. "read'\n");
  186. shutdown_and_exit(cl, -1);
  187. }
  188. fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));
  189. seekPos = 1;
  190. if(hdfsSeek(fs, readFile, seekPos)) {
  191. fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
  192. shutdown_and_exit(cl, -1);
  193. }
  194. currentPos = -1;
  195. if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
  196. fprintf(stderr,
  197. "Failed to get current file position correctly! Got %" PRId64 "!\n",
  198. currentPos);
  199. shutdown_and_exit(cl, -1);
  200. }
  201. fprintf(stderr, "Current position: %" PRId64 "\n", currentPos);
  202. if (!hdfsFileUsesDirectRead(readFile)) {
  203. fprintf(stderr, "Direct read support incorrectly not detected "
  204. "for HDFS filesystem\n");
  205. shutdown_and_exit(cl, -1);
  206. }
  207. fprintf(stderr, "Direct read support detected for HDFS\n");
  208. // Test the direct read path
  209. if(hdfsSeek(fs, readFile, 0)) {
  210. fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
  211. shutdown_and_exit(cl, -1);
  212. }
  213. memset(buffer, 0, sizeof(buffer));
  214. num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
  215. sizeof(buffer));
  216. if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
  217. fprintf(stderr, "Failed to read (direct). Expected %s but got %s (%d bytes)\n",
  218. fileContents, buffer, num_read_bytes);
  219. shutdown_and_exit(cl, -1);
  220. }
  221. fprintf(stderr, "Read (direct) following %d bytes:\n%s\n",
  222. num_read_bytes, buffer);
  223. if (hdfsSeek(fs, readFile, 0L)) {
  224. fprintf(stderr, "Failed to seek to file start!\n");
  225. shutdown_and_exit(cl, -1);
  226. }
  227. // Disable the direct read path so that we really go through the slow
  228. // read path
  229. hdfsFileDisableDirectRead(readFile);
  230. num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
  231. sizeof(buffer));
  232. fprintf(stderr, "Read following %d bytes:\n%s\n",
  233. num_read_bytes, buffer);
  234. memset(buffer, 0, strlen(fileContents + 1));
  235. num_read_bytes = hdfsPread(fs, readFile, 0, (void*)buffer,
  236. sizeof(buffer));
  237. fprintf(stderr, "Read following %d bytes:\n%s\n",
  238. num_read_bytes, buffer);
  239. hdfsCloseFile(fs, readFile);
  240. // Test correct behaviour for unsupported filesystems
  241. localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
  242. if(!localFile) {
  243. fprintf(stderr, "Failed to open %s for writing!\n", writePath);
  244. shutdown_and_exit(cl, -1);
  245. }
  246. num_written_bytes = hdfsWrite(lfs, localFile, (void*)fileContents,
  247. (tSize)(strlen(fileContents) + 1));
  248. hdfsCloseFile(lfs, localFile);
  249. localFile = hdfsOpenFile(lfs, writePath, O_RDONLY, 0, 0, 0);
  250. if (hdfsFileUsesDirectRead(localFile)) {
  251. fprintf(stderr, "Direct read support incorrectly detected for local "
  252. "filesystem\n");
  253. shutdown_and_exit(cl, -1);
  254. }
  255. hdfsCloseFile(lfs, localFile);
  256. }
  257. totalResult = 0;
  258. result = 0;
  259. {
  260. //Generic file-system operations
  261. fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
  262. totalResult += result;
  263. fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
  264. totalResult += result;
  265. fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath, lfs, dstPath)) != 0 ? "Failed!" : "Success!"));
  266. totalResult += result;
  267. fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
  268. totalResult += result;
  269. fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) != 0 ? "Failed!" : "Success!"));
  270. totalResult += result;
  271. fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
  272. totalResult += result;
  273. fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) != 0 ? "Failed!" : "Success!"));
  274. totalResult += result;
  275. fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 2)) != 0 ? "Failed!" : "Success!"));
  276. totalResult += result;
  277. fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
  278. totalResult += (resp ? 0 : 1);
  279. fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) != 0 ? "Failed!" : "Success!"));
  280. totalResult += result;
  281. fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
  282. totalResult += (resp ? 0 : 1);
  283. fprintf(stderr, "hdfsGetDefaultBlockSize: %" PRId64 "\n", hdfsGetDefaultBlockSize(fs));
  284. fprintf(stderr, "hdfsGetCapacity: %" PRId64 "\n", hdfsGetCapacity(fs));
  285. fprintf(stderr, "hdfsGetUsed: %" PRId64 "\n", hdfsGetUsed(fs));
  286. fileInfo = NULL;
  287. if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
  288. fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
  289. fprintf(stderr, "Name: %s, ", fileInfo->mName);
  290. fprintf(stderr, "Type: %c, ", (char)(fileInfo->mKind));
  291. fprintf(stderr, "Replication: %d, ", fileInfo->mReplication);
  292. fprintf(stderr, "BlockSize: %" PRId64 ", ", fileInfo->mBlockSize);
  293. fprintf(stderr, "Size: %" PRId64 ", ", fileInfo->mSize);
  294. fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod));
  295. fprintf(stderr, "Owner: %s, ", fileInfo->mOwner);
  296. fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
  297. permission_disp(fileInfo->mPermissions, permissions);
  298. fprintf(stderr, "Permissions: %d (%s)\n", fileInfo->mPermissions, permissions);
  299. hdfsFreeFileInfo(fileInfo, 1);
  300. } else {
  301. totalResult++;
  302. fprintf(stderr, "waah! hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
  303. }
  304. fileList = 0;
  305. fileList = hdfsListDirectory(fs, newDirectory, &numEntries);
  306. if (!(fileList == NULL && numEntries == 0 && !errno)) {
  307. fprintf(stderr, "waah! hdfsListDirectory for empty %s - FAILED!\n", newDirectory);
  308. totalResult++;
  309. } else {
  310. fprintf(stderr, "hdfsListDirectory for empty %s - SUCCESS!\n", newDirectory);
  311. }
  312. fileList = 0;
  313. if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) {
  314. for(i=0; i < numEntries; ++i) {
  315. fprintf(stderr, "Name: %s, ", fileList[i].mName);
  316. fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind);
  317. fprintf(stderr, "Replication: %d, ", fileList[i].mReplication);
  318. fprintf(stderr, "BlockSize: %" PRId64 ", ", fileList[i].mBlockSize);
  319. fprintf(stderr, "Size: %" PRId64 ", ", fileList[i].mSize);
  320. fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
  321. fprintf(stderr, "Owner: %s, ", fileList[i].mOwner);
  322. fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
  323. permission_disp(fileList[i].mPermissions, permissions);
  324. fprintf(stderr, "Permissions: %d (%s)\n", fileList[i].mPermissions, permissions);
  325. }
  326. hdfsFreeFileInfo(fileList, numEntries);
  327. } else {
  328. if (errno) {
  329. totalResult++;
  330. fprintf(stderr, "waah! hdfsListDirectory - FAILED!\n");
  331. } else {
  332. fprintf(stderr, "Empty directory!\n");
  333. }
  334. }
  335. hosts = hdfsGetHosts(fs, srcPath, 0, 1);
  336. if(hosts) {
  337. fprintf(stderr, "hdfsGetHosts - SUCCESS! ... \n");
  338. i=0;
  339. while(hosts[i]) {
  340. j = 0;
  341. while(hosts[i][j]) {
  342. fprintf(stderr,
  343. "\thosts[%d][%d] - %s\n", i, j, hosts[i][j]);
  344. ++j;
  345. }
  346. ++i;
  347. }
  348. } else {
  349. totalResult++;
  350. fprintf(stderr, "waah! hdfsGetHosts - FAILED!\n");
  351. }
  352. // setting tmp dir to 777 so later when connectAsUser nobody, we can write to it
  353. // chown write
  354. fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) != 0 ? "Failed!" : "Success!"));
  355. totalResult += result;
  356. fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) != 0 ? "Failed!" : "Success!"));
  357. totalResult += result;
  358. // chmod write
  359. fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) != 0 ? "Failed!" : "Success!"));
  360. totalResult += result;
  361. sleep(2);
  362. newMtime = time(NULL);
  363. newAtime = time(NULL);
  364. // utime write
  365. fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) != 0 ? "Failed!" : "Success!"));
  366. totalResult += result;
  367. // chown/chmod/utime read
  368. finfo = hdfsGetPathInfo(fs, writePath);
  369. fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner))) != 0 ? "Failed!" : "Success!"));
  370. totalResult += result;
  371. fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) != 0 ? "Failed!" : "Success!"));
  372. totalResult += result;
  373. // will later use /tmp/ as a different user so enable it
  374. fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) != 0 ? "Failed!" : "Success!"));
  375. totalResult += result;
  376. fprintf(stderr,"newMTime=%ld\n",newMtime);
  377. fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod);
  378. fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime)) != 0 ? "Failed!" : "Success!"));
  379. totalResult += result;
  380. // No easy way to turn on access times from hdfs_test right now
  381. // fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result = (finfo->mLastAccess != newAtime)) != 0 ? "Failed!" : "Success!"));
  382. // totalResult += result;
  383. hdfsFreeFileInfo(finfo, 1);
  384. // Clean up
  385. fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory, 1)) != 0 ? "Failed!" : "Success!"));
  386. totalResult += result;
  387. fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
  388. totalResult += result;
  389. fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
  390. totalResult += result;
  391. fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath, 1)) != 0 ? "Failed!" : "Success!"));
  392. totalResult += result;
  393. fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) != 0 ? "Success!" : "Failed!"));
  394. totalResult += (result ? 0 : 1);
  395. }
  396. {
  397. // TEST APPENDS
  398. // CREATE
  399. appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0);
  400. if(!appendFile) {
  401. fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
  402. shutdown_and_exit(cl, -1);
  403. }
  404. fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
  405. buffer3 = "Hello,";
  406. num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
  407. (tSize)strlen(buffer3));
  408. fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
  409. if (hdfsFlush(fs, appendFile)) {
  410. fprintf(stderr, "Failed to 'flush' %s\n", appendPath);
  411. shutdown_and_exit(cl, -1);
  412. }
  413. fprintf(stderr, "Flushed %s successfully!\n", appendPath);
  414. hdfsCloseFile(fs, appendFile);
  415. // RE-OPEN
  416. appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY|O_APPEND, 0, 0, 0);
  417. if(!appendFile) {
  418. fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
  419. shutdown_and_exit(cl, -1);
  420. }
  421. fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
  422. buffer3 = " World";
  423. num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
  424. (tSize)(strlen(buffer3) + 1));
  425. fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
  426. if (hdfsFlush(fs, appendFile)) {
  427. fprintf(stderr, "Failed to 'flush' %s\n", appendPath);
  428. shutdown_and_exit(cl, -1);
  429. }
  430. fprintf(stderr, "Flushed %s successfully!\n", appendPath);
  431. hdfsCloseFile(fs, appendFile);
  432. // CHECK size
  433. finfo = hdfsGetPathInfo(fs, appendPath);
  434. fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == (tOffset)(strlen("Hello, World") + 1))) == 1 ? "Success!" : "Failed!"));
  435. totalResult += (result ? 0 : 1);
  436. // READ and check data
  437. readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0);
  438. if (!readFile) {
  439. fprintf(stderr, "Failed to open %s for reading!\n", appendPath);
  440. shutdown_and_exit(cl, -1);
  441. }
  442. num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
  443. fprintf(stderr, "Read following %d bytes:\n%s\n",
  444. num_read_bytes, rdbuffer);
  445. fprintf(stderr, "read == Hello, World %s\n", ((result = (strcmp(rdbuffer, "Hello, World"))) == 0 ? "Success!" : "Failed!"));
  446. hdfsCloseFile(fs, readFile);
  447. // DONE test appends
  448. }
  449. totalResult += (hdfsDisconnect(fs) != 0);
  450. {
  451. //
  452. // Now test as connecting as a specific user
  453. // This is only meant to test that we connected as that user, not to test
  454. // the actual fs user capabilities. Thus just create a file and read
  455. // the owner is correct.
  456. fs = hdfsConnectAsUserNewInstance("localhost", port, tuser);
  457. if(!fs) {
  458. fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
  459. shutdown_and_exit(cl, -1);
  460. }
  461. userFile = hdfsOpenFile(fs, userPath, O_WRONLY|O_CREAT, 0, 0, 0);
  462. if(!userFile) {
  463. fprintf(stderr, "Failed to open %s for writing!\n", userPath);
  464. shutdown_and_exit(cl, -1);
  465. }
  466. fprintf(stderr, "Opened %s for writing successfully...\n", userPath);
  467. num_written_bytes = hdfsWrite(fs, userFile, (void*)fileContents,
  468. (tSize)(strlen(fileContents)+1));
  469. fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
  470. if (hdfsFlush(fs, userFile)) {
  471. fprintf(stderr, "Failed to 'flush' %s\n", userPath);
  472. shutdown_and_exit(cl, -1);
  473. }
  474. fprintf(stderr, "Flushed %s successfully!\n", userPath);
  475. hdfsCloseFile(fs, userFile);
  476. finfo = hdfsGetPathInfo(fs, userPath);
  477. fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser))) != 0 ? "Failed!" : "Success!"));
  478. totalResult += result;
  479. }
  480. totalResult += (hdfsDisconnect(fs) != 0);
  481. EXPECT_ZERO(nmdShutdown(cl));
  482. nmdFree(cl);
  483. if (totalResult != 0) {
  484. return -1;
  485. } else {
  486. return 0;
  487. }
  488. }
  489. /**
  490. * vim: ts=4: sw=4: et:
  491. */